xref: /freebsd/sys/dev/ath/if_ath.c (revision 7a0a89d2cb29ee2c383600fa59e42d714a6dcbcb)
1 /*-
2  * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13  *    redistribution must be conditioned upon including a substantially
14  *    similar Disclaimer requirement for further binary redistribution.
15  *
16  * NO WARRANTY
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGES.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 /*
34  * Driver for the Atheros Wireless LAN controller.
35  *
36  * This software is derived from work of Atsushi Onoe; his contribution
37  * is greatly appreciated.
38  */
39 
40 #include "opt_inet.h"
41 #include "opt_ath.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/sysctl.h>
46 #include <sys/mbuf.h>
47 #include <sys/malloc.h>
48 #include <sys/lock.h>
49 #include <sys/mutex.h>
50 #include <sys/kernel.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/errno.h>
54 #include <sys/callout.h>
55 #include <sys/bus.h>
56 #include <sys/endian.h>
57 #include <sys/kthread.h>
58 #include <sys/taskqueue.h>
59 
60 #include <machine/bus.h>
61 
62 #include <net/if.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65 #include <net/if_types.h>
66 #include <net/if_arp.h>
67 #include <net/ethernet.h>
68 #include <net/if_llc.h>
69 
70 #include <net80211/ieee80211_var.h>
71 
72 #include <net/bpf.h>
73 
74 #ifdef INET
75 #include <netinet/in.h>
76 #include <netinet/if_ether.h>
77 #endif
78 
79 #include <dev/ath/if_athvar.h>
80 #include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
81 
82 #ifdef ATH_TX99_DIAG
83 #include <dev/ath/ath_tx99/ath_tx99.h>
84 #endif
85 
86 /*
87  * We require a HAL w/ the changes for split tx/rx MIC.
88  */
89 CTASSERT(HAL_ABI_VERSION > 0x06052200);
90 
91 /*
92  * ATH_BCBUF determines the number of vap's that can transmit
93  * beacons and also (currently) the number of vap's that can
94  * have unique mac addresses/bssid.  When staggering beacons
95  * 4 is probably a good max as otherwise the beacons become
96  * very closely spaced and there is limited time for cab q traffic
97  * to go out.  You can burst beacons instead but that is not good
98  * for stations in power save and at some point you really want
99  * another radio (and channel).
100  *
101  * The limit on the number of mac addresses is tied to our use of
102  * the U/L bit and tracking addresses in a byte; it would be
103  * worthwhile to allow more for applications like proxy sta.
104  */
105 CTASSERT(ATH_BCBUF <= 8);
106 
107 /* unaligned little endian access */
108 #define LE_READ_2(p)							\
109 	((u_int16_t)							\
110 	 ((((u_int8_t *)(p))[0]      ) | (((u_int8_t *)(p))[1] <<  8)))
111 #define LE_READ_4(p)							\
112 	((u_int32_t)							\
113 	 ((((u_int8_t *)(p))[0]      ) | (((u_int8_t *)(p))[1] <<  8) |	\
114 	  (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24)))
115 
116 #define	CTRY_XR9	5001		/* Ubiquiti XR9 */
117 #define	CTRY_GZ901	5002		/* ZComax GZ-901 */
118 
119 static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
120 		    const char name[IFNAMSIZ], int unit, int opmode,
121 		    int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
122 		    const uint8_t mac[IEEE80211_ADDR_LEN]);
123 static void	ath_vap_delete(struct ieee80211vap *);
124 static void	ath_init(void *);
125 static void	ath_stop_locked(struct ifnet *);
126 static void	ath_stop(struct ifnet *);
127 static void	ath_start(struct ifnet *);
128 static int	ath_reset(struct ifnet *);
129 static int	ath_reset_vap(struct ieee80211vap *, u_long);
130 static int	ath_media_change(struct ifnet *);
131 static void	ath_watchdog(struct ifnet *);
132 static int	ath_ioctl(struct ifnet *, u_long, caddr_t);
133 static void	ath_fatal_proc(void *, int);
134 static void	ath_bmiss_vap(struct ieee80211vap *);
135 static void	ath_bmiss_proc(void *, int);
136 static int	ath_keyset(struct ath_softc *, const struct ieee80211_key *,
137 			struct ieee80211_node *);
138 static int	ath_key_alloc(struct ieee80211vap *,
139 			struct ieee80211_key *,
140 			ieee80211_keyix *, ieee80211_keyix *);
141 static int	ath_key_delete(struct ieee80211vap *,
142 			const struct ieee80211_key *);
143 static int	ath_key_set(struct ieee80211vap *, const struct ieee80211_key *,
144 			const u_int8_t mac[IEEE80211_ADDR_LEN]);
145 static void	ath_key_update_begin(struct ieee80211vap *);
146 static void	ath_key_update_end(struct ieee80211vap *);
147 static void	ath_update_mcast(struct ifnet *);
148 static void	ath_update_promisc(struct ifnet *);
149 static void	ath_mode_init(struct ath_softc *);
150 static void	ath_setslottime(struct ath_softc *);
151 static void	ath_updateslot(struct ifnet *);
152 static int	ath_beaconq_setup(struct ath_hal *);
153 static int	ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
154 static void	ath_beacon_update(struct ieee80211vap *, int item);
155 static void	ath_beacon_setup(struct ath_softc *, struct ath_buf *);
156 static void	ath_beacon_proc(void *, int);
157 static struct ath_buf *ath_beacon_generate(struct ath_softc *,
158 			struct ieee80211vap *);
159 static void	ath_bstuck_proc(void *, int);
160 static void	ath_beacon_return(struct ath_softc *, struct ath_buf *);
161 static void	ath_beacon_free(struct ath_softc *);
162 static void	ath_beacon_config(struct ath_softc *, struct ieee80211vap *);
163 static void	ath_descdma_cleanup(struct ath_softc *sc,
164 			struct ath_descdma *, ath_bufhead *);
165 static int	ath_desc_alloc(struct ath_softc *);
166 static void	ath_desc_free(struct ath_softc *);
167 static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
168 			const uint8_t [IEEE80211_ADDR_LEN]);
169 static void	ath_node_free(struct ieee80211_node *);
170 static void	ath_node_getsignal(const struct ieee80211_node *,
171 			int8_t *, int8_t *);
172 static int	ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
173 static void	ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
174 			int subtype, int rssi, int noise, u_int32_t rstamp);
175 static void	ath_setdefantenna(struct ath_softc *, u_int);
176 static void	ath_rx_proc(void *, int);
177 static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
178 static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
179 static int	ath_tx_setup(struct ath_softc *, int, int);
180 static int	ath_wme_update(struct ieee80211com *);
181 static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
182 static void	ath_tx_cleanup(struct ath_softc *);
183 static void	ath_freetx(struct mbuf *);
184 static int	ath_tx_start(struct ath_softc *, struct ieee80211_node *,
185 			     struct ath_buf *, struct mbuf *);
186 static void	ath_tx_proc_q0(void *, int);
187 static void	ath_tx_proc_q0123(void *, int);
188 static void	ath_tx_proc(void *, int);
189 static void	ath_tx_draintxq(struct ath_softc *, struct ath_txq *);
190 static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
191 static void	ath_draintxq(struct ath_softc *);
192 static void	ath_stoprecv(struct ath_softc *);
193 static int	ath_startrecv(struct ath_softc *);
194 static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
195 static void	ath_scan_start(struct ieee80211com *);
196 static void	ath_scan_end(struct ieee80211com *);
197 static void	ath_set_channel(struct ieee80211com *);
198 static void	ath_calibrate(void *);
199 static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
200 static void	ath_setup_stationkey(struct ieee80211_node *);
201 static void	ath_newassoc(struct ieee80211_node *, int);
202 static int	ath_setregdomain(struct ieee80211com *,
203 		    struct ieee80211_regdomain *, int,
204 		    struct ieee80211_channel []);
205 static void	ath_getradiocaps(struct ieee80211com *, int *,
206 		    struct ieee80211_channel []);
207 static int	ath_getchannels(struct ath_softc *);
208 static void	ath_led_event(struct ath_softc *, int);
209 
210 static int	ath_rate_setup(struct ath_softc *, u_int mode);
211 static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
212 
213 static void	ath_sysctlattach(struct ath_softc *);
214 static int	ath_raw_xmit(struct ieee80211_node *,
215 			struct mbuf *, const struct ieee80211_bpf_params *);
216 static void	ath_bpfattach(struct ath_softc *);
217 static void	ath_announce(struct ath_softc *);
218 
219 SYSCTL_DECL(_hw_ath);
220 
221 /* XXX validate sysctl values */
222 static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
223 SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
224 	    0, "long chip calibration interval (secs)");
225 static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
226 SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
227 	    0, "short chip calibration interval (msecs)");
228 static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
229 SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
230 	    0, "reset chip calibration results (secs)");
231 
232 static	int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
233 SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
234 	    0, "rx buffers allocated");
235 TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
236 static	int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
237 SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
238 	    0, "tx buffers allocated");
239 TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
240 
241 #ifdef ATH_DEBUG
242 enum {
243 	ATH_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
244 	ATH_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
245 	ATH_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
246 	ATH_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
247 	ATH_DEBUG_RATE		= 0x00000010,	/* rate control */
248 	ATH_DEBUG_RESET		= 0x00000020,	/* reset processing */
249 	ATH_DEBUG_MODE		= 0x00000040,	/* mode init/setup */
250 	ATH_DEBUG_BEACON 	= 0x00000080,	/* beacon handling */
251 	ATH_DEBUG_WATCHDOG 	= 0x00000100,	/* watchdog timeout */
252 	ATH_DEBUG_INTR		= 0x00001000,	/* ISR */
253 	ATH_DEBUG_TX_PROC	= 0x00002000,	/* tx ISR proc */
254 	ATH_DEBUG_RX_PROC	= 0x00004000,	/* rx ISR proc */
255 	ATH_DEBUG_BEACON_PROC	= 0x00008000,	/* beacon ISR proc */
256 	ATH_DEBUG_CALIBRATE	= 0x00010000,	/* periodic calibration */
257 	ATH_DEBUG_KEYCACHE	= 0x00020000,	/* key cache management */
258 	ATH_DEBUG_STATE		= 0x00040000,	/* 802.11 state transitions */
259 	ATH_DEBUG_NODE		= 0x00080000,	/* node management */
260 	ATH_DEBUG_LED		= 0x00100000,	/* led management */
261 	ATH_DEBUG_FF		= 0x00200000,	/* fast frames */
262 	ATH_DEBUG_DFS		= 0x00400000,	/* DFS processing */
263 	ATH_DEBUG_REGDOMAIN	= 0x02000000,	/* regulatory processing */
264 	ATH_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
265 	ATH_DEBUG_ANY		= 0xffffffff
266 };
267 static	int ath_debug = 0;
268 SYSCTL_INT(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug,
269 	    0, "control debugging printfs");
270 TUNABLE_INT("hw.ath.debug", &ath_debug);
271 
272 #define	IFF_DUMPPKTS(sc, m) \
273 	((sc->sc_debug & (m)) || \
274 	    (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
275 #define	DPRINTF(sc, m, fmt, ...) do {				\
276 	if (sc->sc_debug & (m))					\
277 		printf(fmt, __VA_ARGS__);			\
278 } while (0)
279 #define	KEYPRINTF(sc, ix, hk, mac) do {				\
280 	if (sc->sc_debug & ATH_DEBUG_KEYCACHE)			\
281 		ath_keyprint(sc, __func__, ix, hk, mac);	\
282 } while (0)
283 static	void ath_printrxbuf(struct ath_softc *, const struct ath_buf *bf,
284 	u_int ix, int);
285 static	void ath_printtxbuf(struct ath_softc *, const struct ath_buf *bf,
286 	u_int qnum, u_int ix, int done);
287 #else
288 #define	IFF_DUMPPKTS(sc, m) \
289 	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
290 #define	DPRINTF(sc, m, fmt, ...) do {				\
291 	(void) sc;						\
292 } while (0)
293 #define	KEYPRINTF(sc, k, ix, mac) do {				\
294 	(void) sc;						\
295 } while (0)
296 #endif
297 
298 MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");
299 
300 int
301 ath_attach(u_int16_t devid, struct ath_softc *sc)
302 {
303 	struct ifnet *ifp;
304 	struct ieee80211com *ic;
305 	struct ath_hal *ah = NULL;
306 	HAL_STATUS status;
307 	int error = 0, i;
308 	u_int wmodes;
309 
310 	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
311 
312 	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
313 	if (ifp == NULL) {
314 		device_printf(sc->sc_dev, "can not if_alloc()\n");
315 		error = ENOSPC;
316 		goto bad;
317 	}
318 	ic = ifp->if_l2com;
319 
320 	/* set these up early for if_printf use */
321 	if_initname(ifp, device_get_name(sc->sc_dev),
322 		device_get_unit(sc->sc_dev));
323 
324 	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status);
325 	if (ah == NULL) {
326 		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
327 			status);
328 		error = ENXIO;
329 		goto bad;
330 	}
331 	if (ah->ah_abi != HAL_ABI_VERSION) {
332 		if_printf(ifp, "HAL ABI mismatch detected "
333 			"(HAL:0x%x != driver:0x%x)\n",
334 			ah->ah_abi, HAL_ABI_VERSION);
335 		error = ENXIO;
336 		goto bad;
337 	}
338 	sc->sc_ah = ah;
339 	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
340 #ifdef	ATH_DEBUG
341 	sc->sc_debug = ath_debug;
342 #endif
343 
344 	/*
345 	 * Check if the MAC has multi-rate retry support.
346 	 * We do this by trying to setup a fake extended
347 	 * descriptor.  MAC's that don't have support will
348 	 * return false w/o doing anything.  MAC's that do
349 	 * support it will return true w/o doing anything.
350 	 */
351 	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);
352 
353 	/*
354 	 * Check if the device has hardware counters for PHY
355 	 * errors.  If so we need to enable the MIB interrupt
356 	 * so we can act on stat triggers.
357 	 */
358 	if (ath_hal_hwphycounters(ah))
359 		sc->sc_needmib = 1;
360 
361 	/*
362 	 * Get the hardware key cache size.
363 	 */
364 	sc->sc_keymax = ath_hal_keycachesize(ah);
365 	if (sc->sc_keymax > ATH_KEYMAX) {
366 		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
367 			ATH_KEYMAX, sc->sc_keymax);
368 		sc->sc_keymax = ATH_KEYMAX;
369 	}
370 	/*
371 	 * Reset the key cache since some parts do not
372 	 * reset the contents on initial power up.
373 	 */
374 	for (i = 0; i < sc->sc_keymax; i++)
375 		ath_hal_keyreset(ah, i);
376 
377 	/*
378 	 * Collect the default channel list.
379 	 */
380 	error = ath_getchannels(sc);
381 	if (error != 0)
382 		goto bad;
383 
384 	/*
385 	 * Setup rate tables for all potential media types.
386 	 */
387 	ath_rate_setup(sc, IEEE80211_MODE_11A);
388 	ath_rate_setup(sc, IEEE80211_MODE_11B);
389 	ath_rate_setup(sc, IEEE80211_MODE_11G);
390 	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
391 	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
392 	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
393 	ath_rate_setup(sc, IEEE80211_MODE_11NA);
394 	ath_rate_setup(sc, IEEE80211_MODE_11NG);
395 	ath_rate_setup(sc, IEEE80211_MODE_HALF);
396 	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);
397 
398 	/* NB: setup here so ath_rate_update is happy */
399 	ath_setcurmode(sc, IEEE80211_MODE_11A);
400 
401 	/*
402 	 * Allocate tx+rx descriptors and populate the lists.
403 	 */
404 	error = ath_desc_alloc(sc);
405 	if (error != 0) {
406 		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
407 		goto bad;
408 	}
409 	callout_init(&sc->sc_cal_ch, CALLOUT_MPSAFE);
410 
411 	ATH_TXBUF_LOCK_INIT(sc);
412 
413 	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
414 		taskqueue_thread_enqueue, &sc->sc_tq);
415 	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
416 		"%s taskq", ifp->if_xname);
417 
418 	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc);
419 	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
420 	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
421 
422 	/*
423 	 * Allocate hardware transmit queues: one queue for
424 	 * beacon frames and one data queue for each QoS
425 	 * priority.  Note that the hal handles reseting
426 	 * these queues at the needed time.
427 	 *
428 	 * XXX PS-Poll
429 	 */
430 	sc->sc_bhalq = ath_beaconq_setup(ah);
431 	if (sc->sc_bhalq == (u_int) -1) {
432 		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
433 		error = EIO;
434 		goto bad2;
435 	}
436 	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
437 	if (sc->sc_cabq == NULL) {
438 		if_printf(ifp, "unable to setup CAB xmit queue!\n");
439 		error = EIO;
440 		goto bad2;
441 	}
442 	/* NB: insure BK queue is the lowest priority h/w queue */
443 	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
444 		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
445 			ieee80211_wme_acnames[WME_AC_BK]);
446 		error = EIO;
447 		goto bad2;
448 	}
449 	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
450 	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
451 	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
452 		/*
453 		 * Not enough hardware tx queues to properly do WME;
454 		 * just punt and assign them all to the same h/w queue.
455 		 * We could do a better job of this if, for example,
456 		 * we allocate queues when we switch from station to
457 		 * AP mode.
458 		 */
459 		if (sc->sc_ac2q[WME_AC_VI] != NULL)
460 			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
461 		if (sc->sc_ac2q[WME_AC_BE] != NULL)
462 			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
463 		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
464 		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
465 		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
466 	}
467 
468 	/*
469 	 * Special case certain configurations.  Note the
470 	 * CAB queue is handled by these specially so don't
471 	 * include them when checking the txq setup mask.
472 	 */
473 	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
474 	case 0x01:
475 		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
476 		break;
477 	case 0x0f:
478 		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
479 		break;
480 	default:
481 		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
482 		break;
483 	}
484 
485 	/*
486 	 * Setup rate control.  Some rate control modules
487 	 * call back to change the anntena state so expose
488 	 * the necessary entry points.
489 	 * XXX maybe belongs in struct ath_ratectrl?
490 	 */
491 	sc->sc_setdefantenna = ath_setdefantenna;
492 	sc->sc_rc = ath_rate_attach(sc);
493 	if (sc->sc_rc == NULL) {
494 		error = EIO;
495 		goto bad2;
496 	}
497 
498 	sc->sc_blinking = 0;
499 	sc->sc_ledstate = 1;
500 	sc->sc_ledon = 0;			/* low true */
501 	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
502 	callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);
503 	/*
504 	 * Auto-enable soft led processing for IBM cards and for
505 	 * 5211 minipci cards.  Users can also manually enable/disable
506 	 * support with a sysctl.
507 	 */
508 	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
509 	if (sc->sc_softled) {
510 		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin);
511 		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
512 	}
513 
514 	ifp->if_softc = sc;
515 	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
516 	ifp->if_start = ath_start;
517 	ifp->if_watchdog = ath_watchdog;
518 	ifp->if_ioctl = ath_ioctl;
519 	ifp->if_init = ath_init;
520 	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
521 	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
522 	IFQ_SET_READY(&ifp->if_snd);
523 
524 	ic->ic_ifp = ifp;
525 	/* XXX not right but it's not used anywhere important */
526 	ic->ic_phytype = IEEE80211_T_OFDM;
527 	ic->ic_opmode = IEEE80211_M_STA;
528 	ic->ic_caps =
529 		  IEEE80211_C_STA		/* station mode */
530 		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
531 		| IEEE80211_C_HOSTAP		/* hostap mode */
532 		| IEEE80211_C_MONITOR		/* monitor mode */
533 		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
534 		| IEEE80211_C_WDS		/* 4-address traffic works */
535 		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
536 		| IEEE80211_C_SHSLOT		/* short slot time supported */
537 		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
538 		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
539 		| IEEE80211_C_TXFRAG		/* handle tx frags */
540 		;
541 	/*
542 	 * Query the hal to figure out h/w crypto support.
543 	 */
544 	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
545 		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
546 	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
547 		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
548 	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
549 		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
550 	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
551 		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
552 	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
553 		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
554 		/*
555 		 * Check if h/w does the MIC and/or whether the
556 		 * separate key cache entries are required to
557 		 * handle both tx+rx MIC keys.
558 		 */
559 		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
560 			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
561 		/*
562 		 * If the h/w supports storing tx+rx MIC keys
563 		 * in one cache slot automatically enable use.
564 		 */
565 		if (ath_hal_hastkipsplit(ah) ||
566 		    !ath_hal_settkipsplit(ah, AH_FALSE))
567 			sc->sc_splitmic = 1;
568 		/*
569 		 * If the h/w can do TKIP MIC together with WME then
570 		 * we use it; otherwise we force the MIC to be done
571 		 * in software by the net80211 layer.
572 		 */
573 		if (ath_hal_haswmetkipmic(ah))
574 			sc->sc_wmetkipmic = 1;
575 	}
576 	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
577 	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
578 	/*
579 	 * Mark key cache slots associated with global keys
580 	 * as in use.  If we knew TKIP was not to be used we
581 	 * could leave the +32, +64, and +32+64 slots free.
582 	 */
583 	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
584 		setbit(sc->sc_keymap, i);
585 		setbit(sc->sc_keymap, i+64);
586 		if (sc->sc_splitmic) {
587 			setbit(sc->sc_keymap, i+32);
588 			setbit(sc->sc_keymap, i+32+64);
589 		}
590 	}
591 	/*
592 	 * TPC support can be done either with a global cap or
593 	 * per-packet support.  The latter is not available on
594 	 * all parts.  We're a bit pedantic here as all parts
595 	 * support a global cap.
596 	 */
597 	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
598 		ic->ic_caps |= IEEE80211_C_TXPMGT;
599 
600 	/*
601 	 * Mark WME capability only if we have sufficient
602 	 * hardware queues to do proper priority scheduling.
603 	 */
604 	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
605 		ic->ic_caps |= IEEE80211_C_WME;
606 	/*
607 	 * Check for misc other capabilities.
608 	 */
609 	if (ath_hal_hasbursting(ah))
610 		ic->ic_caps |= IEEE80211_C_BURST;
611 	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
612 	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
613 	if (ath_hal_hasfastframes(ah))
614 		ic->ic_caps |= IEEE80211_C_FF;
615 	wmodes = ath_hal_getwirelessmodes(ah, ic->ic_regdomain.country);
616 	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
617 		ic->ic_caps |= IEEE80211_C_TURBOP;
618 
619 	/*
620 	 * Indicate we need the 802.11 header padded to a
621 	 * 32-bit boundary for 4-address and QoS frames.
622 	 */
623 	ic->ic_flags |= IEEE80211_F_DATAPAD;
624 
625 	/*
626 	 * Query the hal about antenna support.
627 	 */
628 	sc->sc_defant = ath_hal_getdefantenna(ah);
629 
630 	/*
631 	 * Not all chips have the VEOL support we want to
632 	 * use with IBSS beacons; check here for it.
633 	 */
634 	sc->sc_hasveol = ath_hal_hasveol(ah);
635 
636 	/* get mac address from hardware */
637 	ath_hal_getmac(ah, ic->ic_myaddr);
638 	if (sc->sc_hasbmask)
639 		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);
640 
641 	/* NB: used to size node table key mapping array */
642 	ic->ic_max_keyix = sc->sc_keymax;
643 	/* call MI attach routine. */
644 	ieee80211_ifattach(ic);
645 	ic->ic_setregdomain = ath_setregdomain;
646 	ic->ic_getradiocaps = ath_getradiocaps;
647 	sc->sc_opmode = HAL_M_STA;
648 
649 	/* override default methods */
650 	ic->ic_newassoc = ath_newassoc;
651 	ic->ic_updateslot = ath_updateslot;
652 	ic->ic_wme.wme_update = ath_wme_update;
653 	ic->ic_vap_create = ath_vap_create;
654 	ic->ic_vap_delete = ath_vap_delete;
655 	ic->ic_raw_xmit = ath_raw_xmit;
656 	ic->ic_update_mcast = ath_update_mcast;
657 	ic->ic_update_promisc = ath_update_promisc;
658 	ic->ic_node_alloc = ath_node_alloc;
659 	sc->sc_node_free = ic->ic_node_free;
660 	ic->ic_node_free = ath_node_free;
661 	ic->ic_node_getsignal = ath_node_getsignal;
662 	ic->ic_scan_start = ath_scan_start;
663 	ic->ic_scan_end = ath_scan_end;
664 	ic->ic_set_channel = ath_set_channel;
665 
666 	ath_bpfattach(sc);
667 	/*
668 	 * Setup dynamic sysctl's now that country code and
669 	 * regdomain are available from the hal.
670 	 */
671 	ath_sysctlattach(sc);
672 
673 	if (bootverbose)
674 		ieee80211_announce(ic);
675 	ath_announce(sc);
676 	return 0;
677 bad2:
678 	ath_tx_cleanup(sc);
679 	ath_desc_free(sc);
680 bad:
681 	if (ah)
682 		ath_hal_detach(ah);
683 	if (ifp != NULL)
684 		if_free(ifp);
685 	sc->sc_invalid = 1;
686 	return error;
687 }
688 
/*
 * Detach the device: quiesce the chip, detach the 802.11 layer and the
 * HAL, and release every resource ath_attach acquired.  The teardown
 * order below is deliberate (see the NB comment); do not reorder.
 * Always returns 0.
 */
int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the bpf tap now that we know nothing will use
	 *   it (e.g. rx processing from the task q thread)
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
	bpfdetach(ifp);
#ifdef ATH_TX99_DIAG
	/* optional tx99 continuous-transmit diagnostic state */
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
	if_free(ifp);

	return 0;
}
729 
730 /*
731  * MAC address handling for multiple BSS on the same radio.
732  * The first vap uses the MAC address from the EEPROM.  For
733  * subsequent vap's we set the U/L bit (bit 1) in the MAC
734  * address and use the next six bits as an index.
735  */
736 static void
737 assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
738 {
739 	int i;
740 
741 	if (clone && sc->sc_hasbmask) {
742 		/* NB: we only do this if h/w supports multiple bssid */
743 		for (i = 0; i < 8; i++)
744 			if ((sc->sc_bssidmask & (1<<i)) == 0)
745 				break;
746 		if (i != 0)
747 			mac[0] |= (i << 2)|0x2;
748 	} else
749 		i = 0;
750 	sc->sc_bssidmask |= 1<<i;
751 	sc->sc_hwbssidmask[0] &= ~mac[0];
752 	if (i == 0)
753 		sc->sc_nbssid0++;
754 }
755 
756 static void
757 reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
758 {
759 	int i = mac[0] >> 2;
760 	uint8_t mask;
761 
762 	if (i != 0 || --sc->sc_nbssid0 == 0) {
763 		sc->sc_bssidmask &= ~(1<<i);
764 		/* recalculate bssid mask from remaining addresses */
765 		mask = 0xff;
766 		for (i = 1; i < 8; i++)
767 			if (sc->sc_bssidmask & (1<<i))
768 				mask &= ~((i<<2)|0x2);
769 		sc->sc_hwbssidmask[0] |= mask;
770 	}
771 }
772 
773 /*
774  * Assign a beacon xmit slot.  We try to space out
775  * assignments so when beacons are staggered the
776  * traffic coming out of the cab q has maximal time
777  * to go out before the next beacon is scheduled.
778  */
779 static int
780 assign_bslot(struct ath_softc *sc)
781 {
782 	u_int slot, free;
783 
784 	free = 0;
785 	for (slot = 0; slot < ATH_BCBUF; slot++)
786 		if (sc->sc_bslot[slot] == NULL) {
787 			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
788 			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
789 				return slot;
790 			free = slot;
791 			/* NB: keep looking for a double slot */
792 		}
793 	return free;
794 }
795 
796 static struct ieee80211vap *
797 ath_vap_create(struct ieee80211com *ic,
798 	const char name[IFNAMSIZ], int unit, int opmode, int flags,
799 	const uint8_t bssid[IEEE80211_ADDR_LEN],
800 	const uint8_t mac0[IEEE80211_ADDR_LEN])
801 {
802 	struct ath_softc *sc = ic->ic_ifp->if_softc;
803 	struct ath_vap *avp;
804 	struct ieee80211vap *vap;
805 	uint8_t mac[IEEE80211_ADDR_LEN];
806 	int ic_opmode, needbeacon, error;
807 
808 	avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
809 	    M_80211_VAP, M_WAITOK | M_ZERO);
810 	needbeacon = 0;
811 	IEEE80211_ADDR_COPY(mac, mac0);
812 
813 	ATH_LOCK(sc);
814 	switch (opmode) {
815 	case IEEE80211_M_STA:
816 		if (sc->sc_nstavaps != 0) {	/* XXX only 1 sta for now */
817 			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
818 			goto bad;
819 		}
820 		if (sc->sc_nvaps) {
821 			/*
822 			 * When there are multiple vaps we must fall
823 			 * back to s/w beacon miss handling.
824 			 */
825 			flags |= IEEE80211_CLONE_NOBEACONS;
826 		}
827 		if (flags & IEEE80211_CLONE_NOBEACONS) {
828 			sc->sc_swbmiss = 1;
829 			ic_opmode = IEEE80211_M_HOSTAP;
830 		} else
831 			ic_opmode = opmode;
832 		break;
833 	case IEEE80211_M_IBSS:
834 		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
835 			device_printf(sc->sc_dev,
836 			    "only 1 ibss vap supported\n");
837 			goto bad;
838 		}
839 		ic_opmode = opmode;
840 		needbeacon = 1;
841 		break;
842 	case IEEE80211_M_AHDEMO:
843 		/* fall thru... */
844 	case IEEE80211_M_MONITOR:
845 		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
846 			/* XXX not right for monitor mode */
847 			ic_opmode = ic->ic_opmode;
848 		} else
849 			ic_opmode = opmode;
850 		break;
851 	case IEEE80211_M_HOSTAP:
852 		needbeacon = 1;
853 		/* fall thru... */
854 	case IEEE80211_M_WDS:
855 		if (sc->sc_nvaps && ic->ic_opmode == IEEE80211_M_STA) {
856 			device_printf(sc->sc_dev,
857 			    "wds not supported in sta mode\n");
858 			goto bad;
859 		}
860 		if (opmode == IEEE80211_M_WDS) {
861 			/*
862 			 * Silently remove any request for a unique
863 			 * bssid; WDS vap's always share the local
864 			 * mac address.
865 			 */
866 			flags &= ~IEEE80211_CLONE_BSSID;
867 		}
868 		ic_opmode = IEEE80211_M_HOSTAP;
869 		break;
870 	default:
871 		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
872 		goto bad;
873 	}
874 	/*
875 	 * Check that a beacon buffer is available; the code below assumes it.
876 	 */
877 	if (needbeacon & STAILQ_EMPTY(&sc->sc_bbuf)) {
878 		device_printf(sc->sc_dev, "no beacon buffer available\n");
879 		goto bad;
880 	}
881 
882 	/* STA, AHDEMO? */
883 	if (opmode == IEEE80211_M_HOSTAP) {
884 		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
885 		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
886 	}
887 
888 	vap = &avp->av_vap;
889 	/* XXX can't hold mutex across if_alloc */
890 	ATH_UNLOCK(sc);
891 	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
892 	    bssid, mac);
893 	ATH_LOCK(sc);
894 	if (error != 0) {
895 		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
896 		    __func__, error);
897 		goto bad2;
898 	}
899 
900 	/* h/w crypto support */
901 	vap->iv_key_alloc = ath_key_alloc;
902 	vap->iv_key_delete = ath_key_delete;
903 	vap->iv_key_set = ath_key_set;
904 	vap->iv_key_update_begin = ath_key_update_begin;
905 	vap->iv_key_update_end = ath_key_update_end;
906 
907 	/* override various methods */
908 	avp->av_recv_mgmt = vap->iv_recv_mgmt;
909 	vap->iv_recv_mgmt = ath_recv_mgmt;
910 	vap->iv_reset = ath_reset_vap;
911 	vap->iv_update_beacon = ath_beacon_update;
912 	avp->av_newstate = vap->iv_newstate;
913 	vap->iv_newstate = ath_newstate;
914 	avp->av_bmiss = vap->iv_bmiss;
915 	vap->iv_bmiss = ath_bmiss_vap;
916 
917 	avp->av_bslot = -1;
918 	if (needbeacon) {
919 		/*
920 		 * Allocate beacon state and setup the q for buffered
921 		 * multicast frames.  We know a beacon buffer is
922 		 * available because we checked above.
923 		 */
924 		avp->av_bcbuf = STAILQ_FIRST(&sc->sc_bbuf);
925 		STAILQ_REMOVE_HEAD(&sc->sc_bbuf, bf_list);
926 		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
927 			/*
928 			 * Assign the vap to a beacon xmit slot.  As above
929 			 * this cannot fail to find a free one.
930 			 */
931 			avp->av_bslot = assign_bslot(sc);
932 			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
933 			    ("beacon slot %u not empty", avp->av_bslot));
934 			sc->sc_bslot[avp->av_bslot] = vap;
935 			sc->sc_nbcnvaps++;
936 		}
937 		if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
938 			/*
939 			 * Multple vaps are to transmit beacons and we
940 			 * have h/w support for TSF adjusting; enable
941 			 * use of staggered beacons.
942 			 */
943 			sc->sc_stagbeacons = 1;
944 		}
945 		ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
946 	}
947 
948 	ic->ic_opmode = ic_opmode;
949 	if (opmode != IEEE80211_M_WDS) {
950 		sc->sc_nvaps++;
951 		if (opmode == IEEE80211_M_STA)
952 			sc->sc_nstavaps++;
953 	}
954 	switch (ic_opmode) {
955 	case IEEE80211_M_IBSS:
956 		sc->sc_opmode = HAL_M_IBSS;
957 		break;
958 	case IEEE80211_M_STA:
959 		sc->sc_opmode = HAL_M_STA;
960 		break;
961 	case IEEE80211_M_AHDEMO:
962 	case IEEE80211_M_HOSTAP:
963 		sc->sc_opmode = HAL_M_HOSTAP;
964 		break;
965 	case IEEE80211_M_MONITOR:
966 		sc->sc_opmode = HAL_M_MONITOR;
967 		break;
968 	default:
969 		/* XXX should not happen */
970 		break;
971 	}
972 	if (sc->sc_hastsfadd) {
973 		/*
974 		 * Configure whether or not TSF adjust should be done.
975 		 */
976 		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
977 	}
978 	ATH_UNLOCK(sc);
979 
980 	/* complete setup */
981 	ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
982 	return vap;
983 bad2:
984 	reclaim_address(sc, mac);
985 	ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
986 bad:
987 	free(avp, M_80211_VAP);
988 	ATH_UNLOCK(sc);
989 	return NULL;
990 }
991 
/*
 * Reclaim a vap: quiesce the hardware, detach net80211 state,
 * release beacon/mcast resources held by the vap, update driver
 * accounting, and finally restart the rx/tx machinery for any
 * remaining vaps.  NB: ordering here is significant throughout.
 */
static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc);		/* stop xmit side */
		ath_stoprecv(sc);		/* stop recv side */
	}

	/* NB: detach before taking the lock; net80211 does its own locking */
	ieee80211_vap_detach(vap);
	ATH_LOCK(sc);
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		if (sc->sc_nbcnvaps == 0) {
			/* last beaconing vap gone; stop staggering/tsf adjust */
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
		ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		/* disable s/w beacon miss handling when last sta vap goes */
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
		/* give back the local bssid and recompute the bssid mask */
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
	ATH_UNLOCK(sc);
	free(avp, M_80211_VAP);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
		if (sc->sc_beacons)
			ath_beacon_config(sc, NULL);
		ath_hal_intrset(ah, sc->sc_imask);
	}
}
1066 
1067 void
1068 ath_suspend(struct ath_softc *sc)
1069 {
1070 	struct ifnet *ifp = sc->sc_ifp;
1071 	struct ieee80211com *ic = ifp->if_l2com;
1072 
1073 	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1074 		__func__, ifp->if_flags);
1075 
1076 	sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
1077 	if (ic->ic_opmode == IEEE80211_M_STA)
1078 		ath_stop(ifp);
1079 	else
1080 		ieee80211_suspend_all(ic);
1081 	/*
1082 	 * NB: don't worry about putting the chip in low power
1083 	 * mode; pci will power off our socket on suspend and
1084 	 * cardbus detaches the device.
1085 	 */
1086 }
1087 
1088 /*
1089  * Reset the key cache since some parts do not reset the
1090  * contents on resume.  First we clear all entries, then
1091  * re-load keys that the 802.11 layer assumes are setup
1092  * in h/w.
1093  */
1094 static void
1095 ath_reset_keycache(struct ath_softc *sc)
1096 {
1097 	struct ifnet *ifp = sc->sc_ifp;
1098 	struct ieee80211com *ic = ifp->if_l2com;
1099 	struct ath_hal *ah = sc->sc_ah;
1100 	int i;
1101 
1102 	for (i = 0; i < sc->sc_keymax; i++)
1103 		ath_hal_keyreset(ah, i);
1104 	ieee80211_crypto_reload_keys(ic);
1105 }
1106 
1107 void
1108 ath_resume(struct ath_softc *sc)
1109 {
1110 	struct ifnet *ifp = sc->sc_ifp;
1111 	struct ieee80211com *ic = ifp->if_l2com;
1112 	struct ath_hal *ah = sc->sc_ah;
1113 	HAL_STATUS status;
1114 
1115 	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1116 		__func__, ifp->if_flags);
1117 
1118 	/*
1119 	 * Must reset the chip before we reload the
1120 	 * keycache as we were powered down on suspend.
1121 	 */
1122 	ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_FALSE, &status);
1123 	ath_reset_keycache(sc);
1124 	if (sc->sc_resume_up) {
1125 		if (ic->ic_opmode == IEEE80211_M_STA) {
1126 			ath_init(sc);
1127 			ieee80211_beacon_miss(ic);
1128 		} else
1129 			ieee80211_resume_all(ic);
1130 	}
1131 	if (sc->sc_softled) {
1132 		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin);
1133 		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
1134 	}
1135 }
1136 
1137 void
1138 ath_shutdown(struct ath_softc *sc)
1139 {
1140 	struct ifnet *ifp = sc->sc_ifp;
1141 
1142 	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1143 		__func__, ifp->if_flags);
1144 
1145 	ath_stop(ifp);
1146 	/* NB: no point powering down chip as we're about to reboot */
1147 }
1148 
1149 /*
1150  * Interrupt handler.  Most of the actual processing is deferred.
1151  */
1152 void
1153 ath_intr(void *arg)
1154 {
1155 	struct ath_softc *sc = arg;
1156 	struct ifnet *ifp = sc->sc_ifp;
1157 	struct ath_hal *ah = sc->sc_ah;
1158 	HAL_INT status;
1159 
1160 	if (sc->sc_invalid) {
1161 		/*
1162 		 * The hardware is not ready/present, don't touch anything.
1163 		 * Note this can happen early on if the IRQ is shared.
1164 		 */
1165 		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
1166 		return;
1167 	}
1168 	if (!ath_hal_intrpend(ah))		/* shared irq, not for us */
1169 		return;
1170 	if ((ifp->if_flags & IFF_UP) == 0 ||
1171 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1172 		HAL_INT status;
1173 
1174 		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1175 			__func__, ifp->if_flags);
1176 		ath_hal_getisr(ah, &status);	/* clear ISR */
1177 		ath_hal_intrset(ah, 0);		/* disable further intr's */
1178 		return;
1179 	}
1180 	/*
1181 	 * Figure out the reason(s) for the interrupt.  Note
1182 	 * that the hal returns a pseudo-ISR that may include
1183 	 * bits we haven't explicitly enabled so we mask the
1184 	 * value to insure we only process bits we requested.
1185 	 */
1186 	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
1187 	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
1188 	status &= sc->sc_imask;			/* discard unasked for bits */
1189 	if (status & HAL_INT_FATAL) {
1190 		sc->sc_stats.ast_hardware++;
1191 		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
1192 		ath_fatal_proc(sc, 0);
1193 	} else {
1194 		if (status & HAL_INT_SWBA) {
1195 			/*
1196 			 * Software beacon alert--time to send a beacon.
1197 			 * Handle beacon transmission directly; deferring
1198 			 * this is too slow to meet timing constraints
1199 			 * under load.
1200 			 */
1201 			ath_beacon_proc(sc, 0);
1202 		}
1203 		if (status & HAL_INT_RXEOL) {
1204 			/*
1205 			 * NB: the hardware should re-read the link when
1206 			 *     RXE bit is written, but it doesn't work at
1207 			 *     least on older hardware revs.
1208 			 */
1209 			sc->sc_stats.ast_rxeol++;
1210 			sc->sc_rxlink = NULL;
1211 		}
1212 		if (status & HAL_INT_TXURN) {
1213 			sc->sc_stats.ast_txurn++;
1214 			/* bump tx trigger level */
1215 			ath_hal_updatetxtriglevel(ah, AH_TRUE);
1216 		}
1217 		if (status & HAL_INT_RX)
1218 			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1219 		if (status & HAL_INT_TX)
1220 			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
1221 		if (status & HAL_INT_BMISS) {
1222 			sc->sc_stats.ast_bmiss++;
1223 			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
1224 		}
1225 		if (status & HAL_INT_MIB) {
1226 			sc->sc_stats.ast_mib++;
1227 			/*
1228 			 * Disable interrupts until we service the MIB
1229 			 * interrupt; otherwise it will continue to fire.
1230 			 */
1231 			ath_hal_intrset(ah, 0);
1232 			/*
1233 			 * Let the hal handle the event.  We assume it will
1234 			 * clear whatever condition caused the interrupt.
1235 			 */
1236 			ath_hal_mibevent(ah, &sc->sc_halstats);
1237 			ath_hal_intrset(ah, sc->sc_imask);
1238 		}
1239 		if (status & HAL_INT_RXORN) {
1240 			/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
1241 			sc->sc_stats.ast_rxorn++;
1242 		}
1243 	}
1244 }
1245 
1246 static void
1247 ath_fatal_proc(void *arg, int pending)
1248 {
1249 	struct ath_softc *sc = arg;
1250 	struct ifnet *ifp = sc->sc_ifp;
1251 	u_int32_t *state;
1252 	u_int32_t len;
1253 	void *sp;
1254 
1255 	if_printf(ifp, "hardware error; resetting\n");
1256 	/*
1257 	 * Fatal errors are unrecoverable.  Typically these
1258 	 * are caused by DMA errors.  Collect h/w state from
1259 	 * the hal so we can diagnose what's going on.
1260 	 */
1261 	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
1262 		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
1263 		state = sp;
1264 		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
1265 		    state[0], state[1] , state[2], state[3],
1266 		    state[4], state[5]);
1267 	}
1268 	ath_reset(ifp);
1269 }
1270 
/*
 * Per-vap beacon-miss handler.  Filters out phantom beacon-miss
 * interrupts before passing the event up to net80211.
 * NOTE(review): dereferences vap->iv_bss without a NULL check —
 * presumably a sta vap always has a bss node here; confirm.
 */
static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	struct ifnet *ifp = vap->iv_ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	u_int64_t lastrx = sc->sc_lastrx;
	u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
	/* threshold (beacons) * beacon interval, scaled by 1024 to
	 * match the TSF units used for the comparison below */
	u_int bmisstimeout =
		vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;

	DPRINTF(sc, ATH_DEBUG_BEACON,
	    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
	    __func__, (unsigned long long) tsf,
	    (unsigned long long)(tsf - lastrx),
	    (unsigned long long) lastrx, bmisstimeout);
	/*
	 * Workaround phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.
	 */
	if (tsf - lastrx > bmisstimeout)
		ATH_VAP(vap)->av_bmiss(vap);
	else
		sc->sc_stats.ast_bmiss_phantom++;
}
1298 
1299 static int
1300 ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
1301 {
1302 	uint32_t rsize;
1303 	void *sp;
1304 
1305 	if (!ath_hal_getdiagstate(ah, 32, &mask, sizeof(&mask), &sp, &rsize))
1306 		return 0;
1307 	KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
1308 	*hangs = *(uint32_t *)sp;
1309 	return 1;
1310 }
1311 
1312 static void
1313 ath_bmiss_proc(void *arg, int pending)
1314 {
1315 	struct ath_softc *sc = arg;
1316 	struct ifnet *ifp = sc->sc_ifp;
1317 	uint32_t hangs;
1318 
1319 	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
1320 
1321 	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
1322 		if_printf(ifp, "bb hang detected (0x%x), reseting\n", hangs);
1323 		ath_reset(ifp);
1324 	} else
1325 		ieee80211_beacon_miss(ifp->if_l2com);
1326 }
1327 
1328 /*
1329  * Convert net80211 channel to a HAL channel with the flags
1330  * constrained to reflect the current operating mode and
1331  * the frequency possibly mapped for GSM channels.
1332  */
1333 static void
1334 ath_mapchan(const struct ieee80211com *ic,
1335 	HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
1336 {
1337 #define	N(a)	(sizeof(a) / sizeof(a[0]))
1338 	static const u_int modeflags[IEEE80211_MODE_MAX] = {
1339 		0,			/* IEEE80211_MODE_AUTO */
1340 		CHANNEL_A,		/* IEEE80211_MODE_11A */
1341 		CHANNEL_B,		/* IEEE80211_MODE_11B */
1342 		CHANNEL_PUREG,		/* IEEE80211_MODE_11G */
1343 		0,			/* IEEE80211_MODE_FH */
1344 		CHANNEL_108A,		/* IEEE80211_MODE_TURBO_A */
1345 		CHANNEL_108G,		/* IEEE80211_MODE_TURBO_G */
1346 		CHANNEL_ST,		/* IEEE80211_MODE_STURBO_A */
1347 		CHANNEL_A,		/* IEEE80211_MODE_11NA */
1348 		CHANNEL_PUREG,		/* IEEE80211_MODE_11NG */
1349 	};
1350 	enum ieee80211_phymode mode = ieee80211_chan2mode(chan);
1351 
1352 	KASSERT(mode < N(modeflags), ("unexpected phy mode %u", mode));
1353 	KASSERT(modeflags[mode] != 0, ("mode %u undefined", mode));
1354 	hc->channelFlags = modeflags[mode];
1355 	if (IEEE80211_IS_CHAN_HALF(chan))
1356 		hc->channelFlags |= CHANNEL_HALF;
1357 	if (IEEE80211_IS_CHAN_QUARTER(chan))
1358 		hc->channelFlags |= CHANNEL_QUARTER;
1359 	if (IEEE80211_IS_CHAN_HT20(chan))
1360 		hc->channelFlags |= CHANNEL_HT20;
1361 	if (IEEE80211_IS_CHAN_HT40D(chan))
1362 		hc->channelFlags |= CHANNEL_HT40MINUS;
1363 	if (IEEE80211_IS_CHAN_HT40U(chan))
1364 		hc->channelFlags |= CHANNEL_HT40PLUS;
1365 
1366 	if (IEEE80211_IS_CHAN_GSM(chan)) {
1367 		if (ic->ic_regdomain.country == CTRY_XR9)
1368 			hc->channel = 1520 + chan->ic_freq;
1369 		else if (ic->ic_regdomain.country == CTRY_GZ901)
1370 			hc->channel = 1544 + chan->ic_freq;
1371 		else
1372 			hc->channel = 3344 - chan->ic_freq;
1373 	} else
1374 		hc->channel = chan->ic_freq;
1375 #undef N
1376 }
1377 
1378 /*
1379  * Handle TKIP MIC setup to deal hardware that doesn't do MIC
1380  * calcs together with WME.  If necessary disable the crypto
1381  * hardware and mark the 802.11 state so keys will be setup
1382  * with the MIC work done in software.
1383  */
1384 static void
1385 ath_settkipmic(struct ath_softc *sc)
1386 {
1387 	struct ifnet *ifp = sc->sc_ifp;
1388 	struct ieee80211com *ic = ifp->if_l2com;
1389 
1390 	if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
1391 		if (ic->ic_flags & IEEE80211_F_WME) {
1392 			ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
1393 			ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
1394 		} else {
1395 			ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
1396 			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
1397 		}
1398 	}
1399 }
1400 
/*
 * Bring the interface up: reset the chip, restart the receive
 * engine, program the interrupt mask and start all vaps.  Safe
 * to call repeatedly; any previous state is torn down first.
 */
static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_mapchan(ic, &sc->sc_curchan, ic->ic_curchan);
	ath_settkipmic(sc);
	if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_FALSE, &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
			status);
		ATH_UNLOCK(sc);
		return;
	}
	ath_chan_change(sc, ic->ic_curchan);

	/*
	 * Likewise this is set during reset so update
	 * state cached in the driver.
	 */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	/* restart the periodic calibration cycle from scratch */
	sc->sc_lastlongcal = 0;
	sc->sc_resetcal = 1;
	sc->sc_lastcalreset = 0;

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		ATH_UNLOCK(sc);
		return;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		  | HAL_INT_RXEOL | HAL_INT_RXORN
		  | HAL_INT_FATAL | HAL_INT_GLOBAL;
	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ath_hal_intrset(ah, sc->sc_imask);

	ATH_UNLOCK(sc);

#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->start(sc->sc_tx99);
	else
#endif
	ieee80211_start_all(ic);		/* start all vap's */
}
1484 
/*
 * Shut the interface down with the softc lock held.
 * Steps that touch the hardware are skipped when the device
 * is marked invalid (e.g. it has been detached/ejected).
 */
static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->stop(sc->sc_tx99);
#endif
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		ifp->if_timer = 0;	/* cancel the watchdog */
		if (!sc->sc_invalid) {
			if (sc->sc_softled) {
				/* stop LED blinking and force LED off */
				callout_stop(&sc->sc_ledtimer);
				ath_hal_gpioset(ah, sc->sc_ledpin,
					!sc->sc_ledon);
				sc->sc_blinking = 0;
			}
			ath_hal_intrset(ah, 0);
		}
		ath_draintxq(sc);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		ath_beacon_free(sc);	/* XXX not needed */
	}
}
1535 
1536 static void
1537 ath_stop(struct ifnet *ifp)
1538 {
1539 	struct ath_softc *sc = ifp->if_softc;
1540 
1541 	ATH_LOCK(sc);
1542 	ath_stop_locked(ifp);
1543 	ATH_UNLOCK(sc);
1544 }
1545 
1546 /*
1547  * Reset the hardware w/o losing operational state.  This is
1548  * basically a more efficient way of doing ath_stop, ath_init,
1549  * followed by state transitions to the current 802.11
1550  * operational state.  Used to recover from various errors and
1551  * to reset or reload hardware state.
1552  */
1553 static int
1554 ath_reset(struct ifnet *ifp)
1555 {
1556 	struct ath_softc *sc = ifp->if_softc;
1557 	struct ieee80211com *ic = ifp->if_l2com;
1558 	struct ath_hal *ah = sc->sc_ah;
1559 	HAL_STATUS status;
1560 
1561 	/*
1562 	 * Convert to a HAL channel description with the flags
1563 	 * constrained to reflect the current operating mode.
1564 	 */
1565 	ath_mapchan(ic, &sc->sc_curchan, ic->ic_curchan);
1566 
1567 	ath_hal_intrset(ah, 0);		/* disable interrupts */
1568 	ath_draintxq(sc);		/* stop xmit side */
1569 	ath_stoprecv(sc);		/* stop recv side */
1570 	ath_settkipmic(sc);		/* configure TKIP MIC handling */
1571 	/* NB: indicate channel change so we do a full reset */
1572 	if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_TRUE, &status))
1573 		if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
1574 			__func__, status);
1575 	sc->sc_diversity = ath_hal_getdiversity(ah);
1576 	if (ath_startrecv(sc) != 0)	/* restart recv */
1577 		if_printf(ifp, "%s: unable to start recv logic\n", __func__);
1578 	/*
1579 	 * We may be doing a reset in response to an ioctl
1580 	 * that changes the channel so update any state that
1581 	 * might change as a result.
1582 	 */
1583 	ath_chan_change(sc, ic->ic_curchan);
1584 	if (sc->sc_beacons)
1585 		ath_beacon_config(sc, NULL);	/* restart beacons */
1586 	ath_hal_intrset(ah, sc->sc_imask);
1587 
1588 	ath_start(ifp);			/* restart xmit */
1589 	return 0;
1590 }
1591 
1592 static int
1593 ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
1594 {
1595 	struct ieee80211com *ic = vap->iv_ic;
1596 	struct ifnet *ifp = ic->ic_ifp;
1597 	struct ath_softc *sc = ifp->if_softc;
1598 	struct ath_hal *ah = sc->sc_ah;
1599 
1600 	switch (cmd) {
1601 	case IEEE80211_IOC_TXPOWER:
1602 		/*
1603 		 * If per-packet TPC is enabled, then we have nothing
1604 		 * to do; otherwise we need to force the global limit.
1605 		 * All this can happen directly; no need to reset.
1606 		 */
1607 		if (!ath_hal_gettpc(ah))
1608 			ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
1609 		return 0;
1610 	}
1611 	return ath_reset(ifp);
1612 }
1613 
/*
 * Flush-test predicate that never reports "done" so the entire
 * fast-frame staging queue is drained (see ath_ff_stageq_flush).
 */
static int
ath_ff_always(struct ath_txq *txq, struct ath_buf *bf)
{
	return (0);
}
1619 
#if 0
/*
 * Flush-test predicate: stop flushing once entries are younger
 * than ATH_FF_STAGEMAX ages.  Currently compiled out.
 */
static int
ath_ff_ageflushtestdone(struct ath_txq *txq, struct ath_buf *bf)
{
	return (txq->axq_curage - bf->bf_age) < ATH_FF_STAGEMAX;
}
#endif
1627 
1628 /*
1629  * Flush FF staging queue.
1630  */
1631 static void
1632 ath_ff_stageq_flush(struct ath_softc *sc, struct ath_txq *txq,
1633 	int (*ath_ff_flushdonetest)(struct ath_txq *txq, struct ath_buf *bf))
1634 {
1635 	struct ath_buf *bf;
1636 	struct ieee80211_node *ni;
1637 	int pktlen, pri;
1638 
1639 	for (;;) {
1640 		ATH_TXQ_LOCK(txq);
1641 		/*
1642 		 * Go from the back (oldest) to front so we can
1643 		 * stop early based on the age of the entry.
1644 		 */
1645 		bf = TAILQ_LAST(&txq->axq_stageq, axq_headtype);
1646 		if (bf == NULL || ath_ff_flushdonetest(txq, bf)) {
1647 			ATH_TXQ_UNLOCK(txq);
1648 			break;
1649 		}
1650 
1651 		ni = bf->bf_node;
1652 		pri = M_WME_GETAC(bf->bf_m);
1653 		KASSERT(ATH_NODE(ni)->an_ff_buf[pri],
1654 			("no bf on staging queue %p", bf));
1655 		ATH_NODE(ni)->an_ff_buf[pri] = NULL;
1656 		TAILQ_REMOVE(&txq->axq_stageq, bf, bf_stagelist);
1657 
1658 		ATH_TXQ_UNLOCK(txq);
1659 
1660 		DPRINTF(sc, ATH_DEBUG_FF, "%s: flush frame, age %u\n",
1661 			__func__, bf->bf_age);
1662 
1663 		sc->sc_stats.ast_ff_flush++;
1664 
1665 		/* encap and xmit */
1666 		bf->bf_m = ieee80211_encap(ni, bf->bf_m);
1667 		if (bf->bf_m == NULL) {
1668 			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
1669 				"%s: discard, encapsulation failure\n",
1670 				__func__);
1671 			sc->sc_stats.ast_tx_encap++;
1672 			goto bad;
1673 		}
1674 		pktlen = bf->bf_m->m_pkthdr.len; /* NB: don't reference below */
1675 		if (ath_tx_start(sc, ni, bf, bf->bf_m) == 0) {
1676 #if 0 /*XXX*/
1677 			ifp->if_opackets++;
1678 #endif
1679 			continue;
1680 		}
1681 	bad:
1682 		if (ni != NULL)
1683 			ieee80211_free_node(ni);
1684 		bf->bf_node = NULL;
1685 		if (bf->bf_m != NULL) {
1686 			m_freem(bf->bf_m);
1687 			bf->bf_m = NULL;
1688 		}
1689 
1690 		ATH_TXBUF_LOCK(sc);
1691 		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
1692 		ATH_TXBUF_UNLOCK(sc);
1693 	}
1694 }
1695 
1696 static __inline u_int32_t
1697 ath_ff_approx_txtime(struct ath_softc *sc, struct ath_node *an, struct mbuf *m)
1698 {
1699 	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
1700 	u_int32_t framelen;
1701 	struct ath_buf *bf;
1702 
1703 	/*
1704 	 * Approximate the frame length to be transmitted. A swag to add
1705 	 * the following maximal values to the skb payload:
1706 	 *   - 32: 802.11 encap + CRC
1707 	 *   - 24: encryption overhead (if wep bit)
1708 	 *   - 4 + 6: fast-frame header and padding
1709 	 *   - 16: 2 LLC FF tunnel headers
1710 	 *   - 14: 1 802.3 FF tunnel header (skb already accounts for 2nd)
1711 	 */
1712 	framelen = m->m_pkthdr.len + 32 + 4 + 6 + 16 + 14;
1713 	if (ic->ic_flags & IEEE80211_F_PRIVACY)
1714 		framelen += 24;
1715 	bf = an->an_ff_buf[M_WME_GETAC(m)];
1716 	if (bf != NULL)
1717 		framelen += bf->bf_m->m_pkthdr.len;
1718 	return ath_hal_computetxtime(sc->sc_ah, sc->sc_currates, framelen,
1719 			sc->sc_lastdatarix, AH_FALSE);
1720 }
1721 
1722 /*
1723  * Determine if a data frame may be aggregated via ff tunnelling.
1724  * Note the caller is responsible for checking if the destination
1725  * supports fast frames.
1726  *
1727  *  NB: allowing EAPOL frames to be aggregated with other unicast traffic.
1728  *      Do 802.1x EAPOL frames proceed in the clear? Then they couldn't
1729  *      be aggregated with other types of frames when encryption is on?
1730  *
1731  *  NB: assumes lock on an_ff_buf effectively held by txq lock mechanism.
1732  */
1733 static __inline int
1734 ath_ff_can_aggregate(struct ath_softc *sc,
1735 	struct ath_node *an, struct mbuf *m, int *flushq)
1736 {
1737 	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
1738 	struct ath_txq *txq;
1739 	u_int32_t txoplimit;
1740 	u_int pri;
1741 
1742 	*flushq = 0;
1743 
1744 	/*
1745 	 * If there is no frame to combine with and the txq has
1746 	 * fewer frames than the minimum required; then do not
1747 	 * attempt to aggregate this frame.
1748 	 */
1749 	pri = M_WME_GETAC(m);
1750 	txq = sc->sc_ac2q[pri];
1751 	if (an->an_ff_buf[pri] == NULL && txq->axq_depth < sc->sc_fftxqmin)
1752 		return 0;
1753 	/*
1754 	 * When not in station mode never aggregate a multicast
1755 	 * frame; this insures, for example, that a combined frame
1756 	 * does not require multiple encryption keys when using
1757 	 * 802.1x/WPA.
1758 	 */
1759 	if (ic->ic_opmode != IEEE80211_M_STA &&
1760 	    ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost))
1761 		return 0;
1762 	/*
1763 	 * Consult the max bursting interval to insure a combined
1764 	 * frame fits within the TxOp window.
1765 	 */
1766 	txoplimit = IEEE80211_TXOP_TO_US(
1767 		ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
1768 	if (txoplimit != 0 && ath_ff_approx_txtime(sc, an, m) > txoplimit) {
1769 		DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
1770 			"%s: FF TxOp violation\n", __func__);
1771 		if (an->an_ff_buf[pri] != NULL)
1772 			*flushq = 1;
1773 		return 0;
1774 	}
1775 	return 1;		/* try to aggregate */
1776 }
1777 
1778 /*
1779  * Check if the supplied frame can be partnered with an existing
1780  * or pending frame.  Return a reference to any frame that should be
1781  * sent on return; otherwise return NULL.
1782  */
1783 static struct mbuf *
1784 ath_ff_check(struct ath_softc *sc, struct ath_txq *txq,
1785 	struct ath_buf *bf, struct mbuf *m, struct ieee80211_node *ni)
1786 {
1787 	struct ath_node *an = ATH_NODE(ni);
1788 	struct ath_buf *bfstaged;
1789 	int ff_flush, pri;
1790 
1791 	/*
1792 	 * Check if the supplied frame can be aggregated.
1793 	 *
1794 	 * NB: we use the txq lock to protect references to
1795 	 *     an->an_ff_txbuf in ath_ff_can_aggregate().
1796 	 */
1797 	ATH_TXQ_LOCK(txq);
1798 	pri = M_WME_GETAC(m);
1799 	if (ath_ff_can_aggregate(sc, an, m, &ff_flush)) {
1800 		struct ath_buf *bfstaged = an->an_ff_buf[pri];
1801 		if (bfstaged != NULL) {
1802 			/*
1803 			 * A frame is available for partnering; remove
1804 			 * it, chain it to this one, and encapsulate.
1805 			 */
1806 			an->an_ff_buf[pri] = NULL;
1807 			TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist);
1808 			ATH_TXQ_UNLOCK(txq);
1809 
1810 			/*
1811 			 * Chain mbufs and add FF magic.
1812 			 */
1813 			DPRINTF(sc, ATH_DEBUG_FF,
1814 				"[%s] aggregate fast-frame, age %u\n",
1815 				ether_sprintf(ni->ni_macaddr), txq->axq_curage);
1816 			m->m_nextpkt = NULL;
1817 			bfstaged->bf_m->m_nextpkt = m;
1818 			m = bfstaged->bf_m;
1819 			bfstaged->bf_m = NULL;
1820 			m->m_flags |= M_FF;
1821 			/*
1822 			 * Release the node reference held while
1823 			 * the packet sat on an_ff_buf[]
1824 			 */
1825 			bfstaged->bf_node = NULL;
1826 			ieee80211_free_node(ni);
1827 
1828 			/*
1829 			 * Return bfstaged to the free list.
1830 			 */
1831 			ATH_TXBUF_LOCK(sc);
1832 			STAILQ_INSERT_TAIL(&sc->sc_txbuf, bfstaged, bf_list);
1833 			ATH_TXBUF_UNLOCK(sc);
1834 
1835 			return m;		/* ready to go */
1836 		} else {
1837 			/*
1838 			 * No frame available, queue this frame to wait
1839 			 * for a partner.  Note that we hold the buffer
1840 			 * and a reference to the node; we need the
1841 			 * buffer in particular so we're certain we
1842 			 * can flush the frame at a later time.
1843 			 */
1844 			DPRINTF(sc, ATH_DEBUG_FF,
1845 				"[%s] stage fast-frame, age %u\n",
1846 				ether_sprintf(ni->ni_macaddr), txq->axq_curage);
1847 
1848 			bf->bf_m = m;
1849 			bf->bf_node = ni;	/* NB: held reference */
1850 			bf->bf_age = txq->axq_curage;
1851 			an->an_ff_buf[pri] = bf;
1852 			TAILQ_INSERT_HEAD(&txq->axq_stageq, bf, bf_stagelist);
1853 			ATH_TXQ_UNLOCK(txq);
1854 
1855 			return NULL;		/* consumed */
1856 		}
1857 	}
1858 	/*
1859 	 * Frame could not be aggregated, it needs to be returned
1860 	 * to the caller for immediate transmission.  In addition
1861 	 * we check if we should first flush a frame from the
1862 	 * staging queue before sending this one.
1863 	 *
1864 	 * NB: ath_ff_can_aggregate only marks ff_flush if a frame
1865 	 *     is present to flush.
1866 	 */
1867 	if (ff_flush) {
1868 		int pktlen;
1869 
1870 		bfstaged = an->an_ff_buf[pri];
1871 		an->an_ff_buf[pri] = NULL;
1872 		TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist);
1873 		ATH_TXQ_UNLOCK(txq);
1874 
1875 		DPRINTF(sc, ATH_DEBUG_FF, "[%s] flush staged frame\n",
1876 			ether_sprintf(an->an_node.ni_macaddr));
1877 
1878 		/* encap and xmit */
1879 		bfstaged->bf_m = ieee80211_encap(ni, bfstaged->bf_m);
1880 		if (bfstaged->bf_m == NULL) {
1881 			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
1882 				"%s: discard, encap failure\n", __func__);
1883 			sc->sc_stats.ast_tx_encap++;
1884 			goto ff_flushbad;
1885 		}
1886 		pktlen = bfstaged->bf_m->m_pkthdr.len;
1887 		if (ath_tx_start(sc, ni, bfstaged, bfstaged->bf_m)) {
1888 			DPRINTF(sc, ATH_DEBUG_XMIT,
1889 				"%s: discard, xmit failure\n", __func__);
1890 	ff_flushbad:
1891 			/*
1892 			 * Unable to transmit frame that was on the staging
1893 			 * queue.  Reclaim the node reference and other
1894 			 * resources.
1895 			 */
1896 			if (ni != NULL)
1897 				ieee80211_free_node(ni);
1898 			bfstaged->bf_node = NULL;
1899 			if (bfstaged->bf_m != NULL) {
1900 				m_freem(bfstaged->bf_m);
1901 				bfstaged->bf_m = NULL;
1902 			}
1903 
1904 			ATH_TXBUF_LOCK(sc);
1905 			STAILQ_INSERT_TAIL(&sc->sc_txbuf, bfstaged, bf_list);
1906 			ATH_TXBUF_UNLOCK(sc);
1907 		} else {
1908 #if 0
1909 			ifp->if_opackets++;
1910 #endif
1911 		}
1912 	} else {
1913 		if (an->an_ff_buf[pri] != NULL) {
1914 			/*
1915 			 * XXX: out-of-order condition only occurs for AP
1916 			 * mode and multicast.  There may be no valid way
1917 			 * to get this condition.
1918 			 */
1919 			DPRINTF(sc, ATH_DEBUG_FF, "[%s] out-of-order frame\n",
1920 				ether_sprintf(an->an_node.ni_macaddr));
1921 			/* XXX stat */
1922 		}
1923 		ATH_TXQ_UNLOCK(txq);
1924 	}
1925 	return m;
1926 }
1927 
1928 /*
1929  * Cleanup driver resources when we run out of buffers
1930  * while processing fragments; return the tx buffers
1931  * allocated and drop node references.
1932  */
1933 static void
1934 ath_txfrag_cleanup(struct ath_softc *sc,
1935 	ath_bufhead *frags, struct ieee80211_node *ni)
1936 {
1937 	struct ath_buf *bf, *next;
1938 
1939 	ATH_TXBUF_LOCK_ASSERT(sc);
1940 
1941 	STAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
1942 		/* NB: bf assumed clean */
1943 		STAILQ_REMOVE_HEAD(frags, bf_list);
1944 		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
1945 		ieee80211_node_decref(ni);
1946 	}
1947 }
1948 
1949 /*
1950  * Setup xmit of a fragmented frame.  Allocate a buffer
1951  * for each frag and bump the node reference count to
1952  * reflect the held reference to be setup by ath_tx_start.
1953  */
static int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
	struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	/* Reserve one tx buffer per fragment after the first
	 * (the lead fragment m0 already has a buffer). */
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		bf = STAILQ_FIRST(&sc->sc_txbuf);
		if (bf == NULL) {	/* out of buffers, cleanup */
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
		/* NB: one node reference per reserved buffer; released
		 * later by the tx path or by ath_txfrag_cleanup. */
		ieee80211_node_incref(ni);
		STAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	/* NB: returns 0 both when buffers ran out (frags was emptied
	 * by cleanup) and when m0 carried no trailing fragments. */
	return !STAILQ_EMPTY(frags);
}
1976 
/*
 * Transmit dispatch: drain the interface send queue, pairing each
 * packet with a free tx buffer and handing it to ath_tx_start.
 * Handles Atheros fast-frame staging and 802.11 fragment bursts;
 * stops (marking OACTIVE) when tx buffers are exhausted.
 */
static void
ath_start(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	struct mbuf *m, *next;
	struct ath_txq *txq;
	ath_bufhead frags;
	int pri;

	/* Nothing to do if the interface is down or the h/w is gone. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	for (;;) {
		/*
		 * Grab a TX buffer and associated resources.
		 */
		ATH_TXBUF_LOCK(sc);
		bf = STAILQ_FIRST(&sc->sc_txbuf);
		if (bf != NULL)
			STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
		if (bf == NULL) {
			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of xmit buffers\n",
				__func__);
			sc->sc_stats.ast_tx_qstop++;
			/* Flow control: tell the stack to stop queueing. */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* Send queue drained; return the unused buffer. */
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
			ATH_TXBUF_UNLOCK(sc);
			break;
		}
		STAILQ_INIT(&frags);
		/* NB: net80211 stashes the node reference in rcvif. */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		pri = M_WME_GETAC(m);
		txq = sc->sc_ac2q[pri];
		if (IEEE80211_ATH_CAP(ni->ni_vap, ni, IEEE80211_NODE_FF)) {
			/*
			 * Check queue length; if too deep drop this
			 * frame (tail drop considered good).
			 */
			if (txq->axq_depth >= sc->sc_fftxqmax) {
				DPRINTF(sc, ATH_DEBUG_FF,
				    "[%s] tail drop on q %u depth %u\n",
				    ether_sprintf(ni->ni_macaddr),
				    txq->axq_qnum, txq->axq_depth);
				sc->sc_stats.ast_tx_qfull++;
				m_freem(m);
				goto reclaim;
			}
			/* Try to pair with a staged fast-frame partner. */
			m = ath_ff_check(sc, txq, bf, m, ni);
			if (m == NULL) {
				/* NB: ni ref & bf held on stageq */
				continue;
			}
		}
		ifp->if_opackets++;
		/*
		 * Encapsulate the packet in prep for transmission.
		 */
		m = ieee80211_encap(ni, m);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: encapsulation failure\n", __func__);
			sc->sc_stats.ast_tx_encap++;
			goto bad;
		}
		/*
		 * Check for fragmentation.  If this frame
		 * has been broken up verify we have enough
		 * buffers to send all the fragments so all
		 * go out or none...
		 */
		if ((m->m_flags & M_FRAG) &&
		    !ath_txfrag_setup(sc, &frags, m, ni)) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: out of txfrag buffers\n", __func__);
			ic->ic_stats.is_tx_nobuf++;	/* XXX */
			ath_freetx(m);
			goto bad;
		}
	nextfrag:
		/*
		 * Pass the frame to the h/w for transmission.
		 * Fragmented frames have each frag chained together
		 * with m_nextpkt.  We know there are sufficient ath_buf's
		 * to send all the frags because of work done by
		 * ath_txfrag_setup.  We leave m_nextpkt set while
		 * calling ath_tx_start so it can use it to extend the
		 * the tx duration to cover the subsequent frag and
		 * so it can reclaim all the mbufs in case of an error;
		 * ath_tx_start clears m_nextpkt once it commits to
		 * handing the frame to the hardware.
		 */
		next = m->m_nextpkt;
		if (ath_tx_start(sc, ni, bf, m)) {
	bad:
			ifp->if_oerrors++;
	reclaim:
			/* Return the buffer, any reserved frag buffers,
			 * and the node reference taken by the stack. */
			bf->bf_m = NULL;
			bf->bf_node = NULL;
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
			ath_txfrag_cleanup(sc, &frags, ni);
			ATH_TXBUF_UNLOCK(sc);
			if (ni != NULL)
				ieee80211_free_node(ni);
			continue;
		}
		if (next != NULL) {
			/*
			 * Beware of state changing between frags.
			 * XXX check sta power-save state?
			 */
			if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: flush fragmented packet, state %s\n",
				    __func__,
				    ieee80211_state_name[ni->ni_vap->iv_state]);
				ath_freetx(next);
				goto reclaim;
			}
			/* Advance to the next fragment and its buffer. */
			m = next;
			bf = STAILQ_FIRST(&frags);
			KASSERT(bf != NULL, ("no buf for txfrag"));
			STAILQ_REMOVE_HEAD(&frags, bf_list);
			goto nextfrag;
		}

		/* NB: arm the interface watchdog timer; presumably
		 * serviced by the driver's watchdog routine — confirm. */
		ifp->if_timer = 5;
#if 0
		/*
		 * Flush stale frames from the fast-frame staging queue.
		 */
		if (ic->ic_opmode != IEEE80211_M_STA)
			ath_ff_stageq_flush(sc, txq, ath_ff_ageflushtestdone);
#endif
	}
}
2122 
2123 static int
2124 ath_media_change(struct ifnet *ifp)
2125 {
2126 	int error = ieee80211_media_change(ifp);
2127 	/* NB: only the fixed rate can change and that doesn't need a reset */
2128 	return (error == ENETRESET ? 0 : error);
2129 }
2130 
2131 #ifdef ATH_DEBUG
2132 static void
2133 ath_keyprint(struct ath_softc *sc, const char *tag, u_int ix,
2134 	const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
2135 {
2136 	static const char *ciphers[] = {
2137 		"WEP",
2138 		"AES-OCB",
2139 		"AES-CCM",
2140 		"CKIP",
2141 		"TKIP",
2142 		"CLR",
2143 	};
2144 	int i, n;
2145 
2146 	printf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]);
2147 	for (i = 0, n = hk->kv_len; i < n; i++)
2148 		printf("%02x", hk->kv_val[i]);
2149 	printf(" mac %s", ether_sprintf(mac));
2150 	if (hk->kv_type == HAL_CIPHER_TKIP) {
2151 		printf(" %s ", sc->sc_splitmic ? "mic" : "rxmic");
2152 		for (i = 0; i < sizeof(hk->kv_mic); i++)
2153 			printf("%02x", hk->kv_mic[i]);
2154 		if (!sc->sc_splitmic) {
2155 			printf(" txmic ");
2156 			for (i = 0; i < sizeof(hk->kv_txmic); i++)
2157 				printf("%02x", hk->kv_txmic[i]);
2158 		}
2159 	}
2160 	printf("\n");
2161 }
2162 #endif
2163 
2164 /*
2165  * Set a TKIP key into the hardware.  This handles the
2166  * potential distribution of key state to multiple key
2167  * cache slots for TKIP.
2168  */
static int
ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
	HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
{
#define	IEEE80211_KEY_XR	(IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
	static const u_int8_t zerobssid[IEEE80211_ADDR_LEN];
	struct ath_hal *ah = sc->sc_ah;

	KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP,
		("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher));
	if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) {
		/* Key is used for both transmit and receive. */
		if (sc->sc_splitmic) {
			/*
			 * TX key goes at first index, RX key at the rx index.
			 * The hal handles the MIC keys at index+64.
			 */
			memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic));
			KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
			if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid))
				return 0;

			/* Reuse hk for the rx half at keyix+32. */
			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
			KEYPRINTF(sc, k->wk_keyix+32, hk, mac);
			/* XXX delete tx key on failure? */
			return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac);
		} else {
			/*
			 * Room for both TX+RX MIC keys in one key cache
			 * slot, just set key at the first index; the hal
			 * will handle the rest.
			 */
			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
			memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
			KEYPRINTF(sc, k->wk_keyix, hk, mac);
			return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
		}
	} else if (k->wk_flags & IEEE80211_KEY_XMIT) {
		if (sc->sc_splitmic) {
			/*
			 * NB: must pass MIC key in expected location when
			 * the keycache only holds one MIC key per entry.
			 */
			memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_txmic));
		} else
			memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
		KEYPRINTF(sc, k->wk_keyix, hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
	} else if (k->wk_flags & IEEE80211_KEY_RECV) {
		memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
		KEYPRINTF(sc, k->wk_keyix, hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
	}
	/* NB: key marked neither xmit nor recv; nothing to install. */
	return 0;
#undef IEEE80211_KEY_XR
}
2224 
2225 /*
2226  * Set a net80211 key into the hardware.  This handles the
2227  * potential distribution of key state to multiple key
2228  * cache slots for TKIP with hardware MIC support.
2229  */
static int
ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
	struct ieee80211_node *bss)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* Map net80211 cipher numbers to hal cipher types. */
	static const u_int8_t ciphermap[] = {
		HAL_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
		HAL_CIPHER_TKIP,	/* IEEE80211_CIPHER_TKIP */
		HAL_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
		HAL_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
		(u_int8_t) -1,		/* 4 is not allocated */
		HAL_CIPHER_CKIP,	/* IEEE80211_CIPHER_CKIP */
		HAL_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
	};
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	u_int8_t gmac[IEEE80211_ADDR_LEN];
	const u_int8_t *mac;
	HAL_KEYVAL hk;

	memset(&hk, 0, sizeof(hk));
	/*
	 * Software crypto uses a "clear key" so non-crypto
	 * state kept in the key cache is maintained and
	 * so that rx frames have an entry to match.
	 */
	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
		KASSERT(cip->ic_cipher < N(ciphermap),
			("invalid cipher type %u", cip->ic_cipher));
		hk.kv_type = ciphermap[cip->ic_cipher];
		hk.kv_len = k->wk_keylen;
		memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
	} else
		hk.kv_type = HAL_CIPHER_CLR;

	if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
		/*
		 * Group keys on hardware that supports multicast frame
		 * key search use a mac that is the sender's address with
		 * the high bit set instead of the app-specified address.
		 */
		IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr);
		gmac[0] |= 0x80;
		mac = gmac;
	} else
		mac = k->wk_macaddr;

	/* TKIP with h/w MIC needs the multi-slot setup path. */
	if (hk.kv_type == HAL_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
		return ath_keyset_tkip(sc, k, &hk, mac);
	} else {
		KEYPRINTF(sc, k->wk_keyix, &hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
	}
#undef N
}
2286 
2287 /*
2288  * Allocate tx/rx key slots for TKIP.  We allocate two slots for
2289  * each key, one for decrypt/encrypt and the other for the MIC.
2290  */
2291 static u_int16_t
2292 key_alloc_2pair(struct ath_softc *sc,
2293 	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2294 {
2295 #define	N(a)	(sizeof(a)/sizeof(a[0]))
2296 	u_int i, keyix;
2297 
2298 	KASSERT(sc->sc_splitmic, ("key cache !split"));
2299 	/* XXX could optimize */
2300 	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
2301 		u_int8_t b = sc->sc_keymap[i];
2302 		if (b != 0xff) {
2303 			/*
2304 			 * One or more slots in this byte are free.
2305 			 */
2306 			keyix = i*NBBY;
2307 			while (b & 1) {
2308 		again:
2309 				keyix++;
2310 				b >>= 1;
2311 			}
2312 			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
2313 			if (isset(sc->sc_keymap, keyix+32) ||
2314 			    isset(sc->sc_keymap, keyix+64) ||
2315 			    isset(sc->sc_keymap, keyix+32+64)) {
2316 				/* full pair unavailable */
2317 				/* XXX statistic */
2318 				if (keyix == (i+1)*NBBY) {
2319 					/* no slots were appropriate, advance */
2320 					continue;
2321 				}
2322 				goto again;
2323 			}
2324 			setbit(sc->sc_keymap, keyix);
2325 			setbit(sc->sc_keymap, keyix+64);
2326 			setbit(sc->sc_keymap, keyix+32);
2327 			setbit(sc->sc_keymap, keyix+32+64);
2328 			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2329 				"%s: key pair %u,%u %u,%u\n",
2330 				__func__, keyix, keyix+64,
2331 				keyix+32, keyix+32+64);
2332 			*txkeyix = keyix;
2333 			*rxkeyix = keyix+32;
2334 			return 1;
2335 		}
2336 	}
2337 	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
2338 	return 0;
2339 #undef N
2340 }
2341 
2342 /*
2343  * Allocate tx/rx key slots for TKIP.  We allocate two slots for
2344  * each key, one for decrypt/encrypt and the other for the MIC.
2345  */
2346 static u_int16_t
2347 key_alloc_pair(struct ath_softc *sc,
2348 	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2349 {
2350 #define	N(a)	(sizeof(a)/sizeof(a[0]))
2351 	u_int i, keyix;
2352 
2353 	KASSERT(!sc->sc_splitmic, ("key cache split"));
2354 	/* XXX could optimize */
2355 	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
2356 		u_int8_t b = sc->sc_keymap[i];
2357 		if (b != 0xff) {
2358 			/*
2359 			 * One or more slots in this byte are free.
2360 			 */
2361 			keyix = i*NBBY;
2362 			while (b & 1) {
2363 		again:
2364 				keyix++;
2365 				b >>= 1;
2366 			}
2367 			if (isset(sc->sc_keymap, keyix+64)) {
2368 				/* full pair unavailable */
2369 				/* XXX statistic */
2370 				if (keyix == (i+1)*NBBY) {
2371 					/* no slots were appropriate, advance */
2372 					continue;
2373 				}
2374 				goto again;
2375 			}
2376 			setbit(sc->sc_keymap, keyix);
2377 			setbit(sc->sc_keymap, keyix+64);
2378 			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2379 				"%s: key pair %u,%u\n",
2380 				__func__, keyix, keyix+64);
2381 			*txkeyix = *rxkeyix = keyix;
2382 			return 1;
2383 		}
2384 	}
2385 	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
2386 	return 0;
2387 #undef N
2388 }
2389 
2390 /*
2391  * Allocate a single key cache slot.
2392  */
2393 static int
2394 key_alloc_single(struct ath_softc *sc,
2395 	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2396 {
2397 #define	N(a)	(sizeof(a)/sizeof(a[0]))
2398 	u_int i, keyix;
2399 
2400 	/* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
2401 	for (i = 0; i < N(sc->sc_keymap); i++) {
2402 		u_int8_t b = sc->sc_keymap[i];
2403 		if (b != 0xff) {
2404 			/*
2405 			 * One or more slots are free.
2406 			 */
2407 			keyix = i*NBBY;
2408 			while (b & 1)
2409 				keyix++, b >>= 1;
2410 			setbit(sc->sc_keymap, keyix);
2411 			DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n",
2412 				__func__, keyix);
2413 			*txkeyix = *rxkeyix = keyix;
2414 			return 1;
2415 		}
2416 	}
2417 	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__);
2418 	return 0;
2419 #undef N
2420 }
2421 
2422 /*
 * Allocate one or more key cache slots for a unicast key.  The
2424  * key itself is needed only to identify the cipher.  For hardware
2425  * TKIP with split cipher+MIC keys we allocate two key cache slot
2426  * pairs so that we can setup separate TX and RX MIC keys.  Note
2427  * that the MIC key for a TKIP key at slot i is assumed by the
2428  * hardware to be at slot i+64.  This limits TKIP keys to the first
2429  * 64 entries.
2430  */
static int
ath_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;

	/*
	 * Group key allocation must be handled specially for
	 * parts that do not support multicast key cache search
	 * functionality.  For those parts the key id must match
	 * the h/w key index so lookups find the right key.  On
	 * parts w/ the key search facility we install the sender's
	 * mac address (with the high bit set) and let the hardware
	 * find the key w/o using the key id.  This is preferred as
	 * it permits us to support multiple users for adhoc and/or
	 * multi-station operation.
	 */
	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||	/* global key */
	    ((k->wk_flags & IEEE80211_KEY_GROUP) && !sc->sc_mcastkey)) {
		/* Must be one of the vap's global wep/group keys. */
		if (!(&vap->iv_nw_keys[0] <= k &&
		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
			/* should not happen */
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: bogus group key\n", __func__);
			return 0;
		}
		/*
		 * XXX we pre-allocate the global keys so
		 * have no way to check if they've already been allocated.
		 */
		*keyix = *rxkeyix = k - vap->iv_nw_keys;
		return 1;
	}

	/*
	 * We allocate two pair for TKIP when using the h/w to do
	 * the MIC.  For everything else, including software crypto,
	 * we allocate a single entry.  Note that s/w crypto requires
	 * a pass-through slot on the 5211 and 5212.  The 5210 does
	 * not support pass-through cache entries and we map all
	 * those requests to slot 0.
	 */
	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
		return key_alloc_single(sc, keyix, rxkeyix);
	} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
		/* NB: split-mic parts need separate tx/rx slots. */
		if (sc->sc_splitmic)
			return key_alloc_2pair(sc, keyix, rxkeyix);
		else
			return key_alloc_pair(sc, keyix, rxkeyix);
	} else {
		return key_alloc_single(sc, keyix, rxkeyix);
	}
}
2485 
2486 /*
2487  * Delete an entry in the key cache allocated by ath_key_alloc.
2488  */
static int
ath_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	u_int keyix = k->wk_keyix;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);

	ath_hal_keyreset(ah, keyix);
	/*
	 * Handle split tx/rx keying required for TKIP with h/w MIC.
	 */
	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
		ath_hal_keyreset(ah, keyix+32);		/* RX key */
	if (keyix >= IEEE80211_WEP_NKID) {
		/*
		 * Don't touch keymap entries for global keys so
		 * they are never considered for dynamic allocation.
		 */
		clrbit(sc->sc_keymap, keyix);
		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
			clrbit(sc->sc_keymap, keyix+64);	/* TX key MIC */
			if (sc->sc_splitmic) {
				/* +32 for RX key, +32+64 for RX key MIC */
				clrbit(sc->sc_keymap, keyix+32);
				clrbit(sc->sc_keymap, keyix+32+64);
			}
		}
	}
	/* NB: always reports success to net80211. */
	return 1;
}
2524 
2525 /*
2526  * Set the key cache contents for the specified key.  Key cache
2527  * slot(s) must already have been allocated by ath_key_alloc.
2528  */
2529 static int
2530 ath_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
2531 	const u_int8_t mac[IEEE80211_ADDR_LEN])
2532 {
2533 	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2534 
2535 	return ath_keyset(sc, k, vap->iv_bss);
2536 }
2537 
2538 /*
2539  * Block/unblock tx+rx processing while a key change is done.
2540  * We assume the caller serializes key management operations
2541  * so we only need to worry about synchronization with other
2542  * uses that originate in the driver.
2543  */
static void
ath_key_update_begin(struct ieee80211vap *vap)
{
	struct ifnet *ifp = vap->iv_ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	/* Stop taskqueue-driven rx/tx processing, then block the
	 * send queue; undone in reverse order by ath_key_update_end. */
	taskqueue_block(sc->sc_tq);
	IF_LOCK(&ifp->if_snd);		/* NB: doesn't block mgmt frames */
}
2554 
static void
ath_key_update_end(struct ieee80211vap *vap)
{
	struct ifnet *ifp = vap->iv_ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	/* Release in the reverse order of ath_key_update_begin. */
	IF_UNLOCK(&ifp->if_snd);
	taskqueue_unblock(sc->sc_tq);
}
2565 
2566 /*
2567  * Calculate the receive filter according to the
2568  * operating mode and state:
2569  *
2570  * o always accept unicast, broadcast, and multicast traffic
2571  * o accept PHY error frames when hardware doesn't have MIB support
2572  *   to count and we need them for ANI (sta mode only until recently)
2573  *   and we are not scanning (ANI is disabled)
2574  *   NB: older hal's add rx filter bits out of sight and we need to
2575  *	 blindly preserve them
2576  * o probe request frames are accepted only when operating in
2577  *   hostap, adhoc, or monitor modes
2578  * o enable promiscuous mode
2579  *   - when in monitor mode
2580  *   - if interface marked PROMISC (assumes bridge setting is filtered)
2581  * o accept beacons:
2582  *   - when operating in station mode for collecting rssi data when
2583  *     the station is otherwise quiet, or
2584  *   - when operating in adhoc mode so the 802.11 layer creates
2585  *     node table entries for peers,
2586  *   - when scanning
2587  *   - when doing s/w beacon miss (e.g. for ap+sta)
2588  *   - when operating in ap mode in 11g to detect overlapping bss that
2589  *     require protection
2590  * o accept control frames:
2591  *   - when in monitor mode
2592  * XXX BAR frames for 11n
2593  * XXX HT protection for 11n
2594  */
static u_int32_t
ath_calcrxfilter(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	u_int32_t rfilt;

	rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
#if HAL_ABI_VERSION < 0x08011600
	/* Older hals: blindly preserve radar/phy-error bits they set. */
	rfilt |= (ath_hal_getrxfilter(sc->sc_ah) &
		(HAL_RX_FILTER_PHYRADAR | HAL_RX_FILTER_PHYERR));
#elif HAL_ABI_VERSION < 0x08060100
	/* Phy errors for ANI were sta-only in this hal generation. */
	if (ic->ic_opmode == IEEE80211_M_STA &&
	    !sc->sc_needmib && !sc->sc_scanning)
		rfilt |= HAL_RX_FILTER_PHYERR;
#else
	if (!sc->sc_needmib && !sc->sc_scanning)
		rfilt |= HAL_RX_FILTER_PHYERR;
#endif
	if (ic->ic_opmode != IEEE80211_M_STA)
		rfilt |= HAL_RX_FILTER_PROBEREQ;
	if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
		rfilt |= HAL_RX_FILTER_PROM;
	if (ic->ic_opmode == IEEE80211_M_STA ||
	    ic->ic_opmode == IEEE80211_M_IBSS ||
	    sc->sc_swbmiss || sc->sc_scanning)
		rfilt |= HAL_RX_FILTER_BEACON;
	/*
	 * NB: We don't recalculate the rx filter when
	 * ic_protmode changes; otherwise we could do
	 * this only when ic_protmode != NONE.
	 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
	    IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
		rfilt |= HAL_RX_FILTER_BEACON;
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		rfilt |= HAL_RX_FILTER_CONTROL;
	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
	    __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
	return rfilt;
}
2636 
2637 static void
2638 ath_update_promisc(struct ifnet *ifp)
2639 {
2640 	struct ath_softc *sc = ifp->if_softc;
2641 	u_int32_t rfilt;
2642 
2643 	/* configure rx filter */
2644 	rfilt = ath_calcrxfilter(sc);
2645 	ath_hal_setrxfilter(sc->sc_ah, rfilt);
2646 
2647 	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
2648 }
2649 
static void
ath_update_mcast(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	u_int32_t mfilt[2];	/* 64-bit hw multicast hash filter */

	/* calculate and install multicast filter */
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		struct ifmultiaddr *ifma;
		/*
		 * Merge multicast addresses to form the hardware filter.
		 */
		mfilt[0] = mfilt[1] = 0;
		IF_ADDR_LOCK(ifp);	/* XXX need some fiddling to remove? */
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			caddr_t dl;
			u_int32_t val;
			u_int8_t pos;

			/* calculate XOR of eight 6bit values */
			dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			val = LE_READ_4(dl + 0);
			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			val = LE_READ_4(dl + 3);
			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			/* NB: pos selects one of the 64 filter bits. */
			pos &= 0x3f;
			mfilt[pos / 32] |= (1 << (pos % 32));
		}
		IF_ADDR_UNLOCK(ifp);
	} else
		mfilt[0] = mfilt[1] = ~0;	/* accept all multicast */
	ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
		__func__, mfilt[0], mfilt[1]);
}
2685 
/*
 * Push operational state to the hardware: rx filter, opmode,
 * mac address and multicast filter.
 */
static void
ath_mode_init(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt;

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath_hal_setrxfilter(ah, rfilt);

	/* configure operational mode */
	ath_hal_setopmode(ah);

	/*
	 * Handle any link-level address change.  Note that we only
	 * need to force ic_myaddr; any other addresses are handled
	 * as a byproduct of the ifnet code marking the interface
	 * down then up.
	 *
	 * XXX should get from lladdr instead of arpcom but that's more work
	 */
	IEEE80211_ADDR_COPY(ic->ic_myaddr, IF_LLADDR(ifp));
	ath_hal_setmac(ah, ic->ic_myaddr);

	/* calculate and install multicast filter */
	ath_update_mcast(ifp);
}
2715 
2716 /*
2717  * Set the slot time based on the current setting.
2718  */
2719 static void
2720 ath_setslottime(struct ath_softc *sc)
2721 {
2722 	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2723 	struct ath_hal *ah = sc->sc_ah;
2724 	u_int usec;
2725 
2726 	if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2727 		usec = 13;
2728 	else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2729 		usec = 21;
2730 	else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2731 		/* honor short/long slot time only in 11g */
2732 		/* XXX shouldn't honor on pure g or turbo g channel */
2733 		if (ic->ic_flags & IEEE80211_F_SHSLOT)
2734 			usec = HAL_SLOT_TIME_9;
2735 		else
2736 			usec = HAL_SLOT_TIME_20;
2737 	} else
2738 		usec = HAL_SLOT_TIME_9;
2739 
2740 	DPRINTF(sc, ATH_DEBUG_RESET,
2741 	    "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2742 	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2743 	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2744 
2745 	ath_hal_setslottime(ah, usec);
2746 	sc->sc_updateslot = OK;
2747 }
2748 
2749 /*
2750  * Callback from the 802.11 layer to update the
2751  * slot time based on the current setting.
2752  */
2753 static void
2754 ath_updateslot(struct ifnet *ifp)
2755 {
2756 	struct ath_softc *sc = ifp->if_softc;
2757 	struct ieee80211com *ic = ifp->if_l2com;
2758 
2759 	/*
2760 	 * When not coordinating the BSS, change the hardware
2761 	 * immediately.  For other operation we defer the change
2762 	 * until beacon updates have propagated to the stations.
2763 	 */
2764 	if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2765 		sc->sc_updateslot = UPDATE;
2766 	else
2767 		ath_setslottime(sc);
2768 }
2769 
2770 /*
2771  * Setup a h/w transmit queue for beacons.
2772  */
static int
ath_beaconq_setup(struct ath_hal *ah)
{
	HAL_TXQ_INFO qi;

	memset(&qi, 0, sizeof(qi));
	/* Let the hal pick the default AIFS/CW for the beacon queue;
	 * ath_beaconq_config installs the real values later. */
	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
	/* NB: for dynamic turbo, don't enable any other interrupts */
	qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
	/* NB: the return value of ath_hal_setuptxqueue is passed through. */
	return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
}
2786 
2787 /*
2788  * Setup the transmit queue parameters for the beacon queue.
2789  */
static int
ath_beaconq_config(struct ath_softc *sc)
{
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<(v))-1)
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;

	/* Start from the queue's current settings. */
	ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
		/*
		 * Always burst out beacon and CAB traffic.
		 */
		qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
		qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
		qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
	} else {
		struct wmeParams *wmep =
			&ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
		/*
		 * Adhoc mode; important thing is to use 2x cwmin.
		 */
		qi.tqi_aifs = wmep->wmep_aifsn;
		qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	}

	/* NB: returns 1 on success, 0 on failure. */
	if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
		device_printf(sc->sc_dev, "unable to update parameters for "
			"beacon hardware queue!\n");
		return 0;
	} else {
		ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
		return 1;
	}
#undef ATH_EXPONENT_TO_VALUE
}
2827 
2828 /*
2829  * Allocate and setup an initial beacon frame.
2830  */
2831 static int
2832 ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
2833 {
2834 	struct ieee80211vap *vap = ni->ni_vap;
2835 	struct ath_vap *avp = ATH_VAP(vap);
2836 	struct ath_buf *bf;
2837 	struct mbuf *m;
2838 	int error;
2839 
2840 	bf = avp->av_bcbuf;
2841 	if (bf->bf_m != NULL) {
2842 		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2843 		m_freem(bf->bf_m);
2844 		bf->bf_m = NULL;
2845 	}
2846 	if (bf->bf_node != NULL) {
2847 		ieee80211_free_node(bf->bf_node);
2848 		bf->bf_node = NULL;
2849 	}
2850 
2851 	/*
2852 	 * NB: the beacon data buffer must be 32-bit aligned;
2853 	 * we assume the mbuf routines will return us something
2854 	 * with this alignment (perhaps should assert).
2855 	 */
2856 	m = ieee80211_beacon_alloc(ni, &avp->av_boff);
2857 	if (m == NULL) {
2858 		device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
2859 		sc->sc_stats.ast_be_nombuf++;
2860 		return ENOMEM;
2861 	}
2862 	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
2863 				     bf->bf_segs, &bf->bf_nseg,
2864 				     BUS_DMA_NOWAIT);
2865 	if (error != 0) {
2866 		device_printf(sc->sc_dev,
2867 		    "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
2868 		    __func__, error);
2869 		m_freem(m);
2870 		return error;
2871 	}
2872 
2873 	/*
2874 	 * Calculate a TSF adjustment factor required for staggered
2875 	 * beacons.  Note that we assume the format of the beacon
2876 	 * frame leaves the tstamp field immediately following the
2877 	 * header.
2878 	 */
2879 	if (sc->sc_stagbeacons && avp->av_bslot > 0) {
2880 		uint64_t tsfadjust;
2881 		struct ieee80211_frame *wh;
2882 
2883 		/*
2884 		 * The beacon interval is in TU's; the TSF is in usecs.
2885 		 * We figure out how many TU's to add to align the timestamp
2886 		 * then convert to TSF units and handle byte swapping before
2887 		 * inserting it in the frame.  The hardware will then add this
2888 		 * each time a beacon frame is sent.  Note that we align vap's
2889 		 * 1..N and leave vap 0 untouched.  This means vap 0 has a
2890 		 * timestamp in one beacon interval while the others get a
2891 		 * timstamp aligned to the next interval.
2892 		 */
2893 		tsfadjust = ni->ni_intval *
2894 		    (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
2895 		tsfadjust = htole64(tsfadjust << 10);	/* TU -> TSF */
2896 
2897 		DPRINTF(sc, ATH_DEBUG_BEACON,
2898 		    "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
2899 		    __func__, sc->sc_stagbeacons ? "stagger" : "burst",
2900 		    avp->av_bslot, ni->ni_intval,
2901 		    (long long unsigned) le64toh(tsfadjust));
2902 
2903 		wh = mtod(m, struct ieee80211_frame *);
2904 		memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
2905 	}
2906 	bf->bf_m = m;
2907 	bf->bf_node = ieee80211_ref_node(ni);
2908 
2909 	return 0;
2910 }
2911 
2912 /*
2913  * Setup the beacon frame for transmit.
2914  */
2915 static void
2916 ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
2917 {
2918 #define	USE_SHPREAMBLE(_ic) \
2919 	(((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
2920 		== IEEE80211_F_SHPREAMBLE)
2921 	struct ieee80211_node *ni = bf->bf_node;
2922 	struct ieee80211com *ic = ni->ni_ic;
2923 	struct mbuf *m = bf->bf_m;
2924 	struct ath_hal *ah = sc->sc_ah;
2925 	struct ath_desc *ds;
2926 	int flags, antenna;
2927 	const HAL_RATE_TABLE *rt;
2928 	u_int8_t rix, rate;
2929 
2930 	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
2931 		__func__, m, m->m_len);
2932 
2933 	/* setup descriptors */
2934 	ds = bf->bf_desc;
2935 
2936 	flags = HAL_TXDESC_NOACK;
2937 	if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
2938 		ds->ds_link = bf->bf_daddr;	/* self-linked */
2939 		flags |= HAL_TXDESC_VEOL;
2940 		/*
2941 		 * Let hardware handle antenna switching.
2942 		 */
2943 		antenna = sc->sc_txantenna;
2944 	} else {
2945 		ds->ds_link = 0;
2946 		/*
2947 		 * Switch antenna every 4 beacons.
2948 		 * XXX assumes two antenna
2949 		 */
2950 		if (sc->sc_txantenna != 0)
2951 			antenna = sc->sc_txantenna;
2952 		else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
2953 			antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
2954 		else
2955 			antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
2956 	}
2957 
2958 	KASSERT(bf->bf_nseg == 1,
2959 		("multi-segment beacon frame; nseg %u", bf->bf_nseg));
2960 	ds->ds_data = bf->bf_segs[0].ds_addr;
2961 	/*
2962 	 * Calculate rate code.
2963 	 * XXX everything at min xmit rate
2964 	 */
2965 	rix = 0;
2966 	rt = sc->sc_currates;
2967 	rate = rt->info[rix].rateCode;
2968 	if (USE_SHPREAMBLE(ic))
2969 		rate |= rt->info[rix].shortPreamble;
2970 	ath_hal_setuptxdesc(ah, ds
2971 		, m->m_len + IEEE80211_CRC_LEN	/* frame length */
2972 		, sizeof(struct ieee80211_frame)/* header length */
2973 		, HAL_PKT_TYPE_BEACON		/* Atheros packet type */
2974 		, ni->ni_txpower		/* txpower XXX */
2975 		, rate, 1			/* series 0 rate/tries */
2976 		, HAL_TXKEYIX_INVALID		/* no encryption */
2977 		, antenna			/* antenna mode */
2978 		, flags				/* no ack, veol for beacons */
2979 		, 0				/* rts/cts rate */
2980 		, 0				/* rts/cts duration */
2981 	);
2982 	/* NB: beacon's BufLen must be a multiple of 4 bytes */
2983 	ath_hal_filltxdesc(ah, ds
2984 		, roundup(m->m_len, 4)		/* buffer length */
2985 		, AH_TRUE			/* first segment */
2986 		, AH_TRUE			/* last segment */
2987 		, ds				/* first descriptor */
2988 	);
2989 #if 0
2990 	ath_desc_swap(ds);
2991 #endif
2992 #undef USE_SHPREAMBLE
2993 }
2994 
2995 static void
2996 ath_beacon_update(struct ieee80211vap *vap, int item)
2997 {
2998 	struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;
2999 
3000 	setbit(bo->bo_flags, item);
3001 }
3002 
3003 /*
3004  * Append the contents of src to dst; both queues
3005  * are assumed to be locked.
3006  */
3007 static void
3008 ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
3009 {
3010 	STAILQ_CONCAT(&dst->axq_q, &src->axq_q);
3011 	dst->axq_link = src->axq_link;
3012 	src->axq_link = NULL;
3013 	dst->axq_depth += src->axq_depth;
3014 	src->axq_depth = 0;
3015 }
3016 
3017 /*
3018  * Transmit a beacon frame at SWBA.  Dynamic updates to the
3019  * frame contents are done as needed and the slot time is
3020  * also adjusted based on current state.
3021  */
3022 static void
3023 ath_beacon_proc(void *arg, int pending)
3024 {
3025 	struct ath_softc *sc = arg;
3026 	struct ath_hal *ah = sc->sc_ah;
3027 	struct ieee80211vap *vap;
3028 	struct ath_buf *bf;
3029 	int slot, otherant;
3030 	uint32_t bfaddr;
3031 
3032 	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
3033 		__func__, pending);
3034 	/*
3035 	 * Check if the previous beacon has gone out.  If
3036 	 * not don't try to post another, skip this period
3037 	 * and wait for the next.  Missed beacons indicate
3038 	 * a problem and should not occur.  If we miss too
3039 	 * many consecutive beacons reset the device.
3040 	 */
3041 	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
3042 		sc->sc_bmisscount++;
3043 		DPRINTF(sc, ATH_DEBUG_BEACON,
3044 			"%s: missed %u consecutive beacons\n",
3045 			__func__, sc->sc_bmisscount);
3046 		if (sc->sc_bmisscount > 3)		/* NB: 3 is a guess */
3047 			taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
3048 		return;
3049 	}
3050 	if (sc->sc_bmisscount != 0) {
3051 		DPRINTF(sc, ATH_DEBUG_BEACON,
3052 			"%s: resume beacon xmit after %u misses\n",
3053 			__func__, sc->sc_bmisscount);
3054 		sc->sc_bmisscount = 0;
3055 	}
3056 
3057 	if (sc->sc_stagbeacons) {			/* staggered beacons */
3058 		struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3059 		uint32_t tsftu;
3060 
3061 		tsftu = ath_hal_gettsf32(ah) >> 10;
3062 		/* XXX lintval */
3063 		slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
3064 		vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
3065 		bfaddr = 0;
3066 		if (vap != NULL && vap->iv_state == IEEE80211_S_RUN) {
3067 			bf = ath_beacon_generate(sc, vap);
3068 			if (bf != NULL)
3069 				bfaddr = bf->bf_daddr;
3070 		}
3071 	} else {					/* burst'd beacons */
3072 		uint32_t *bflink = &bfaddr;
3073 
3074 		for (slot = 0; slot < ATH_BCBUF; slot++) {
3075 			vap = sc->sc_bslot[slot];
3076 			if (vap != NULL && vap->iv_state == IEEE80211_S_RUN) {
3077 				bf = ath_beacon_generate(sc, vap);
3078 				if (bf != NULL) {
3079 					*bflink = bf->bf_daddr;
3080 					bflink = &bf->bf_desc->ds_link;
3081 				}
3082 			}
3083 		}
3084 		*bflink = 0;				/* terminate list */
3085 	}
3086 
3087 	/*
3088 	 * Handle slot time change when a non-ERP station joins/leaves
3089 	 * an 11g network.  The 802.11 layer notifies us via callback,
3090 	 * we mark updateslot, then wait one beacon before effecting
3091 	 * the change.  This gives associated stations at least one
3092 	 * beacon interval to note the state change.
3093 	 */
3094 	/* XXX locking */
3095 	if (sc->sc_updateslot == UPDATE) {
3096 		sc->sc_updateslot = COMMIT;	/* commit next beacon */
3097 		sc->sc_slotupdate = slot;
3098 	} else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
3099 		ath_setslottime(sc);		/* commit change to h/w */
3100 
3101 	/*
3102 	 * Check recent per-antenna transmit statistics and flip
3103 	 * the default antenna if noticeably more frames went out
3104 	 * on the non-default antenna.
3105 	 * XXX assumes 2 anntenae
3106 	 */
3107 	if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
3108 		otherant = sc->sc_defant & 1 ? 2 : 1;
3109 		if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
3110 			ath_setdefantenna(sc, otherant);
3111 		sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
3112 	}
3113 
3114 	if (bfaddr != 0) {
3115 		/*
3116 		 * Stop any current dma and put the new frame on the queue.
3117 		 * This should never fail since we check above that no frames
3118 		 * are still pending on the queue.
3119 		 */
3120 		if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
3121 			DPRINTF(sc, ATH_DEBUG_ANY,
3122 				"%s: beacon queue %u did not stop?\n",
3123 				__func__, sc->sc_bhalq);
3124 		}
3125 		/* NB: cabq traffic should already be queued and primed */
3126 		ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
3127 		ath_hal_txstart(ah, sc->sc_bhalq);
3128 
3129 		sc->sc_stats.ast_be_xmit++;
3130 	}
3131 }
3132 
/*
 * Regenerate a vap's beacon frame for the coming SWBA and prime
 * the CAB queue with any buffered multicast traffic at DTIM.
 * Returns the ath_buf holding the ready-to-send beacon, or NULL
 * if the frame could not be remapped after a size change.
 */
static struct ath_buf *
ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_txq *cabq = sc->sc_cabq;
	struct ath_buf *bf;
	struct mbuf *m;
	int nmcastq, error;

	KASSERT(vap->iv_state == IEEE80211_S_RUN,
	    ("not running, state %d", vap->iv_state));
	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	bf = avp->av_bcbuf;
	m = bf->bf_m;
	nmcastq = avp->av_mcastq.axq_depth;
	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(vap->iv_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			return NULL;
		}
	}
	/* NB: bo_tim[4] & 1 is the multicast bit, i.e. this is a DTIM */
	if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: cabq did not drain, mcastq %u cabq %u\n",
		    __func__, nmcastq, cabq->axq_depth);
		sc->sc_stats.ast_cabq_busy++;
		if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
			/*
			 * CABQ traffic from a previous vap is still pending.
			 * We must drain the q before this beacon frame goes
			 * out as otherwise this vap's stations will get cab
			 * frames from a different vap.
			 * XXX could be slow causing us to miss DBA
			 */
			ath_tx_draintxq(sc, cabq);
		}
	}
	ath_beacon_setup(sc, bf);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/*
	 * Enable the CAB queue before the beacon queue to
	 * insure cab frames are triggered by this beacon.
	 */
	if (avp->av_boff.bo_tim[4] & 1) {
		struct ath_hal *ah = sc->sc_ah;

		/* NB: only at DTIM */
		/*
		 * NOTE(review): the unlocks below release cabq before
		 * av_mcastq, i.e. in acquisition rather than reverse
		 * order — harmless but worth confirming intent.
		 */
		ATH_TXQ_LOCK(cabq);
		ATH_TXQ_LOCK(&avp->av_mcastq);
		if (nmcastq) {
			struct ath_buf *bfm;

			/*
			 * Move frames from the s/w mcast q to the h/w cab q.
			 * XXX MORE_DATA bit
			 */
			bfm = STAILQ_FIRST(&avp->av_mcastq.axq_q);
			if (cabq->axq_link != NULL) {
				*cabq->axq_link = bfm->bf_daddr;
			} else
				ath_hal_puttxbuf(ah, cabq->axq_qnum,
					bfm->bf_daddr);
			ath_txqmove(cabq, &avp->av_mcastq);

			sc->sc_stats.ast_cabq_xmit += nmcastq;
		}
		/* NB: gated by beacon so safe to start here */
		ath_hal_txstart(ah, cabq->axq_qnum);
		ATH_TXQ_UNLOCK(cabq);
		ATH_TXQ_UNLOCK(&avp->av_mcastq);
	}
	return bf;
}
3221 
/*
 * Load and start the (self-linked, VEOL) adhoc beacon frame on
 * the beacon queue.  The caller must already have stopped tx dma
 * on that queue.
 */
static void
ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	bf = avp->av_bcbuf;
	m = bf->bf_m;
	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(vap->iv_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			return;
		}
	}
	ath_beacon_setup(sc, bf);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/* NB: caller is known to have already stopped tx dma */
	ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
	ath_hal_txstart(ah, sc->sc_bhalq);
}
3261 
3262 /*
3263  * Reset the hardware after detecting beacons have stopped.
3264  */
3265 static void
3266 ath_bstuck_proc(void *arg, int pending)
3267 {
3268 	struct ath_softc *sc = arg;
3269 	struct ifnet *ifp = sc->sc_ifp;
3270 
3271 	if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
3272 		sc->sc_bmisscount);
3273 	ath_reset(ifp);
3274 }
3275 
3276 /*
3277  * Reclaim beacon resources and return buffer to the pool.
3278  */
3279 static void
3280 ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
3281 {
3282 
3283 	if (bf->bf_m != NULL) {
3284 		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3285 		m_freem(bf->bf_m);
3286 		bf->bf_m = NULL;
3287 	}
3288 	if (bf->bf_node != NULL) {
3289 		ieee80211_free_node(bf->bf_node);
3290 		bf->bf_node = NULL;
3291 	}
3292 	STAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
3293 }
3294 
3295 /*
3296  * Reclaim beacon resources.
3297  */
3298 static void
3299 ath_beacon_free(struct ath_softc *sc)
3300 {
3301 	struct ath_buf *bf;
3302 
3303 	STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
3304 		if (bf->bf_m != NULL) {
3305 			bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3306 			m_freem(bf->bf_m);
3307 			bf->bf_m = NULL;
3308 		}
3309 		if (bf->bf_node != NULL) {
3310 			ieee80211_free_node(bf->bf_node);
3311 			bf->bf_node = NULL;
3312 		}
3313 	}
3314 }
3315 
3316 /*
3317  * Configure the beacon and sleep timers.
3318  *
3319  * When operating as an AP this resets the TSF and sets
3320  * up the hardware to notify us when we need to issue beacons.
3321  *
3322  * When operating in station mode this sets up the beacon
3323  * timers according to the timestamp of the last received
3324  * beacon and the current TSF, configures PCF and DTIM
3325  * handling, programs the sleep registers so the hardware
3326  * will wakeup in time to receive beacons, and configures
3327  * the beacon miss handling so we'll receive a BMISS
3328  * interrupt when we stop seeing beacons from the AP
3329  * we've associated with.
3330  */
3331 static void
3332 ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
3333 {
3334 #define	TSF_TO_TU(_h,_l) \
3335 	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
3336 #define	FUDGE	2
3337 	struct ath_hal *ah = sc->sc_ah;
3338 	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3339 	struct ieee80211_node *ni;
3340 	u_int32_t nexttbtt, intval, tsftu;
3341 	u_int64_t tsf;
3342 
3343 	if (vap == NULL)
3344 		vap = TAILQ_FIRST(&ic->ic_vaps);	/* XXX */
3345 	ni = vap->iv_bss;
3346 
3347 	/* extract tstamp from last beacon and convert to TU */
3348 	nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
3349 			     LE_READ_4(ni->ni_tstamp.data));
3350 	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
3351 		/*
3352 		 * For multi-bss ap support beacons are either staggered
3353 		 * evenly over N slots or burst together.  For the former
3354 		 * arrange for the SWBA to be delivered for each slot.
3355 		 * Slots that are not occupied will generate nothing.
3356 		 */
3357 		/* NB: the beacon interval is kept internally in TU's */
3358 		intval = ni->ni_intval & HAL_BEACON_PERIOD;
3359 		if (sc->sc_stagbeacons)
3360 			intval /= ATH_BCBUF;
3361 	} else {
3362 		/* NB: the beacon interval is kept internally in TU's */
3363 		intval = ni->ni_intval & HAL_BEACON_PERIOD;
3364 	}
3365 	if (nexttbtt == 0)		/* e.g. for ap mode */
3366 		nexttbtt = intval;
3367 	else if (intval)		/* NB: can be 0 for monitor mode */
3368 		nexttbtt = roundup(nexttbtt, intval);
3369 	DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
3370 		__func__, nexttbtt, intval, ni->ni_intval);
3371 	if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) {
3372 		HAL_BEACON_STATE bs;
3373 		int dtimperiod, dtimcount;
3374 		int cfpperiod, cfpcount;
3375 
3376 		/*
3377 		 * Setup dtim and cfp parameters according to
3378 		 * last beacon we received (which may be none).
3379 		 */
3380 		dtimperiod = ni->ni_dtim_period;
3381 		if (dtimperiod <= 0)		/* NB: 0 if not known */
3382 			dtimperiod = 1;
3383 		dtimcount = ni->ni_dtim_count;
3384 		if (dtimcount >= dtimperiod)	/* NB: sanity check */
3385 			dtimcount = 0;		/* XXX? */
3386 		cfpperiod = 1;			/* NB: no PCF support yet */
3387 		cfpcount = 0;
3388 		/*
3389 		 * Pull nexttbtt forward to reflect the current
3390 		 * TSF and calculate dtim+cfp state for the result.
3391 		 */
3392 		tsf = ath_hal_gettsf64(ah);
3393 		tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3394 		do {
3395 			nexttbtt += intval;
3396 			if (--dtimcount < 0) {
3397 				dtimcount = dtimperiod - 1;
3398 				if (--cfpcount < 0)
3399 					cfpcount = cfpperiod - 1;
3400 			}
3401 		} while (nexttbtt < tsftu);
3402 		memset(&bs, 0, sizeof(bs));
3403 		bs.bs_intval = intval;
3404 		bs.bs_nexttbtt = nexttbtt;
3405 		bs.bs_dtimperiod = dtimperiod*intval;
3406 		bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
3407 		bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
3408 		bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
3409 		bs.bs_cfpmaxduration = 0;
3410 #if 0
3411 		/*
3412 		 * The 802.11 layer records the offset to the DTIM
3413 		 * bitmap while receiving beacons; use it here to
3414 		 * enable h/w detection of our AID being marked in
3415 		 * the bitmap vector (to indicate frames for us are
3416 		 * pending at the AP).
3417 		 * XXX do DTIM handling in s/w to WAR old h/w bugs
3418 		 * XXX enable based on h/w rev for newer chips
3419 		 */
3420 		bs.bs_timoffset = ni->ni_timoff;
3421 #endif
3422 		/*
3423 		 * Calculate the number of consecutive beacons to miss
3424 		 * before taking a BMISS interrupt.
3425 		 * Note that we clamp the result to at most 10 beacons.
3426 		 */
3427 		bs.bs_bmissthreshold = vap->iv_bmissthreshold;
3428 		if (bs.bs_bmissthreshold > 10)
3429 			bs.bs_bmissthreshold = 10;
3430 		else if (bs.bs_bmissthreshold <= 0)
3431 			bs.bs_bmissthreshold = 1;
3432 
3433 		/*
3434 		 * Calculate sleep duration.  The configuration is
3435 		 * given in ms.  We insure a multiple of the beacon
3436 		 * period is used.  Also, if the sleep duration is
3437 		 * greater than the DTIM period then it makes senses
3438 		 * to make it a multiple of that.
3439 		 *
3440 		 * XXX fixed at 100ms
3441 		 */
3442 		bs.bs_sleepduration =
3443 			roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
3444 		if (bs.bs_sleepduration > bs.bs_dtimperiod)
3445 			bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
3446 
3447 		DPRINTF(sc, ATH_DEBUG_BEACON,
3448 			"%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
3449 			, __func__
3450 			, tsf, tsftu
3451 			, bs.bs_intval
3452 			, bs.bs_nexttbtt
3453 			, bs.bs_dtimperiod
3454 			, bs.bs_nextdtim
3455 			, bs.bs_bmissthreshold
3456 			, bs.bs_sleepduration
3457 			, bs.bs_cfpperiod
3458 			, bs.bs_cfpmaxduration
3459 			, bs.bs_cfpnext
3460 			, bs.bs_timoffset
3461 		);
3462 		ath_hal_intrset(ah, 0);
3463 		ath_hal_beacontimers(ah, &bs);
3464 		sc->sc_imask |= HAL_INT_BMISS;
3465 		ath_hal_intrset(ah, sc->sc_imask);
3466 	} else {
3467 		ath_hal_intrset(ah, 0);
3468 		if (nexttbtt == intval)
3469 			intval |= HAL_BEACON_RESET_TSF;
3470 		if (ic->ic_opmode == IEEE80211_M_IBSS) {
3471 			/*
3472 			 * In IBSS mode enable the beacon timers but only
3473 			 * enable SWBA interrupts if we need to manually
3474 			 * prepare beacon frames.  Otherwise we use a
3475 			 * self-linked tx descriptor and let the hardware
3476 			 * deal with things.
3477 			 */
3478 			intval |= HAL_BEACON_ENA;
3479 			if (!sc->sc_hasveol)
3480 				sc->sc_imask |= HAL_INT_SWBA;
3481 			if ((intval & HAL_BEACON_RESET_TSF) == 0) {
3482 				/*
3483 				 * Pull nexttbtt forward to reflect
3484 				 * the current TSF.
3485 				 */
3486 				tsf = ath_hal_gettsf64(ah);
3487 				tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3488 				do {
3489 					nexttbtt += intval;
3490 				} while (nexttbtt < tsftu);
3491 			}
3492 			ath_beaconq_config(sc);
3493 		} else if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
3494 			/*
3495 			 * In AP mode we enable the beacon timers and
3496 			 * SWBA interrupts to prepare beacon frames.
3497 			 */
3498 			intval |= HAL_BEACON_ENA;
3499 			sc->sc_imask |= HAL_INT_SWBA;	/* beacon prepare */
3500 			ath_beaconq_config(sc);
3501 		}
3502 		ath_hal_beaconinit(ah, nexttbtt, intval);
3503 		sc->sc_bmisscount = 0;
3504 		ath_hal_intrset(ah, sc->sc_imask);
3505 		/*
3506 		 * When using a self-linked beacon descriptor in
3507 		 * ibss mode load it once here.
3508 		 */
3509 		if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
3510 			ath_beacon_start_adhoc(sc, vap);
3511 	}
3512 	sc->sc_syncbeacon = 0;
3513 #undef FUDGE
3514 #undef TSF_TO_TU
3515 }
3516 
3517 static void
3518 ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3519 {
3520 	bus_addr_t *paddr = (bus_addr_t*) arg;
3521 	KASSERT(error == 0, ("error %u on bus_dma callback", error));
3522 	*paddr = segs->ds_addr;
3523 }
3524 
3525 static int
3526 ath_descdma_setup(struct ath_softc *sc,
3527 	struct ath_descdma *dd, ath_bufhead *head,
3528 	const char *name, int nbuf, int ndesc)
3529 {
3530 #define	DS2PHYS(_dd, _ds) \
3531 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
3532 	struct ifnet *ifp = sc->sc_ifp;
3533 	struct ath_desc *ds;
3534 	struct ath_buf *bf;
3535 	int i, bsize, error;
3536 
3537 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
3538 	    __func__, name, nbuf, ndesc);
3539 
3540 	dd->dd_name = name;
3541 	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
3542 
3543 	/*
3544 	 * Setup DMA descriptor area.
3545 	 */
3546 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
3547 		       PAGE_SIZE, 0,		/* alignment, bounds */
3548 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
3549 		       BUS_SPACE_MAXADDR,	/* highaddr */
3550 		       NULL, NULL,		/* filter, filterarg */
3551 		       dd->dd_desc_len,		/* maxsize */
3552 		       1,			/* nsegments */
3553 		       dd->dd_desc_len,		/* maxsegsize */
3554 		       BUS_DMA_ALLOCNOW,	/* flags */
3555 		       NULL,			/* lockfunc */
3556 		       NULL,			/* lockarg */
3557 		       &dd->dd_dmat);
3558 	if (error != 0) {
3559 		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
3560 		return error;
3561 	}
3562 
3563 	/* allocate descriptors */
3564 	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
3565 	if (error != 0) {
3566 		if_printf(ifp, "unable to create dmamap for %s descriptors, "
3567 			"error %u\n", dd->dd_name, error);
3568 		goto fail0;
3569 	}
3570 
3571 	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
3572 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
3573 				 &dd->dd_dmamap);
3574 	if (error != 0) {
3575 		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
3576 			"error %u\n", nbuf * ndesc, dd->dd_name, error);
3577 		goto fail1;
3578 	}
3579 
3580 	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
3581 				dd->dd_desc, dd->dd_desc_len,
3582 				ath_load_cb, &dd->dd_desc_paddr,
3583 				BUS_DMA_NOWAIT);
3584 	if (error != 0) {
3585 		if_printf(ifp, "unable to map %s descriptors, error %u\n",
3586 			dd->dd_name, error);
3587 		goto fail2;
3588 	}
3589 
3590 	ds = dd->dd_desc;
3591 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
3592 	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
3593 	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
3594 
3595 	/* allocate rx buffers */
3596 	bsize = sizeof(struct ath_buf) * nbuf;
3597 	bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
3598 	if (bf == NULL) {
3599 		if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3600 			dd->dd_name, bsize);
3601 		goto fail3;
3602 	}
3603 	dd->dd_bufptr = bf;
3604 
3605 	STAILQ_INIT(head);
3606 	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
3607 		bf->bf_desc = ds;
3608 		bf->bf_daddr = DS2PHYS(dd, ds);
3609 		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3610 				&bf->bf_dmamap);
3611 		if (error != 0) {
3612 			if_printf(ifp, "unable to create dmamap for %s "
3613 				"buffer %u, error %u\n", dd->dd_name, i, error);
3614 			ath_descdma_cleanup(sc, dd, head);
3615 			return error;
3616 		}
3617 		STAILQ_INSERT_TAIL(head, bf, bf_list);
3618 	}
3619 	return 0;
3620 fail3:
3621 	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3622 fail2:
3623 	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3624 fail1:
3625 	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3626 fail0:
3627 	bus_dma_tag_destroy(dd->dd_dmat);
3628 	memset(dd, 0, sizeof(*dd));
3629 	return error;
3630 #undef DS2PHYS
3631 }
3632 
3633 static void
3634 ath_descdma_cleanup(struct ath_softc *sc,
3635 	struct ath_descdma *dd, ath_bufhead *head)
3636 {
3637 	struct ath_buf *bf;
3638 	struct ieee80211_node *ni;
3639 
3640 	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3641 	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3642 	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3643 	bus_dma_tag_destroy(dd->dd_dmat);
3644 
3645 	STAILQ_FOREACH(bf, head, bf_list) {
3646 		if (bf->bf_m) {
3647 			m_freem(bf->bf_m);
3648 			bf->bf_m = NULL;
3649 		}
3650 		if (bf->bf_dmamap != NULL) {
3651 			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
3652 			bf->bf_dmamap = NULL;
3653 		}
3654 		ni = bf->bf_node;
3655 		bf->bf_node = NULL;
3656 		if (ni != NULL) {
3657 			/*
3658 			 * Reclaim node reference.
3659 			 */
3660 			ieee80211_free_node(ni);
3661 		}
3662 	}
3663 
3664 	STAILQ_INIT(head);
3665 	free(dd->dd_bufptr, M_ATHDEV);
3666 	memset(dd, 0, sizeof(*dd));
3667 }
3668 
3669 static int
3670 ath_desc_alloc(struct ath_softc *sc)
3671 {
3672 	int error;
3673 
3674 	error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
3675 			"rx", ath_rxbuf, 1);
3676 	if (error != 0)
3677 		return error;
3678 
3679 	error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3680 			"tx", ath_txbuf, ATH_TXDESC);
3681 	if (error != 0) {
3682 		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3683 		return error;
3684 	}
3685 
3686 	error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3687 			"beacon", ATH_BCBUF, 1);
3688 	if (error != 0) {
3689 		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3690 		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3691 		return error;
3692 	}
3693 	return 0;
3694 }
3695 
/*
 * Free all descriptor rings; a non-zero dd_desc_len marks a ring
 * that was successfully created by ath_desc_alloc.
 */
static void
ath_desc_free(struct ath_softc *sc)
{

	if (sc->sc_bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
	if (sc->sc_txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}
3707 
3708 static struct ieee80211_node *
3709 ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3710 {
3711 	struct ieee80211com *ic = vap->iv_ic;
3712 	struct ath_softc *sc = ic->ic_ifp->if_softc;
3713 	const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3714 	struct ath_node *an;
3715 
3716 	an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
3717 	if (an == NULL) {
3718 		/* XXX stat+msg */
3719 		return NULL;
3720 	}
3721 	ath_rate_node_init(sc, an);
3722 
3723 	DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
3724 	return &an->an_node;
3725 }
3726 
3727 static void
3728 ath_node_free(struct ieee80211_node *ni)
3729 {
3730 	struct ieee80211com *ic = ni->ni_ic;
3731         struct ath_softc *sc = ic->ic_ifp->if_softc;
3732 
3733 	DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3734 
3735 	ath_rate_node_cleanup(sc, ATH_NODE(ni));
3736 	sc->sc_node_free(ni);
3737 }
3738 
3739 static void
3740 ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3741 {
3742 	struct ieee80211com *ic = ni->ni_ic;
3743 	struct ath_softc *sc = ic->ic_ifp->if_softc;
3744 	struct ath_hal *ah = sc->sc_ah;
3745 	HAL_CHANNEL hchan;
3746 
3747 	*rssi = ic->ic_node_getrssi(ni);
3748 	if (ni->ni_chan != IEEE80211_CHAN_ANYC) {
3749 		ath_mapchan(ic, &hchan, ni->ni_chan);
3750 		*noise = ath_hal_getchannoise(ah, &hchan);
3751 	} else
3752 		*noise = -95;		/* nominally correct */
3753 }
3754 
/*
 * (Re)initialize a receive buffer: attach a cluster mbuf if the
 * buffer has none, program its rx descriptor self-linked, and
 * chain it onto the rx descriptor list.  Returns 0 or an errno.
 */
static int
ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
					     bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			m_freem(m);
			return error;
		}
		KASSERT(bf->bf_nseg == 1,
			("multi-segment packet; nseg %u", bf->bf_nseg));
		bf->bf_m = m;
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This insures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->bf_desc;
	ds->ds_link = bf->bf_daddr;	/* link to self */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	/* Patch the previous tail's link to point at this descriptor. */
	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}
3827 
/*
 * Widen the 15-bit timestamp from an rx descriptor into a full
 * 64-bit TSF, using the supplied reference TSF for the upper bits.
 */
static __inline u_int64_t
ath_extend_tsf(u_int32_t rstamp, u_int64_t tsf)
{
	u_int64_t base = tsf;

	/*
	 * If the low 15-bit window of the reference TSF has already
	 * wrapped past the rx stamp, back up one window's worth so
	 * the stamp lands in the correct epoch.
	 */
	if (rstamp > (base & 0x7fff))
		base -= 0x8000;
	return ((base &~ 0x7fff) | rstamp);
}
3839 
3840 /*
3841  * Intercept management frames to collect beacon rssi data
3842  * and to do ibss merges.
3843  */
static void
ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
	int subtype, int rssi, int noise, u_int32_t rstamp)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;

	/*
	 * Call up first so subsequent work can use information
	 * potentially stored in the node (e.g. for ibss merge).
	 */
	ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, noise, rstamp);
	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_BEACON:
		/* update rssi statistics for use by the hal */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
		if (sc->sc_syncbeacon &&
		    ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
			/*
			 * Resync beacon timers using the tsf of the beacon
			 * frame we just received.
			 */
			ath_beacon_config(sc, vap);
		}
		/* fall thru... beacons are also checked for ibss merge below */
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
		if (vap->iv_opmode == IEEE80211_M_IBSS &&
		    vap->iv_state == IEEE80211_S_RUN) {
			/* extend the hw rx stamp against the current TSF */
			u_int64_t tsf = ath_extend_tsf(rstamp,
				ath_hal_gettsf64(sc->sc_ah));
			/*
			 * Handle ibss merge as needed; check the tsf on the
			 * frame before attempting the merge.  The 802.11 spec
			 * says the station should change its bssid to match
			 * the oldest station with the same ssid, where oldest
			 * is determined by the tsf.  Note that hardware
			 * reconfiguration happens through callback to
			 * ath_newstate as the state machine will go from
			 * RUN -> RUN when this happens.
			 */
			if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
				DPRINTF(sc, ATH_DEBUG_STATE,
				    "ibss merge, rstamp %u tsf %ju "
				    "tstamp %ju\n", rstamp, (uintmax_t)tsf,
				    (uintmax_t)ni->ni_tstamp.tsf);
				(void) ieee80211_ibss_merge(ni);
			}
		}
		break;
	}
}
3895 
3896 /*
3897  * Set the default antenna.
3898  */
3899 static void
3900 ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3901 {
3902 	struct ath_hal *ah = sc->sc_ah;
3903 
3904 	/* XXX block beacon interrupts */
3905 	ath_hal_setdefantenna(ah, antenna);
3906 	if (sc->sc_defant != antenna)
3907 		sc->sc_stats.ast_ant_defswitch++;
3908 	sc->sc_defant = antenna;
3909 	sc->sc_rxotherant = 0;
3910 }
3911 
/*
 * Fill in the radiotap rx header for a received frame and hand it
 * to bpf.  Returns 0 if the frame was discarded as a runt (caller
 * should drop it), 1 otherwise.
 */
static int
ath_rx_tap(struct ifnet *ifp, struct mbuf *m,
	const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
{
#define	CHAN_HT20	htole32(IEEE80211_CHAN_HT20)
#define	CHAN_HT40U	htole32(IEEE80211_CHAN_HT40U)
#define	CHAN_HT40D	htole32(IEEE80211_CHAN_HT40D)
#define	CHAN_HT		(CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
	struct ath_softc *sc = ifp->if_softc;
	const HAL_RATE_TABLE *rt;
	uint8_t rix;

	/*
	 * Discard anything shorter than an ack or cts.
	 */
	if (m->m_pkthdr.len < IEEE80211_ACK_LEN) {
		DPRINTF(sc, ATH_DEBUG_RECV, "%s: runt packet %d\n",
			__func__, m->m_pkthdr.len);
		sc->sc_stats.ast_rx_tooshort++;
		return 0;
	}
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	rix = rt->rateCodeToIndex[rs->rs_rate];
	sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
	sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
#ifdef AH_SUPPORT_AR5416
	/* refresh the HT channel-width flags for this frame */
	sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
	if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) {	/* HT rate */
		if ((rs->rs_flags & HAL_RX_2040) == 0)
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
		else if (sc->sc_curchan.channelFlags & CHANNEL_HT40PLUS)
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
		else
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
		/*
		 * NOTE(review): this sets SHORTGI when HAL_RX_GI is
		 * NOT set, which looks inverted if HAL_RX_GI means
		 * "short guard interval" — confirm against the HAL.
		 */
		if ((rs->rs_flags & HAL_RX_GI) == 0)
			sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
	}
#endif
	sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(rs->rs_tstamp, tsf));
	if (rs->rs_status & HAL_RXERR_CRC)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
	/* XXX propagate other error flags from descriptor */
	sc->sc_rx_th.wr_antsignal = rs->rs_rssi + nf;
	sc->sc_rx_th.wr_antnoise = nf;
	sc->sc_rx_th.wr_antenna = rs->rs_antenna;

	bpf_mtap2(ifp->if_bpf, &sc->sc_rx_th, sc->sc_rx_th_len, m);

	return 1;
#undef CHAN_HT
#undef CHAN_HT20
#undef CHAN_HT40U
#undef CHAN_HT40D
}
3967 
3968 static void
3969 ath_handle_micerror(struct ieee80211com *ic,
3970 	struct ieee80211_frame *wh, int keyix)
3971 {
3972 	struct ieee80211_node *ni;
3973 
3974 	/* XXX recheck MIC to deal w/ chips that lie */
3975 	/* XXX discard MIC errors on !data frames */
3976 	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
3977 	if (ni != NULL) {
3978 		ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
3979 		ieee80211_free_node(ni);
3980 	}
3981 }
3982 
/*
 * Receive processing task: walk the rx descriptor list, reap each
 * completed frame, classify errors, reassemble two-descriptor
 * jumbograms, and dispatch good frames into net80211.  The buffer
 * is re-queued (and its descriptor re-initialized) at rx_next on
 * every iteration, so the hardware never runs out of rx space.
 */
static void
ath_rx_proc(void *arg, int npending)
{
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_softc *sc = arg;
	struct ath_buf *bf;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct ath_rx_status *rs;
	struct mbuf *m;
	struct ieee80211_node *ni;
	int len, type, ngood;
	u_int phyerr;
	HAL_STATUS status;
	int16_t nf;
	u_int64_t tsf;

	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
	ngood = 0;
	/* sample noise floor and TSF once for the whole batch */
	nf = ath_hal_getchannoise(ah, &sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;
	tsf = ath_hal_gettsf64(ah);
	do {
		bf = STAILQ_FIRST(&sc->sc_rxbuf);
		if (bf == NULL) {		/* NB: shouldn't happen */
			if_printf(ifp, "%s: no buffer!\n", __func__);
			break;
		}
		m = bf->bf_m;
		if (m == NULL) {		/* NB: shouldn't happen */
			/*
			 * If mbuf allocation failed previously there
			 * will be no mbuf; try again to re-populate it.
			 */
			/* XXX make debug msg */
			if_printf(ifp, "%s: no mbuf!\n", __func__);
			STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
			goto rx_next;
		}
		ds = bf->bf_desc;
		if (ds->ds_link == bf->bf_daddr) {
			/* NB: never process the self-linked entry at the end */
			break;
		}
		/* XXX sync descriptor memory */
		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		rs = &bf->bf_status.ds_rxstat;
		status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS)
			break;
		STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
		if (rs->rs_status != 0) {
			/* classify the error and bump the matching stat */
			if (rs->rs_status & HAL_RXERR_CRC)
				sc->sc_stats.ast_rx_crcerr++;
			if (rs->rs_status & HAL_RXERR_FIFO)
				sc->sc_stats.ast_rx_fifoerr++;
			if (rs->rs_status & HAL_RXERR_PHY) {
				sc->sc_stats.ast_rx_phyerr++;
				phyerr = rs->rs_phyerr & 0x1f;
				sc->sc_stats.ast_rx_phy[phyerr]++;
				goto rx_error;	/* NB: don't count in ierrors */
			}
			if (rs->rs_status & HAL_RXERR_DECRYPT) {
				/*
				 * Decrypt error.  If the error occurred
				 * because there was no hardware key, then
				 * let the frame through so the upper layers
				 * can process it.  This is necessary for 5210
				 * parts which have no way to setup a ``clear''
				 * key cache entry.
				 *
				 * XXX do key cache faulting
				 */
				if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
					goto rx_accept;
				sc->sc_stats.ast_rx_badcrypt++;
			}
			if (rs->rs_status & HAL_RXERR_MIC) {
				sc->sc_stats.ast_rx_badmic++;
				/*
				 * Do minimal work required to hand off
				 * the 802.11 header for notification.
				 */
				/* XXX frag's and qos frames */
				len = rs->rs_datalen;
				if (len >= sizeof (struct ieee80211_frame)) {
					bus_dmamap_sync(sc->sc_dmat,
					    bf->bf_dmamap,
					    BUS_DMASYNC_POSTREAD);
					ath_handle_micerror(ic,
					    mtod(m, struct ieee80211_frame *),
					    sc->sc_splitmic ?
						rs->rs_keyix-32 : rs->rs_keyix);
				}
			}
			ifp->if_ierrors++;
rx_error:
			/*
			 * Cleanup any pending partial frame.
			 */
			if (sc->sc_rxpending != NULL) {
				m_freem(sc->sc_rxpending);
				sc->sc_rxpending = NULL;
			}
			/*
			 * When a tap is present pass error frames
			 * that have been requested.  By default we
			 * pass decrypt+mic errors but others may be
			 * interesting (e.g. crc).
			 */
			if (bpf_peers_present(ifp->if_bpf) &&
			    (rs->rs_status & sc->sc_monpass)) {
				bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
				    BUS_DMASYNC_POSTREAD);
				/* NB: bpf needs the mbuf length setup */
				len = rs->rs_datalen;
				m->m_pkthdr.len = m->m_len = len;
				(void) ath_rx_tap(ifp, m, rs, tsf, nf);
			}
			/* XXX pass MIC errors up for s/w recalculation */
			goto rx_next;
		}
rx_accept:
		/*
		 * Sync and unmap the frame.  At this point we're
		 * committed to passing the mbuf somewhere so clear
		 * bf_m; this means a new mbuf must be allocated
		 * when the rx descriptor is setup again to receive
		 * another frame.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		bf->bf_m = NULL;

		len = rs->rs_datalen;
		m->m_len = len;

		if (rs->rs_more) {
			/*
			 * Frame spans multiple descriptors; save
			 * it for the next completed descriptor, it
			 * will be used to construct a jumbogram.
			 */
			if (sc->sc_rxpending != NULL) {
				/* NB: max frame size is currently 2 clusters */
				sc->sc_stats.ast_rx_toobig++;
				m_freem(sc->sc_rxpending);
			}
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			sc->sc_rxpending = m;
			goto rx_next;
		} else if (sc->sc_rxpending != NULL) {
			/*
			 * This is the second part of a jumbogram,
			 * chain it to the first mbuf, adjust the
			 * frame length, and clear the rxpending state.
			 */
			sc->sc_rxpending->m_next = m;
			sc->sc_rxpending->m_pkthdr.len += len;
			m = sc->sc_rxpending;
			sc->sc_rxpending = NULL;
		} else {
			/*
			 * Normal single-descriptor receive; setup
			 * the rcvif and packet length.
			 */
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
		}

		ifp->if_ipackets++;
		sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;

		if (bpf_peers_present(ifp->if_bpf) &&
		    !ath_rx_tap(ifp, m, rs, tsf, nf)) {
			m_freem(m);		/* XXX reclaim */
			goto rx_next;
		}

		/*
		 * From this point on we assume the frame is at least
		 * as large as ieee80211_frame_min; verify that.
		 */
		if (len < IEEE80211_MIN_LEN) {
			DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n",
				__func__, len);
			sc->sc_stats.ast_rx_tooshort++;
			m_freem(m);
			goto rx_next;
		}

		if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
			const HAL_RATE_TABLE *rt = sc->sc_currates;
			uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];

			ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
			    sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
		}

		/* strip the FCS before handing the frame up */
		m_adj(m, -IEEE80211_CRC_LEN);

		/*
		 * Locate the node for sender, track state, and then
		 * pass the (referenced) node up to the 802.11 layer
		 * for its use.
		 */
		ni = ieee80211_find_rxnode_withkey(ic,
			mtod(m, const struct ieee80211_frame_min *),
			rs->rs_keyix == HAL_RXKEYIX_INVALID ?
				IEEE80211_KEYIX_NONE : rs->rs_keyix);
		if (ni != NULL) {
			/*
			 * Sending station is known, dispatch directly.
			 */
			type = ieee80211_input(ni, m,
			    rs->rs_rssi, nf, rs->rs_tstamp);
			ieee80211_free_node(ni);
			/*
			 * Arrange to update the last rx timestamp only for
			 * frames from our ap when operating in station mode.
			 * This assumes the rx key is always setup when
			 * associated.
			 */
			if (ic->ic_opmode == IEEE80211_M_STA &&
			    rs->rs_keyix != HAL_RXKEYIX_INVALID)
				ngood++;
		} else {
			type = ieee80211_input_all(ic, m,
			    rs->rs_rssi, nf, rs->rs_tstamp);
		}
		/*
		 * Track rx rssi and do any rx antenna management.
		 */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
		if (sc->sc_diversity) {
			/*
			 * When using fast diversity, change the default rx
			 * antenna if diversity chooses the other antenna 3
			 * times in a row.
			 */
			if (sc->sc_defant != rs->rs_antenna) {
				if (++sc->sc_rxotherant >= 3)
					ath_setdefantenna(sc, rs->rs_antenna);
			} else
				sc->sc_rxotherant = 0;
		}
		if (sc->sc_softled) {
			/*
			 * Blink for any data frame.  Otherwise do a
			 * heartbeat-style blink when idle.  The latter
			 * is mainly for station mode where we depend on
			 * periodic beacon frames to trigger the poll event.
			 */
			if (type == IEEE80211_FC0_TYPE_DATA) {
				const HAL_RATE_TABLE *rt = sc->sc_currates;
				ath_led_event(sc,
				    rt->rateCodeToIndex[rs->rs_rate]);
			} else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
				ath_led_event(sc, 0);
		}
rx_next:
		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
	} while (ath_rxbuf_init(sc, bf) == 0);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
	if (ngood)
		sc->sc_lastrx = tsf;

	/* kick the transmit path in case it stalled waiting for rx */
	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd))
		ath_start(ifp);

#undef PA2DESC
}
4279 
4280 static void
4281 ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
4282 {
4283 	txq->axq_qnum = qnum;
4284 	txq->axq_depth = 0;
4285 	txq->axq_intrcnt = 0;
4286 	txq->axq_link = NULL;
4287 	STAILQ_INIT(&txq->axq_q);
4288 	ATH_TXQ_LOCK_INIT(sc, txq);
4289 	TAILQ_INIT(&txq->axq_stageq);
4290 	txq->axq_curage = 0;
4291 }
4292 
4293 /*
4294  * Setup a h/w transmit queue.
4295  */
4296 static struct ath_txq *
4297 ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
4298 {
4299 #define	N(a)	(sizeof(a)/sizeof(a[0]))
4300 	struct ath_hal *ah = sc->sc_ah;
4301 	HAL_TXQ_INFO qi;
4302 	int qnum;
4303 
4304 	memset(&qi, 0, sizeof(qi));
4305 	qi.tqi_subtype = subtype;
4306 	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
4307 	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
4308 	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
4309 	/*
4310 	 * Enable interrupts only for EOL and DESC conditions.
4311 	 * We mark tx descriptors to receive a DESC interrupt
4312 	 * when a tx queue gets deep; otherwise waiting for the
4313 	 * EOL to reap descriptors.  Note that this is done to
4314 	 * reduce interrupt load and this only defers reaping
4315 	 * descriptors, never transmitting frames.  Aside from
4316 	 * reducing interrupts this also permits more concurrency.
4317 	 * The only potential downside is if the tx queue backs
4318 	 * up in which case the top half of the kernel may backup
4319 	 * due to a lack of tx descriptors.
4320 	 */
4321 	qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
4322 	qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
4323 	if (qnum == -1) {
4324 		/*
4325 		 * NB: don't print a message, this happens
4326 		 * normally on parts with too few tx queues
4327 		 */
4328 		return NULL;
4329 	}
4330 	if (qnum >= N(sc->sc_txq)) {
4331 		device_printf(sc->sc_dev,
4332 			"hal qnum %u out of range, max %zu!\n",
4333 			qnum, N(sc->sc_txq));
4334 		ath_hal_releasetxqueue(ah, qnum);
4335 		return NULL;
4336 	}
4337 	if (!ATH_TXQ_SETUP(sc, qnum)) {
4338 		ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
4339 		sc->sc_txqsetup |= 1<<qnum;
4340 	}
4341 	return &sc->sc_txq[qnum];
4342 #undef N
4343 }
4344 
4345 /*
4346  * Setup a hardware data transmit queue for the specified
4347  * access control.  The hal may not support all requested
4348  * queues in which case it will return a reference to a
4349  * previously setup queue.  We record the mapping from ac's
4350  * to h/w queues for use by ath_tx_start and also track
4351  * the set of h/w queues being used to optimize work in the
4352  * transmit interrupt handler and related routines.
4353  */
4354 static int
4355 ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
4356 {
4357 #define	N(a)	(sizeof(a)/sizeof(a[0]))
4358 	struct ath_txq *txq;
4359 
4360 	if (ac >= N(sc->sc_ac2q)) {
4361 		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
4362 			ac, N(sc->sc_ac2q));
4363 		return 0;
4364 	}
4365 	txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
4366 	if (txq != NULL) {
4367 		sc->sc_ac2q[ac] = txq;
4368 		return 1;
4369 	} else
4370 		return 0;
4371 #undef N
4372 }
4373 
4374 /*
4375  * Update WME parameters for a transmit queue.
4376  */
4377 static int
4378 ath_txq_update(struct ath_softc *sc, int ac)
4379 {
4380 #define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
4381 #define	ATH_TXOP_TO_US(v)		(v<<5)
4382 	struct ifnet *ifp = sc->sc_ifp;
4383 	struct ieee80211com *ic = ifp->if_l2com;
4384 	struct ath_txq *txq = sc->sc_ac2q[ac];
4385 	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
4386 	struct ath_hal *ah = sc->sc_ah;
4387 	HAL_TXQ_INFO qi;
4388 
4389 	ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
4390 	qi.tqi_aifs = wmep->wmep_aifsn;
4391 	qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
4392 	qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
4393 	qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
4394 
4395 	if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
4396 		if_printf(ifp, "unable to update hardware queue "
4397 			"parameters for %s traffic!\n",
4398 			ieee80211_wme_acnames[ac]);
4399 		return 0;
4400 	} else {
4401 		ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
4402 		return 1;
4403 	}
4404 #undef ATH_TXOP_TO_US
4405 #undef ATH_EXPONENT_TO_VALUE
4406 }
4407 
4408 /*
4409  * Callback from the 802.11 layer to update WME parameters.
4410  */
4411 static int
4412 ath_wme_update(struct ieee80211com *ic)
4413 {
4414 	struct ath_softc *sc = ic->ic_ifp->if_softc;
4415 
4416 	return !ath_txq_update(sc, WME_AC_BE) ||
4417 	    !ath_txq_update(sc, WME_AC_BK) ||
4418 	    !ath_txq_update(sc, WME_AC_VI) ||
4419 	    !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
4420 }
4421 
4422 /*
4423  * Reclaim resources for a setup queue.
4424  */
static void
ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{

	/* give the h/w queue back to the hal, then tear down s/w state */
	ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	ATH_TXQ_LOCK_DESTROY(txq);
	/* clear the setup bit so ATH_TXQ_SETUP no longer reports this queue */
	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
}
4433 
4434 /*
4435  * Reclaim all tx queue resources.
4436  */
4437 static void
4438 ath_tx_cleanup(struct ath_softc *sc)
4439 {
4440 	int i;
4441 
4442 	ATH_TXBUF_LOCK_DESTROY(sc);
4443 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4444 		if (ATH_TXQ_SETUP(sc, i))
4445 			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
4446 }
4447 
4448 /*
4449  * Return h/w rate index for an IEEE rate (w/o basic rate bit).
4450  */
4451 static int
4452 ath_tx_findrix(const HAL_RATE_TABLE *rt, int rate)
4453 {
4454 	int i;
4455 
4456 	for (i = 0; i < rt->rateCount; i++)
4457 		if ((rt->info[i].dot11Rate & IEEE80211_RATE_VAL) == rate)
4458 			return i;
4459 	return 0;		/* NB: lowest rate */
4460 }
4461 
4462 /*
4463  * Reclaim mbuf resources.  For fragmented frames we
4464  * need to claim each frag chained with m_nextpkt.
4465  */
4466 static void
4467 ath_freetx(struct mbuf *m)
4468 {
4469 	struct mbuf *next;
4470 
4471 	do {
4472 		next = m->m_nextpkt;
4473 		m->m_nextpkt = NULL;
4474 		m_freem(m);
4475 	} while ((m = next) != NULL);
4476 }
4477 
/*
 * Map m0 for transmit DMA into bf's segment list, collapsing the
 * mbuf chain when it needs more than ATH_TXDESC segments.  On
 * success bf->bf_m/bf_segs/bf_nseg are valid and the map is synced
 * for PREWRITE; on any failure the mbuf (chain) has been freed and
 * an errno value is returned.
 */
static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		/* force the linearize path below by faking the count */
		bf->bf_nseg = ATH_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_TXDESC) {		/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
4535 
/*
 * Complete the descriptor chain for a tx frame, append it to the
 * target queue, and start the DMA engine.  Frames for the special
 * software (CAB/mcast) queue are only chained here; they are pushed
 * to the hardware from the SWBA handler at DTIM.
 */
static void
ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds, *ds0;
	int i;

	/*
	 * Fillin the remainder of the descriptor info.
	 */
	ds0 = ds = bf->bf_desc;
	for (i = 0; i < bf->bf_nseg; i++, ds++) {
		ds->ds_data = bf->bf_segs[i].ds_addr;
		if (i == bf->bf_nseg - 1)
			ds->ds_link = 0;
		else
			ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
		ath_hal_filltxdesc(ah, ds
			, bf->bf_segs[i].ds_len	/* segment length */
			, i == 0		/* first segment */
			, i == bf->bf_nseg - 1	/* last segment */
			, ds0			/* first descriptor */
		);
		DPRINTF(sc, ATH_DEBUG_XMIT,
			"%s: %d: %08x %08x %08x %08x %08x %08x\n",
			__func__, i, ds->ds_link, ds->ds_data,
			ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
	}
	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TXQ_LOCK(txq);
	if (txq->axq_qnum != ATH_TXQ_SWQ) {
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		if (txq->axq_link == NULL) {
			/* queue was empty: point the h/w at our first desc */
			ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: TXDP[%u] = %p (%p) depth %d\n", __func__,
			    txq->axq_qnum, (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
		} else {
			/* chain onto the previous frame's last descriptor */
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
		}
		txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
		ath_hal_txstart(ah, txq->axq_qnum);
	} else {
		if (txq->axq_link != NULL) {
			struct ath_buf *last = ATH_TXQ_LAST(txq);
			struct ieee80211_frame *wh;

			/* mark previous frame */
			wh = mtod(last->bf_m, struct ieee80211_frame *);
			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
			bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap,
			    BUS_DMASYNC_PREWRITE);

			/* link descriptor */
			*txq->axq_link = bf->bf_daddr;
		}
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
	}
	ATH_TXQ_UNLOCK(txq);
}
4609 
4610 static int
4611 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf,
4612     struct mbuf *m0)
4613 {
4614 	struct ieee80211vap *vap = ni->ni_vap;
4615 	struct ath_vap *avp = ATH_VAP(vap);
4616 	struct ath_hal *ah = sc->sc_ah;
4617 	struct ifnet *ifp = sc->sc_ifp;
4618 	struct ieee80211com *ic = ifp->if_l2com;
4619 	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
4620 	int error, iswep, ismcast, isfrag, ismrr;
4621 	int keyix, hdrlen, pktlen, try0;
4622 	u_int8_t rix, txrate, ctsrate;
4623 	u_int8_t cix = 0xff;		/* NB: silence compiler */
4624 	struct ath_desc *ds;
4625 	struct ath_txq *txq;
4626 	struct ieee80211_frame *wh;
4627 	u_int subtype, flags, ctsduration;
4628 	HAL_PKT_TYPE atype;
4629 	const HAL_RATE_TABLE *rt;
4630 	HAL_BOOL shortPreamble;
4631 	struct ath_node *an;
4632 	u_int pri;
4633 
4634 	wh = mtod(m0, struct ieee80211_frame *);
4635 	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
4636 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
4637 	isfrag = m0->m_flags & M_FRAG;
4638 	hdrlen = ieee80211_anyhdrsize(wh);
4639 	/*
4640 	 * Packet length must not include any
4641 	 * pad bytes; deduct them here.
4642 	 */
4643 	pktlen = m0->m_pkthdr.len - (hdrlen & 3);
4644 
4645 	if (iswep) {
4646 		const struct ieee80211_cipher *cip;
4647 		struct ieee80211_key *k;
4648 
4649 		/*
4650 		 * Construct the 802.11 header+trailer for an encrypted
4651 		 * frame. The only reason this can fail is because of an
4652 		 * unknown or unsupported cipher/key type.
4653 		 */
4654 		k = ieee80211_crypto_encap(ni, m0);
4655 		if (k == NULL) {
4656 			/*
4657 			 * This can happen when the key is yanked after the
4658 			 * frame was queued.  Just discard the frame; the
4659 			 * 802.11 layer counts failures and provides
4660 			 * debugging/diagnostics.
4661 			 */
4662 			ath_freetx(m0);
4663 			return EIO;
4664 		}
4665 		/*
4666 		 * Adjust the packet + header lengths for the crypto
4667 		 * additions and calculate the h/w key index.  When
4668 		 * a s/w mic is done the frame will have had any mic
4669 		 * added to it prior to entry so m0->m_pkthdr.len will
4670 		 * account for it. Otherwise we need to add it to the
4671 		 * packet length.
4672 		 */
4673 		cip = k->wk_cipher;
4674 		hdrlen += cip->ic_header;
4675 		pktlen += cip->ic_header + cip->ic_trailer;
4676 		/* NB: frags always have any TKIP MIC done in s/w */
4677 		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
4678 			pktlen += cip->ic_miclen;
4679 		keyix = k->wk_keyix;
4680 
4681 		/* packet header may have moved, reset our local pointer */
4682 		wh = mtod(m0, struct ieee80211_frame *);
4683 	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
4684 		/*
4685 		 * Use station key cache slot, if assigned.
4686 		 */
4687 		keyix = ni->ni_ucastkey.wk_keyix;
4688 		if (keyix == IEEE80211_KEYIX_NONE)
4689 			keyix = HAL_TXKEYIX_INVALID;
4690 	} else
4691 		keyix = HAL_TXKEYIX_INVALID;
4692 
4693 	pktlen += IEEE80211_CRC_LEN;
4694 
4695 	/*
4696 	 * Load the DMA map so any coalescing is done.  This
4697 	 * also calculates the number of descriptors we need.
4698 	 */
4699 	error = ath_tx_dmasetup(sc, bf, m0);
4700 	if (error != 0)
4701 		return error;
4702 	bf->bf_node = ni;			/* NB: held reference */
4703 	m0 = bf->bf_m;				/* NB: may have changed */
4704 	wh = mtod(m0, struct ieee80211_frame *);
4705 
4706 	/* setup descriptors */
4707 	ds = bf->bf_desc;
4708 	rt = sc->sc_currates;
4709 	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
4710 
4711 	/*
4712 	 * NB: the 802.11 layer marks whether or not we should
4713 	 * use short preamble based on the current mode and
4714 	 * negotiated parameters.
4715 	 */
4716 	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
4717 	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
4718 		shortPreamble = AH_TRUE;
4719 		sc->sc_stats.ast_tx_shortpre++;
4720 	} else {
4721 		shortPreamble = AH_FALSE;
4722 	}
4723 
4724 	an = ATH_NODE(ni);
4725 	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
4726 	ismrr = 0;				/* default no multi-rate retry*/
4727 	pri = M_WME_GETAC(m0);			/* honor classification */
4728 	/* XXX use txparams instead of fixed values */
4729 	/*
4730 	 * Calculate Atheros packet type from IEEE80211 packet header,
4731 	 * setup for rate calculations, and select h/w transmit queue.
4732 	 */
4733 	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
4734 	case IEEE80211_FC0_TYPE_MGT:
4735 		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4736 		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
4737 			atype = HAL_PKT_TYPE_BEACON;
4738 		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
4739 			atype = HAL_PKT_TYPE_PROBE_RESP;
4740 		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
4741 			atype = HAL_PKT_TYPE_ATIM;
4742 		else
4743 			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
4744 		rix = an->an_mgmtrix;
4745 		txrate = rt->info[rix].rateCode;
4746 		if (shortPreamble)
4747 			txrate |= rt->info[rix].shortPreamble;
4748 		try0 = ATH_TXMGTTRY;
4749 		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
4750 		break;
4751 	case IEEE80211_FC0_TYPE_CTL:
4752 		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
4753 		rix = an->an_mgmtrix;
4754 		txrate = rt->info[rix].rateCode;
4755 		if (shortPreamble)
4756 			txrate |= rt->info[rix].shortPreamble;
4757 		try0 = ATH_TXMGTTRY;
4758 		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
4759 		break;
4760 	case IEEE80211_FC0_TYPE_DATA:
4761 		atype = HAL_PKT_TYPE_NORMAL;		/* default */
4762 		/*
4763 		 * Data frames: multicast frames go out at a fixed rate,
4764 		 * EAPOL frames use the mgmt frame rate; otherwise consult
4765 		 * the rate control module for the rate to use.
4766 		 */
4767 		if (ismcast) {
4768 			rix = an->an_mcastrix;
4769 			txrate = rt->info[rix].rateCode;
4770 			if (shortPreamble)
4771 				txrate |= rt->info[rix].shortPreamble;
4772 			try0 = 1;
4773 		} else if (m0->m_flags & M_EAPOL) {
4774 			/* XXX? maybe always use long preamble? */
4775 			rix = an->an_mgmtrix;
4776 			txrate = rt->info[rix].rateCode;
4777 			if (shortPreamble)
4778 				txrate |= rt->info[rix].shortPreamble;
4779 			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
4780 		} else {
4781 			ath_rate_findrate(sc, an, shortPreamble, pktlen,
4782 				&rix, &try0, &txrate);
4783 			sc->sc_txrix = rix;		/* for LED blinking */
4784 			sc->sc_lastdatarix = rix;	/* for fast frames */
4785 			if (try0 != ATH_TXMAXTRY)
4786 				ismrr = 1;
4787 		}
4788 		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
4789 			flags |= HAL_TXDESC_NOACK;
4790 		break;
4791 	default:
4792 		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
4793 			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
4794 		/* XXX statistic */
4795 		ath_freetx(m0);
4796 		return EIO;
4797 	}
4798 	txq = sc->sc_ac2q[pri];
4799 
4800 	/*
4801 	 * When servicing one or more stations in power-save mode
4802 	 * (or) if there is some mcast data waiting on the mcast
4803 	 * queue (to prevent out of order delivery) multicast
4804 	 * frames must be buffered until after the beacon.
4805 	 */
4806 	if (ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth))
4807 		txq = &avp->av_mcastq;
4808 
4809 	/*
4810 	 * Calculate miscellaneous flags.
4811 	 */
4812 	if (ismcast) {
4813 		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
4814 	} else if (pktlen > vap->iv_rtsthreshold &&
4815 	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
4816 		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
4817 		cix = rt->info[rix].controlRate;
4818 		sc->sc_stats.ast_tx_rts++;
4819 	}
4820 	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
4821 		sc->sc_stats.ast_tx_noack++;
4822 
4823 	/*
4824 	 * If 802.11g protection is enabled, determine whether
4825 	 * to use RTS/CTS or just CTS.  Note that this is only
4826 	 * done for OFDM unicast frames.
4827 	 */
4828 	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
4829 	    rt->info[rix].phy == IEEE80211_T_OFDM &&
4830 	    (flags & HAL_TXDESC_NOACK) == 0) {
4831 		/* XXX fragments must use CCK rates w/ protection */
4832 		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
4833 			flags |= HAL_TXDESC_RTSENA;
4834 		else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
4835 			flags |= HAL_TXDESC_CTSENA;
4836 		if (isfrag) {
4837 			/*
4838 			 * For frags it would be desirable to use the
4839 			 * highest CCK rate for RTS/CTS.  But stations
4840 			 * farther away may detect it at a lower CCK rate
4841 			 * so use the configured protection rate instead
4842 			 * (for now).
4843 			 */
4844 			cix = rt->info[sc->sc_protrix].controlRate;
4845 		} else
4846 			cix = rt->info[sc->sc_protrix].controlRate;
4847 		sc->sc_stats.ast_tx_protect++;
4848 	}
4849 
4850 	/*
4851 	 * Calculate duration.  This logically belongs in the 802.11
4852 	 * layer but it lacks sufficient information to calculate it.
4853 	 */
4854 	if ((flags & HAL_TXDESC_NOACK) == 0 &&
4855 	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
4856 		u_int16_t dur;
4857 		if (shortPreamble)
4858 			dur = rt->info[rix].spAckDuration;
4859 		else
4860 			dur = rt->info[rix].lpAckDuration;
4861 		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
4862 			dur += dur;		/* additional SIFS+ACK */
4863 			KASSERT(m0->m_nextpkt != NULL, ("no fragment"));
4864 			/*
4865 			 * Include the size of next fragment so NAV is
4866 			 * updated properly.  The last fragment uses only
4867 			 * the ACK duration
4868 			 */
4869 			dur += ath_hal_computetxtime(ah, rt,
4870 					m0->m_nextpkt->m_pkthdr.len,
4871 					rix, shortPreamble);
4872 		}
4873 		if (isfrag) {
4874 			/*
4875 			 * Force hardware to use computed duration for next
4876 			 * fragment by disabling multi-rate retry which updates
4877 			 * duration based on the multi-rate duration table.
4878 			 */
4879 			ismrr = 0;
4880 			try0 = ATH_TXMGTTRY;	/* XXX? */
4881 		}
4882 		*(u_int16_t *)wh->i_dur = htole16(dur);
4883 	}
4884 
4885 	/*
4886 	 * Calculate RTS/CTS rate and duration if needed.
4887 	 */
4888 	ctsduration = 0;
4889 	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
4890 		/*
4891 		 * CTS transmit rate is derived from the transmit rate
4892 		 * by looking in the h/w rate table.  We must also factor
4893 		 * in whether or not a short preamble is to be used.
4894 		 */
4895 		/* NB: cix is set above where RTS/CTS is enabled */
4896 		KASSERT(cix != 0xff, ("cix not setup"));
4897 		ctsrate = rt->info[cix].rateCode;
4898 		/*
4899 		 * Compute the transmit duration based on the frame
4900 		 * size and the size of an ACK frame.  We call into the
4901 		 * HAL to do the computation since it depends on the
4902 		 * characteristics of the actual PHY being used.
4903 		 *
4904 		 * NB: CTS is assumed the same size as an ACK so we can
4905 		 *     use the precalculated ACK durations.
4906 		 */
4907 		if (shortPreamble) {
4908 			ctsrate |= rt->info[cix].shortPreamble;
4909 			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
4910 				ctsduration += rt->info[cix].spAckDuration;
4911 			ctsduration += ath_hal_computetxtime(ah,
4912 				rt, pktlen, rix, AH_TRUE);
4913 			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
4914 				ctsduration += rt->info[rix].spAckDuration;
4915 		} else {
4916 			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
4917 				ctsduration += rt->info[cix].lpAckDuration;
4918 			ctsduration += ath_hal_computetxtime(ah,
4919 				rt, pktlen, rix, AH_FALSE);
4920 			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
4921 				ctsduration += rt->info[rix].lpAckDuration;
4922 		}
4923 		/*
4924 		 * Must disable multi-rate retry when using RTS/CTS.
4925 		 */
4926 		ismrr = 0;
4927 		try0 = ATH_TXMGTTRY;		/* XXX */
4928 	} else
4929 		ctsrate = 0;
4930 
4931 	/*
4932 	 * At this point we are committed to sending the frame
4933 	 * and we don't need to look at m_nextpkt; clear it in
4934 	 * case this frame is part of frag chain.
4935 	 */
4936 	m0->m_nextpkt = NULL;
4937 
4938 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
4939 		ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
4940 			sc->sc_hwmap[rix].ieeerate, -1);
4941 
4942 	if (bpf_peers_present(ifp->if_bpf)) {
4943 		u_int64_t tsf = ath_hal_gettsf64(ah);
4944 
4945 		sc->sc_tx_th.wt_tsf = htole64(tsf);
4946 		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
4947 		if (iswep)
4948 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4949 		if (isfrag)
4950 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
4951 		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
4952 		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
4953 		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
4954 
4955 		bpf_mtap2(ifp->if_bpf, &sc->sc_tx_th, sc->sc_tx_th_len, m0);
4956 	}
4957 
4958 	/*
4959 	 * Determine if a tx interrupt should be generated for
4960 	 * this descriptor.  We take a tx interrupt to reap
4961 	 * descriptors when the h/w hits an EOL condition or
4962 	 * when the descriptor is specifically marked to generate
4963 	 * an interrupt.  We periodically mark descriptors in this
4964 	 * way to insure timely replenishing of the supply needed
4965 	 * for sending frames.  Defering interrupts reduces system
4966 	 * load and potentially allows more concurrent work to be
4967 	 * done but if done to aggressively can cause senders to
4968 	 * backup.
4969 	 *
4970 	 * NB: use >= to deal with sc_txintrperiod changing
4971 	 *     dynamically through sysctl.
4972 	 */
4973 	if (flags & HAL_TXDESC_INTREQ) {
4974 		txq->axq_intrcnt = 0;
4975 	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
4976 		flags |= HAL_TXDESC_INTREQ;
4977 		txq->axq_intrcnt = 0;
4978 	}
4979 
4980 	/*
4981 	 * Formulate first tx descriptor with tx controls.
4982 	 */
4983 	/* XXX check return value? */
4984 	ath_hal_setuptxdesc(ah, ds
4985 		, pktlen		/* packet length */
4986 		, hdrlen		/* header length */
4987 		, atype			/* Atheros packet type */
4988 		, ni->ni_txpower	/* txpower */
4989 		, txrate, try0		/* series 0 rate/tries */
4990 		, keyix			/* key cache index */
4991 		, sc->sc_txantenna	/* antenna mode */
4992 		, flags			/* flags */
4993 		, ctsrate		/* rts/cts rate */
4994 		, ctsduration		/* rts/cts duration */
4995 	);
4996 	bf->bf_txflags = flags;
4997 	/*
4998 	 * Setup the multi-rate retry state only when we're
4999 	 * going to use it.  This assumes ath_hal_setuptxdesc
5000 	 * initializes the descriptors (so we don't have to)
5001 	 * when the hardware supports multi-rate retry and
5002 	 * we don't use it.
5003 	 */
5004 	if (ismrr)
5005 		ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix);
5006 
5007 	ath_tx_handoff(sc, txq, bf);
5008 	return 0;
5009 }
5010 
5011 /*
5012  * Process completed xmit descriptors from the specified queue.
5013  */
5014 static int
5015 ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
5016 {
5017 	struct ath_hal *ah = sc->sc_ah;
5018 	struct ifnet *ifp = sc->sc_ifp;
5019 	struct ieee80211com *ic = ifp->if_l2com;
5020 	struct ath_buf *bf;
5021 	struct ath_desc *ds, *ds0;
5022 	struct ath_tx_status *ts;
5023 	struct ieee80211_node *ni;
5024 	struct ath_node *an;
5025 	int sr, lr, pri, nacked;
5026 	HAL_STATUS status;
5027 
5028 	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
5029 		__func__, txq->axq_qnum,
5030 		(caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
5031 		txq->axq_link);
5032 	nacked = 0;
5033 	for (;;) {
5034 		ATH_TXQ_LOCK(txq);
5035 		txq->axq_intrcnt = 0;	/* reset periodic desc intr count */
5036 		bf = STAILQ_FIRST(&txq->axq_q);
5037 		if (bf == NULL) {
5038 			ATH_TXQ_UNLOCK(txq);
5039 			break;
5040 		}
5041 		ds0 = &bf->bf_desc[0];
5042 		ds = &bf->bf_desc[bf->bf_nseg - 1];
5043 		ts = &bf->bf_status.ds_txstat;
5044 		status = ath_hal_txprocdesc(ah, ds, ts);
5045 #ifdef ATH_DEBUG
5046 		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
5047 			ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
5048 			    status == HAL_OK);
5049 #endif
5050 		if (status == HAL_EINPROGRESS) {
5051 			ATH_TXQ_UNLOCK(txq);
5052 			break;
5053 		}
5054 		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
5055 		if (txq->axq_depth == 0)
5056 			txq->axq_link = NULL;
5057 		ATH_TXQ_UNLOCK(txq);
5058 
5059 		ni = bf->bf_node;
5060 		if (ni != NULL) {
5061 			an = ATH_NODE(ni);
5062 			if (ts->ts_status == 0) {
5063 				u_int8_t txant = ts->ts_antenna;
5064 				sc->sc_stats.ast_ant_tx[txant]++;
5065 				sc->sc_ant_tx[txant]++;
5066 				if (ts->ts_rate & HAL_TXSTAT_ALTRATE)
5067 					sc->sc_stats.ast_tx_altrate++;
5068 				pri = M_WME_GETAC(bf->bf_m);
5069 				if (pri >= WME_AC_VO)
5070 					ic->ic_wme.wme_hipri_traffic++;
5071 				if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)
5072 					ni->ni_inact = ni->ni_inact_reload;
5073 			} else {
5074 				if (ts->ts_status & HAL_TXERR_XRETRY)
5075 					sc->sc_stats.ast_tx_xretries++;
5076 				if (ts->ts_status & HAL_TXERR_FIFO)
5077 					sc->sc_stats.ast_tx_fifoerr++;
5078 				if (ts->ts_status & HAL_TXERR_FILT)
5079 					sc->sc_stats.ast_tx_filtered++;
5080 				if (bf->bf_m->m_flags & M_FF)
5081 					sc->sc_stats.ast_ff_txerr++;
5082 			}
5083 			sr = ts->ts_shortretry;
5084 			lr = ts->ts_longretry;
5085 			sc->sc_stats.ast_tx_shortretry += sr;
5086 			sc->sc_stats.ast_tx_longretry += lr;
5087 			/*
5088 			 * Hand the descriptor to the rate control algorithm.
5089 			 */
5090 			if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
5091 			    (bf->bf_txflags & HAL_TXDESC_NOACK) == 0) {
5092 				/*
5093 				 * If frame was ack'd update statistics,
5094 				 * including the last rx time used to
5095 				 * workaround phantom bmiss interrupts.
5096 				 */
5097 				if (ts->ts_status == 0) {
5098 					nacked++;
5099 					sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
5100 					ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
5101 						ts->ts_rssi);
5102 				}
5103 				ath_rate_tx_complete(sc, an, bf);
5104 			}
5105 			/*
5106 			 * Do any tx complete callback.  Note this must
5107 			 * be done before releasing the node reference.
5108 			 */
5109 			if (bf->bf_m->m_flags & M_TXCB)
5110 				ieee80211_process_callback(ni, bf->bf_m,
5111 				    (bf->bf_txflags & HAL_TXDESC_NOACK) == 0 ?
5112 				        ts->ts_status : HAL_TXERR_XRETRY);
5113 			/*
5114 			 * Reclaim reference to node.
5115 			 *
5116 			 * NB: the node may be reclaimed here if, for example
5117 			 *     this is a DEAUTH message that was sent and the
5118 			 *     node was timed out due to inactivity.
5119 			 */
5120 			ieee80211_free_node(ni);
5121 		}
5122 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
5123 		    BUS_DMASYNC_POSTWRITE);
5124 		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
5125 
5126 		m_freem(bf->bf_m);
5127 		bf->bf_m = NULL;
5128 		bf->bf_node = NULL;
5129 
5130 		ATH_TXBUF_LOCK(sc);
5131 		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
5132 		ATH_TXBUF_UNLOCK(sc);
5133 	}
5134 	/*
5135 	 * Flush fast-frame staging queue when traffic slows.
5136 	 */
5137 	if (txq->axq_depth <= 1)
5138 		ath_ff_stageq_flush(sc, txq, ath_ff_always);
5139 	return nacked;
5140 }
5141 
5142 static __inline int
5143 txqactive(struct ath_hal *ah, int qnum)
5144 {
5145 	u_int32_t txqs = 1<<qnum;
5146 	ath_hal_gettxintrtxqs(ah, &txqs);
5147 	return (txqs & (1<<qnum));
5148 }
5149 
5150 /*
5151  * Deferred processing of transmit interrupt; special-cased
5152  * for a single hardware transmit queue (e.g. 5210 and 5211).
5153  */
5154 static void
5155 ath_tx_proc_q0(void *arg, int npending)
5156 {
5157 	struct ath_softc *sc = arg;
5158 	struct ifnet *ifp = sc->sc_ifp;
5159 
5160 	if (txqactive(sc->sc_ah, 0) && ath_tx_processq(sc, &sc->sc_txq[0]))
5161 		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
5162 	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
5163 		ath_tx_processq(sc, sc->sc_cabq);
5164 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5165 	ifp->if_timer = 0;
5166 
5167 	if (sc->sc_softled)
5168 		ath_led_event(sc, sc->sc_txrix);
5169 
5170 	ath_start(ifp);
5171 }
5172 
5173 /*
5174  * Deferred processing of transmit interrupt; special-cased
5175  * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
5176  */
5177 static void
5178 ath_tx_proc_q0123(void *arg, int npending)
5179 {
5180 	struct ath_softc *sc = arg;
5181 	struct ifnet *ifp = sc->sc_ifp;
5182 	int nacked;
5183 
5184 	/*
5185 	 * Process each active queue.
5186 	 */
5187 	nacked = 0;
5188 	if (txqactive(sc->sc_ah, 0))
5189 		nacked += ath_tx_processq(sc, &sc->sc_txq[0]);
5190 	if (txqactive(sc->sc_ah, 1))
5191 		nacked += ath_tx_processq(sc, &sc->sc_txq[1]);
5192 	if (txqactive(sc->sc_ah, 2))
5193 		nacked += ath_tx_processq(sc, &sc->sc_txq[2]);
5194 	if (txqactive(sc->sc_ah, 3))
5195 		nacked += ath_tx_processq(sc, &sc->sc_txq[3]);
5196 	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
5197 		ath_tx_processq(sc, sc->sc_cabq);
5198 	if (nacked)
5199 		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
5200 
5201 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5202 	ifp->if_timer = 0;
5203 
5204 	if (sc->sc_softled)
5205 		ath_led_event(sc, sc->sc_txrix);
5206 
5207 	ath_start(ifp);
5208 }
5209 
5210 /*
5211  * Deferred processing of transmit interrupt.
5212  */
5213 static void
5214 ath_tx_proc(void *arg, int npending)
5215 {
5216 	struct ath_softc *sc = arg;
5217 	struct ifnet *ifp = sc->sc_ifp;
5218 	int i, nacked;
5219 
5220 	/*
5221 	 * Process each active queue.
5222 	 */
5223 	nacked = 0;
5224 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
5225 		if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
5226 			nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
5227 	if (nacked)
5228 		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
5229 
5230 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5231 	ifp->if_timer = 0;
5232 
5233 	if (sc->sc_softled)
5234 		ath_led_event(sc, sc->sc_txrix);
5235 
5236 	ath_start(ifp);
5237 }
5238 
5239 static void
5240 ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
5241 {
5242 #ifdef ATH_DEBUG
5243 	struct ath_hal *ah = sc->sc_ah;
5244 #endif
5245 	struct ieee80211_node *ni;
5246 	struct ath_buf *bf;
5247 	u_int ix;
5248 
5249 	/*
5250 	 * NB: this assumes output has been stopped and
5251 	 *     we do not need to block ath_tx_proc
5252 	 */
5253 	for (ix = 0;; ix++) {
5254 		ATH_TXQ_LOCK(txq);
5255 		bf = STAILQ_FIRST(&txq->axq_q);
5256 		if (bf == NULL) {
5257 			txq->axq_link = NULL;
5258 			ATH_TXQ_UNLOCK(txq);
5259 			break;
5260 		}
5261 		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
5262 		ATH_TXQ_UNLOCK(txq);
5263 #ifdef ATH_DEBUG
5264 		if (sc->sc_debug & ATH_DEBUG_RESET) {
5265 			struct ieee80211com *ic = sc->sc_ifp->if_l2com;
5266 
5267 			ath_printtxbuf(sc, bf, txq->axq_qnum, ix,
5268 				ath_hal_txprocdesc(ah, bf->bf_desc,
5269 				    &bf->bf_status.ds_txstat) == HAL_OK);
5270 			ieee80211_dump_pkt(ic, mtod(bf->bf_m, caddr_t),
5271 				bf->bf_m->m_len, 0, -1);
5272 		}
5273 #endif /* ATH_DEBUG */
5274 		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
5275 		ni = bf->bf_node;
5276 		bf->bf_node = NULL;
5277 		if (ni != NULL) {
5278 			/*
5279 			 * Do any callback and reclaim the node reference.
5280 			 */
5281 			if (bf->bf_m->m_flags & M_TXCB)
5282 				ieee80211_process_callback(ni, bf->bf_m, -1);
5283 			ieee80211_free_node(ni);
5284 		}
5285 		m_freem(bf->bf_m);
5286 		bf->bf_m = NULL;
5287 
5288 		ATH_TXBUF_LOCK(sc);
5289 		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
5290 		ATH_TXBUF_UNLOCK(sc);
5291 	}
5292 }
5293 
5294 static void
5295 ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
5296 {
5297 	struct ath_hal *ah = sc->sc_ah;
5298 
5299 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5300 	    __func__, txq->axq_qnum,
5301 	    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
5302 	    txq->axq_link);
5303 	(void) ath_hal_stoptxdma(ah, txq->axq_qnum);
5304 }
5305 
5306 /*
5307  * Drain the transmit queues and reclaim resources.
5308  */
5309 static void
5310 ath_draintxq(struct ath_softc *sc)
5311 {
5312 	struct ath_hal *ah = sc->sc_ah;
5313 	struct ifnet *ifp = sc->sc_ifp;
5314 	int i;
5315 
5316 	/* XXX return value */
5317 	if (!sc->sc_invalid) {
5318 		/* don't touch the hardware if marked invalid */
5319 		DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5320 		    __func__, sc->sc_bhalq,
5321 		    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
5322 		    NULL);
5323 		(void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
5324 		for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
5325 			if (ATH_TXQ_SETUP(sc, i))
5326 				ath_tx_stopdma(sc, &sc->sc_txq[i]);
5327 	}
5328 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
5329 		if (ATH_TXQ_SETUP(sc, i))
5330 			ath_tx_draintxq(sc, &sc->sc_txq[i]);
5331 #ifdef ATH_DEBUG
5332 	if (sc->sc_debug & ATH_DEBUG_RESET) {
5333 		struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf);
5334 		if (bf != NULL && bf->bf_m != NULL) {
5335 			ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
5336 				ath_hal_txprocdesc(ah, bf->bf_desc,
5337 				    &bf->bf_status.ds_txstat) == HAL_OK);
5338 			ieee80211_dump_pkt(ifp->if_l2com, mtod(bf->bf_m, caddr_t),
5339 				bf->bf_m->m_len, 0, -1);
5340 		}
5341 	}
5342 #endif /* ATH_DEBUG */
5343 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5344 	ifp->if_timer = 0;
5345 }
5346 
5347 /*
5348  * Disable the receive h/w in preparation for a reset.
5349  */
5350 static void
5351 ath_stoprecv(struct ath_softc *sc)
5352 {
5353 #define	PA2DESC(_sc, _pa) \
5354 	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
5355 		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
5356 	struct ath_hal *ah = sc->sc_ah;
5357 
5358 	ath_hal_stoppcurecv(ah);	/* disable PCU */
5359 	ath_hal_setrxfilter(ah, 0);	/* clear recv filter */
5360 	ath_hal_stopdmarecv(ah);	/* disable DMA engine */
5361 	DELAY(3000);			/* 3ms is long enough for 1 frame */
5362 #ifdef ATH_DEBUG
5363 	if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
5364 		struct ath_buf *bf;
5365 		u_int ix;
5366 
5367 		printf("%s: rx queue %p, link %p\n", __func__,
5368 			(caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink);
5369 		ix = 0;
5370 		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
5371 			struct ath_desc *ds = bf->bf_desc;
5372 			struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
5373 			HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
5374 				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
5375 			if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
5376 				ath_printrxbuf(sc, bf, ix, status == HAL_OK);
5377 			ix++;
5378 		}
5379 	}
5380 #endif
5381 	if (sc->sc_rxpending != NULL) {
5382 		m_freem(sc->sc_rxpending);
5383 		sc->sc_rxpending = NULL;
5384 	}
5385 	sc->sc_rxlink = NULL;		/* just in case */
5386 #undef PA2DESC
5387 }
5388 
5389 /*
5390  * Enable the receive h/w following a reset.
5391  */
5392 static int
5393 ath_startrecv(struct ath_softc *sc)
5394 {
5395 	struct ath_hal *ah = sc->sc_ah;
5396 	struct ath_buf *bf;
5397 
5398 	sc->sc_rxlink = NULL;
5399 	sc->sc_rxpending = NULL;
5400 	STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
5401 		int error = ath_rxbuf_init(sc, bf);
5402 		if (error != 0) {
5403 			DPRINTF(sc, ATH_DEBUG_RECV,
5404 				"%s: ath_rxbuf_init failed %d\n",
5405 				__func__, error);
5406 			return error;
5407 		}
5408 	}
5409 
5410 	bf = STAILQ_FIRST(&sc->sc_rxbuf);
5411 	ath_hal_putrxbuf(ah, bf->bf_daddr);
5412 	ath_hal_rxena(ah);		/* enable recv descriptors */
5413 	ath_mode_init(sc);		/* set filters, etc. */
5414 	ath_hal_startpcurecv(ah);	/* re-enable PCU/DMA engine */
5415 	return 0;
5416 }
5417 
5418 /*
5419  * Update internal state after a channel change.
5420  */
5421 static void
5422 ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
5423 {
5424 	enum ieee80211_phymode mode;
5425 
5426 	/*
5427 	 * Change channels and update the h/w rate map
5428 	 * if we're switching; e.g. 11a to 11b/g.
5429 	 */
5430 	if (IEEE80211_IS_CHAN_HALF(chan))
5431 		mode = IEEE80211_MODE_HALF;
5432 	else if (IEEE80211_IS_CHAN_QUARTER(chan))
5433 		mode = IEEE80211_MODE_QUARTER;
5434 	else
5435 		mode = ieee80211_chan2mode(chan);
5436 	if (mode != sc->sc_curmode)
5437 		ath_setcurmode(sc, mode);
5438 
5439 	sc->sc_rx_th.wr_chan_flags = htole32(chan->ic_flags);
5440 	sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags;
5441 	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
5442 	sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq;
5443 	sc->sc_rx_th.wr_chan_ieee = chan->ic_ieee;
5444 	sc->sc_tx_th.wt_chan_ieee = sc->sc_rx_th.wr_chan_ieee;
5445 	sc->sc_rx_th.wr_chan_maxpow = chan->ic_maxregpower;
5446 	sc->sc_tx_th.wt_chan_maxpow = sc->sc_rx_th.wr_chan_maxpow;
5447 }
5448 
5449 /*
5450  * Set/change channels.  If the channel is really being changed,
5451  * it's done by reseting the chip.  To accomplish this we must
5452  * first cleanup any pending DMA, then restart stuff after a la
5453  * ath_init.
5454  */
5455 static int
5456 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
5457 {
5458 	struct ifnet *ifp = sc->sc_ifp;
5459 	struct ieee80211com *ic = ifp->if_l2com;
5460 	struct ath_hal *ah = sc->sc_ah;
5461 	HAL_CHANNEL hchan;
5462 
5463 	/*
5464 	 * Convert to a HAL channel description with
5465 	 * the flags constrained to reflect the current
5466 	 * operating mode.
5467 	 */
5468 	ath_mapchan(ic, &hchan, chan);
5469 
5470 	DPRINTF(sc, ATH_DEBUG_RESET,
5471 	    "%s: %u (%u MHz, hal flags 0x%x) -> %u (%u MHz, hal flags 0x%x)\n",
5472 	    __func__,
5473 	    ath_hal_mhz2ieee(ah, sc->sc_curchan.channel,
5474 		sc->sc_curchan.channelFlags),
5475 	    	sc->sc_curchan.channel, sc->sc_curchan.channelFlags,
5476 	    ath_hal_mhz2ieee(ah, hchan.channel, hchan.channelFlags),
5477 	        hchan.channel, hchan.channelFlags);
5478 	if (hchan.channel != sc->sc_curchan.channel ||
5479 	    hchan.channelFlags != sc->sc_curchan.channelFlags) {
5480 		HAL_STATUS status;
5481 
5482 		/*
5483 		 * To switch channels clear any pending DMA operations;
5484 		 * wait long enough for the RX fifo to drain, reset the
5485 		 * hardware at the new frequency, and then re-enable
5486 		 * the relevant bits of the h/w.
5487 		 */
5488 		ath_hal_intrset(ah, 0);		/* disable interrupts */
5489 		ath_draintxq(sc);		/* clear pending tx frames */
5490 		ath_stoprecv(sc);		/* turn off frame recv */
5491 		if (!ath_hal_reset(ah, sc->sc_opmode, &hchan, AH_TRUE, &status)) {
5492 			if_printf(ifp, "%s: unable to reset "
5493 			    "channel %u (%u Mhz, flags 0x%x hal flags 0x%x), "
5494 			    "hal status %u\n", __func__,
5495 			    ieee80211_chan2ieee(ic, chan), chan->ic_freq,
5496 			    chan->ic_flags, hchan.channelFlags, status);
5497 			return EIO;
5498 		}
5499 		sc->sc_curchan = hchan;
5500 		sc->sc_diversity = ath_hal_getdiversity(ah);
5501 
5502 		/*
5503 		 * Re-enable rx framework.
5504 		 */
5505 		if (ath_startrecv(sc) != 0) {
5506 			if_printf(ifp, "%s: unable to restart recv logic\n",
5507 			    __func__);
5508 			return EIO;
5509 		}
5510 
5511 		/*
5512 		 * Change channels and update the h/w rate map
5513 		 * if we're switching; e.g. 11a to 11b/g.
5514 		 */
5515 		ath_chan_change(sc, chan);
5516 
5517 		/*
5518 		 * Re-enable interrupts.
5519 		 */
5520 		ath_hal_intrset(ah, sc->sc_imask);
5521 	}
5522 	return 0;
5523 }
5524 
5525 /*
5526  * Periodically recalibrate the PHY to account
5527  * for temperature/environment changes.
5528  */
5529 static void
5530 ath_calibrate(void *arg)
5531 {
5532 	struct ath_softc *sc = arg;
5533 	struct ath_hal *ah = sc->sc_ah;
5534 	struct ifnet *ifp = sc->sc_ifp;
5535 	HAL_BOOL longCal, isCalDone;
5536 	int nextcal;
5537 
5538 	longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
5539 	if (longCal) {
5540 		sc->sc_stats.ast_per_cal++;
5541 		if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
5542 			/*
5543 			 * Rfgain is out of bounds, reset the chip
5544 			 * to load new gain values.
5545 			 */
5546 			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5547 				"%s: rfgain change\n", __func__);
5548 			sc->sc_stats.ast_per_rfgain++;
5549 			ath_reset(ifp);
5550 		}
5551 		/*
5552 		 * If this long cal is after an idle period, then
5553 		 * reset the data collection state so we start fresh.
5554 		 */
5555 		if (sc->sc_resetcal) {
5556 			(void) ath_hal_calreset(ah, &sc->sc_curchan);
5557 			sc->sc_lastcalreset = ticks;
5558 			sc->sc_resetcal = 0;
5559 		}
5560 	}
5561 	if (ath_hal_calibrateN(ah, &sc->sc_curchan, longCal, &isCalDone)) {
5562 		if (longCal) {
5563 			/*
5564 			 * Calibrate noise floor data again in case of change.
5565 			 */
5566 			ath_hal_process_noisefloor(ah);
5567 		}
5568 	} else {
5569 		DPRINTF(sc, ATH_DEBUG_ANY,
5570 			"%s: calibration of channel %u failed\n",
5571 			__func__, sc->sc_curchan.channel);
5572 		sc->sc_stats.ast_per_calfail++;
5573 	}
5574 	if (!isCalDone) {
5575 		/*
5576 		 * Use a shorter interval to potentially collect multiple
5577 		 * data samples required to complete calibration.  Once
5578 		 * we're told the work is done we drop back to a longer
5579 		 * interval between requests.  We're more aggressive doing
5580 		 * work when operating as an AP to improve operation right
5581 		 * after startup.
5582 		 */
5583 		nextcal = (1000*ath_shortcalinterval)/hz;
5584 		if (sc->sc_opmode != HAL_M_HOSTAP)
5585 			nextcal *= 10;
5586 	} else {
5587 		nextcal = ath_longcalinterval*hz;
5588 		sc->sc_lastlongcal = ticks;
5589 		if (sc->sc_lastcalreset == 0)
5590 			sc->sc_lastcalreset = sc->sc_lastlongcal;
5591 		else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
5592 			sc->sc_resetcal = 1;	/* setup reset next trip */
5593 	}
5594 
5595 	if (nextcal != 0) {
5596 		DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
5597 		    __func__, nextcal, isCalDone ? "" : "!");
5598 		callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
5599 	} else {
5600 		DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
5601 		    __func__);
5602 		/* NB: don't rearm timer */
5603 	}
5604 }
5605 
5606 static void
5607 ath_scan_start(struct ieee80211com *ic)
5608 {
5609 	struct ifnet *ifp = ic->ic_ifp;
5610 	struct ath_softc *sc = ifp->if_softc;
5611 	struct ath_hal *ah = sc->sc_ah;
5612 	u_int32_t rfilt;
5613 
5614 	/* XXX calibration timer? */
5615 
5616 	sc->sc_scanning = 1;
5617 	sc->sc_syncbeacon = 0;
5618 	rfilt = ath_calcrxfilter(sc);
5619 	ath_hal_setrxfilter(ah, rfilt);
5620 	ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
5621 
5622 	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
5623 		 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
5624 }
5625 
5626 static void
5627 ath_scan_end(struct ieee80211com *ic)
5628 {
5629 	struct ifnet *ifp = ic->ic_ifp;
5630 	struct ath_softc *sc = ifp->if_softc;
5631 	struct ath_hal *ah = sc->sc_ah;
5632 	u_int32_t rfilt;
5633 
5634 	sc->sc_scanning = 0;
5635 	rfilt = ath_calcrxfilter(sc);
5636 	ath_hal_setrxfilter(ah, rfilt);
5637 	ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
5638 
5639 	ath_hal_process_noisefloor(ah);
5640 
5641 	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5642 		 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
5643 		 sc->sc_curaid);
5644 }
5645 
5646 static void
5647 ath_set_channel(struct ieee80211com *ic)
5648 {
5649 	struct ifnet *ifp = ic->ic_ifp;
5650 	struct ath_softc *sc = ifp->if_softc;
5651 
5652 	(void) ath_chan_set(sc, ic->ic_curchan);
5653 	/*
5654 	 * If we are returning to our bss channel then mark state
5655 	 * so the next recv'd beacon's tsf will be used to sync the
5656 	 * beacon timers.  Note that since we only hear beacons in
5657 	 * sta/ibss mode this has no effect in other operating modes.
5658 	 */
5659 	if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
5660 		sc->sc_syncbeacon = 1;
5661 }
5662 
5663 /*
5664  * Walk the vap list and check if there any vap's in RUN state.
5665  */
5666 static int
5667 ath_isanyrunningvaps(struct ieee80211vap *this)
5668 {
5669 	struct ieee80211com *ic = this->iv_ic;
5670 	struct ieee80211vap *vap;
5671 
5672 	IEEE80211_LOCK_ASSERT(ic);
5673 
5674 	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
5675 		if (vap != this && vap->iv_state == IEEE80211_S_RUN)
5676 			return 1;
5677 	}
5678 	return 0;
5679 }
5680 
/*
 * 802.11 state machine transition handler.
 *
 * Program the hardware for the new state (LED, RX filter,
 * BSSID/AID, key cache MACs), invoke the net80211 parent method to
 * do the protocol work, and then perform driver-side per-state
 * setup: beacon allocation/timer configuration and calibration
 * timer start on entry to RUN; interrupt and taskqueue shutdown on
 * entry to INIT when no other vap remains running.
 *
 * Returns 0 on success or the error from the parent method /
 * beacon allocation.
 */
static int
ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *ni = NULL;
	int i, error, stamode;
	u_int32_t rfilt;
	/* LED state to program, indexed by the destination 802.11 state. */
	static const HAL_LED_STATE leds[] = {
	    HAL_LED_INIT,	/* IEEE80211_S_INIT */
	    HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
	    HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
	    HAL_LED_ASSOC, 	/* IEEE80211_S_ASSOC */
	    HAL_LED_RUN, 	/* IEEE80211_S_CAC */
	    HAL_LED_RUN, 	/* IEEE80211_S_RUN */
	    HAL_LED_RUN, 	/* IEEE80211_S_CSA */
	    HAL_LED_RUN, 	/* IEEE80211_S_SLEEP */
	};

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
		ieee80211_state_name[vap->iv_state],
		ieee80211_state_name[nstate]);

	/* Stop periodic calibration across the transition; RUN restarts it. */
	callout_stop(&sc->sc_cal_ch);
	ath_hal_setledstate(ah, leds[nstate]);	/* set LED */

	if (nstate == IEEE80211_S_SCAN) {
		/*
		 * Scanning: turn off beacon miss and don't beacon.
		 * Mark beacon state so when we reach RUN state we'll
		 * [re]setup beacons.  Unblock the task q thread so
		 * deferred interrupt processing is done.
		 */
		ath_hal_intrset(ah,
		    sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
		sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		sc->sc_beacons = 0;
		taskqueue_unblock(sc->sc_tq);
	}

	ni = vap->iv_bss;
	rfilt = ath_calcrxfilter(sc);
	/* Modes in which we track a single bss and its association id. */
	stamode = (vap->iv_opmode == IEEE80211_M_STA ||
		   vap->iv_opmode == IEEE80211_M_AHDEMO ||
		   vap->iv_opmode == IEEE80211_M_IBSS);
	if (stamode && nstate == IEEE80211_S_RUN) {
		sc->sc_curaid = ni->ni_associd;
		IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
		ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
	}
	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
	   __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
	ath_hal_setrxfilter(ah, rfilt);

	/* XXX is this to restore keycache on resume? */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY)) {
		for (i = 0; i < IEEE80211_WEP_NKID; i++)
			if (ath_hal_keyisvalid(ah, i))
				ath_hal_keysetmac(ah, i, ni->ni_bssid);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = avp->av_newstate(vap, nstate, arg);
	if (error != 0)
		goto bad;

	if (nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, ATH_DEBUG_STATE,
		    "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n", __func__,
		    vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
		    ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));

		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_IBSS:
			/*
			 * Allocate and setup the beacon frame.
			 *
			 * Stop any previous beacon DMA.  This may be
			 * necessary, for example, when an ibss merge
			 * causes reconfiguration; there will be a state
			 * transition from RUN->RUN that means we may
			 * be called with beacon transmission active.
			 */
			ath_hal_stoptxdma(ah, sc->sc_bhalq);

			error = ath_beacon_alloc(sc, ni);
			if (error != 0)
				goto bad;
			/*
			 * If joining an adhoc network defer beacon timer
			 * configuration to the next beacon frame so we
			 * have a current TSF to use.  Otherwise we're
			 * starting an ibss/bss so there's no need to delay;
			 * if this is the first vap moving to RUN state, then
			 * beacon state needs to be [re]configured.
			 */
			if (vap->iv_opmode == IEEE80211_M_IBSS &&
			    ni->ni_tstamp.tsf != 0) {
				sc->sc_syncbeacon = 1;
			} else if (!sc->sc_beacons) {
				ath_beacon_config(sc, vap);
				sc->sc_beacons = 1;
			}
			break;
		case IEEE80211_M_STA:
			/*
			 * Defer beacon timer configuration to the next
			 * beacon frame so we have a current TSF to use
			 * (any TSF collected when scanning is likely old).
			 */
			sc->sc_syncbeacon = 1;
			break;
		case IEEE80211_M_MONITOR:
			/*
			 * Monitor mode vaps have only INIT->RUN and RUN->RUN
			 * transitions so we must re-enable interrupts here to
			 * handle the case of a single monitor mode vap.
			 */
			ath_hal_intrset(ah, sc->sc_imask);
			break;
		case IEEE80211_M_WDS:
			break;
		default:
			break;
		}
		/*
		 * Let the hal process statistics collected during a
		 * scan so it can provide calibrated noise floor data.
		 */
		ath_hal_process_noisefloor(ah);
		/*
		 * Reset rssi stats; maybe not the best place...
		 */
		sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
		/*
		 * Finally, start any timers and the task q thread
		 * (in case we didn't go through SCAN state).
		 */
		if (ath_longcalinterval != 0) {
			/* start periodic recalibration timer */
			callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
		} else {
			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
			    "%s: calibration disabled\n", __func__);
		}
		taskqueue_unblock(sc->sc_tq);
	} else if (nstate == IEEE80211_S_INIT) {
		/*
		 * If there are no vaps left in RUN state then
		 * shutdown host/driver operation:
		 * o disable interrupts
		 * o disable the task queue thread
		 * o mark beacon processing as stopped
		 */
		if (!ath_isanyrunningvaps(vap)) {
			sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
			/* disable interrupts  */
			ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
			taskqueue_block(sc->sc_tq);
			sc->sc_beacons = 0;
		}
	}
bad:
	/* NB: label is also reached on the success path (error == 0) */
	return error;
}
5858 
5859 /*
5860  * Allocate a key cache slot to the station so we can
5861  * setup a mapping from key index to node. The key cache
5862  * slot is needed for managing antenna state and for
5863  * compression when stations do not use crypto.  We do
5864  * it uniliaterally here; if crypto is employed this slot
5865  * will be reassigned.
5866  */
5867 static void
5868 ath_setup_stationkey(struct ieee80211_node *ni)
5869 {
5870 	struct ieee80211vap *vap = ni->ni_vap;
5871 	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
5872 	ieee80211_keyix keyix, rxkeyix;
5873 
5874 	if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
5875 		/*
5876 		 * Key cache is full; we'll fall back to doing
5877 		 * the more expensive lookup in software.  Note
5878 		 * this also means no h/w compression.
5879 		 */
5880 		/* XXX msg+statistic */
5881 	} else {
5882 		/* XXX locking? */
5883 		ni->ni_ucastkey.wk_keyix = keyix;
5884 		ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
5885 		IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
5886 		/* NB: this will create a pass-thru key entry */
5887 		ath_keyset(sc, &ni->ni_ucastkey, vap->iv_bss);
5888 	}
5889 }
5890 
5891 /*
5892  * Setup driver-specific state for a newly associated node.
5893  * Note that we're called also on a re-associate, the isnew
5894  * param tells us if this is the first time or not.
5895  */
5896 static void
5897 ath_newassoc(struct ieee80211_node *ni, int isnew)
5898 {
5899 	struct ath_node *an = ATH_NODE(ni);
5900 	struct ieee80211vap *vap = ni->ni_vap;
5901 	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
5902 	const struct ieee80211_txparam *tp = ni->ni_txparms;
5903 
5904 	an->an_mcastrix = ath_tx_findrix(sc->sc_currates, tp->mcastrate);
5905 	an->an_mgmtrix = ath_tx_findrix(sc->sc_currates, tp->mgmtrate);
5906 
5907 	ath_rate_newassoc(sc, an, isnew);
5908 	if (isnew &&
5909 	    (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
5910 	    ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
5911 		ath_setup_stationkey(ni);
5912 }
5913 
/*
 * Query the HAL for the channel list appropriate to the given
 * country code / regulatory settings and convert it to net80211
 * form in chans[]/nchans.  Passing nchans == NULL reconfigures the
 * HAL channel state without returning a table.
 *
 * Returns 0 on success, ENOMEM if the temporary table cannot be
 * allocated, or EINVAL if the HAL rejects the settings.
 */
static int
getchannels(struct ath_softc *sc, int *nchans, struct ieee80211_channel chans[],
	int cc, int ecm, int outdoor)
{
	struct ath_hal *ah = sc->sc_ah;
	HAL_CHANNEL *halchans;
	int i, nhalchans, error;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: cc %u outdoor %u ecm %u\n",
	    __func__, cc, outdoor, ecm);

	/* Temporary HAL-format table; freed before return. */
	halchans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL),
			M_TEMP, M_NOWAIT | M_ZERO);
	if (halchans == NULL) {
		device_printf(sc->sc_dev,
		    "%s: unable to allocate channel table\n", __func__);
		return ENOMEM;
	}
	error = 0;
	if (!ath_hal_init_channels(ah, halchans, IEEE80211_CHAN_MAX, &nhalchans,
	    NULL, 0, NULL, cc, HAL_MODE_ALL, outdoor, ecm)) {
		u_int32_t rd;
		(void) ath_hal_getregdomain(ah, &rd);
		device_printf(sc->sc_dev, "ath_hal_init_channels failed, "
		    "rd %d cc %u outdoor %u ecm %u\n", rd, cc, outdoor, ecm);
		error = EINVAL;
		goto done;
	}
	if (nchans == NULL)		/* no table requested */
		goto done;

	/*
	 * Convert HAL channels to ieee80211 ones.
	 */
	for (i = 0; i < nhalchans; i++) {
		HAL_CHANNEL *c = &halchans[i];
		struct ieee80211_channel *ichan = &chans[i];

		ichan->ic_ieee = ath_hal_mhz2ieee(ah, c->channel,
					c->channelFlags);
		if (bootverbose)
			device_printf(sc->sc_dev, "hal channel %u/%x -> %u "
			    "maxpow %d minpow %d maxreg %d\n",
			    c->channel, c->channelFlags, ichan->ic_ieee,
			    c->maxTxPower, c->minTxPower, c->maxRegTxPower);
		ichan->ic_freq = c->channel;

		if ((c->channelFlags & CHANNEL_PUREG) == CHANNEL_PUREG) {
			/*
			 * Except for AR5211, HAL's PUREG means mixed
			 * DSSS and OFDM.
			 */
			ichan->ic_flags = c->channelFlags &~ CHANNEL_PUREG;
			ichan->ic_flags |= IEEE80211_CHAN_G;
		} else {
			ichan->ic_flags = c->channelFlags;
		}

		if (ath_hal_isgsmsku(ah)) {
			/*
			 * Remap to true frequencies: Ubiquiti XR9 cards use a
			 * frequency mapping different from their SR9 cards.
			 * We define special country codes to deal with this.
			 */
			if (cc == CTRY_XR9)
				ichan->ic_freq = ichan->ic_freq - 1520;
			else if (cc == CTRY_GZ901)
				ichan->ic_freq = ichan->ic_freq - 1544;
			else
				ichan->ic_freq = 3344 - ichan->ic_freq;
			ichan->ic_flags |= IEEE80211_CHAN_GSM;
			/* recompute the ieee number from the remapped freq */
			ichan->ic_ieee = ieee80211_mhz2ieee(ichan->ic_freq,
						    ichan->ic_flags);
		}
		ichan->ic_maxregpower = c->maxRegTxPower;	/* dBm */
		/* XXX: old hal's don't provide maxTxPower for some parts */
		ichan->ic_maxpower = (c->maxTxPower != 0) ?
		    c->maxTxPower : 2*c->maxRegTxPower;		/* 1/2 dBm */
		ichan->ic_minpower = c->minTxPower;		/* 1/2 dBm */
	}
	*nchans = nhalchans;
done:
	free(halchans, M_TEMP);
	return error;
}
5999 
6000 /* XXX hard to include ieee80211_regdomain.h right now */
6001 #define	SKU_DEBUG	0x1ff
6002 
6003 static void
6004 ath_maprd(const struct ieee80211_regdomain *rd,
6005 	u_int32_t *ath_rd, u_int32_t *ath_cc)
6006 {
6007 	/* map SKU's to Atheros sku's */
6008 	switch (rd->regdomain) {
6009 	case SKU_DEBUG:
6010 		if (rd->country == 0) {
6011 			*ath_rd = 0;
6012 			*ath_cc = CTRY_DEBUG;
6013 			return;
6014 		}
6015 		break;
6016 	}
6017 	*ath_rd = rd->regdomain;
6018 	*ath_cc = rd->country;
6019 }
6020 
/*
 * net80211 regdomain-change callback: map the requested settings to
 * HAL values, program them, and rebuild the channel table.  On
 * failure the previous HAL regdomain and channel state are restored.
 *
 * Returns 0 on success or the error from getchannels().
 */
static int
ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
	int nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t ord, regdomain, cc;
	int error;

	/* Remember the current HAL regdomain so we can roll back. */
	(void) ath_hal_getregdomain(ah, &ord);
	ath_maprd(rd, &regdomain, &cc);
	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: rd %u cc %u location %c ecm %u (mapped rd %u cc %u)\n",
	    __func__, rd->regdomain, rd->country, rd->location, rd->ecm,
	    regdomain, cc);
	ath_hal_setregdomain(ah, regdomain);

	/* 'I' (indoor) disables the outdoor flag */
	error = getchannels(sc, &nchans, chans, cc,
	     rd->ecm ? AH_TRUE : AH_FALSE,
	     rd->location != 'I' ? AH_TRUE : AH_FALSE);
	if (error != 0) {
		/*
		 * Restore previous state.
		 */
		ath_hal_setregdomain(ah, ord);
		(void) getchannels(sc, NULL, NULL, ic->ic_regdomain.country,
		     ic->ic_regdomain.ecm ? AH_TRUE : AH_FALSE,
		     ic->ic_regdomain.location != 'I' ? AH_TRUE : AH_FALSE);
		return error;
	}
	return 0;
}
6053 
/*
 * net80211 radio-capabilities callback: report the full channel
 * list supported by the radio by temporarily switching the HAL to
 * the debug SKU, then restore the previous regdomain/channel state.
 */
static void
ath_getradiocaps(struct ieee80211com *ic,
	int *nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t ord;

	/* Save current regdomain for restoration below. */
	(void) ath_hal_getregdomain(ah, &ord);

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d, ord %u\n",
	    __func__, 0, CTRY_DEBUG, ord);

	ath_hal_setregdomain(ah, 0);
	/* XXX not quite right but close enough for now */
	getchannels(sc, nchans, chans, CTRY_DEBUG, AH_TRUE, AH_FALSE);

	/* NB: restore previous state */
	ath_hal_setregdomain(ah, ord);
	(void) getchannels(sc, NULL, NULL, ic->ic_regdomain.country,
	     ic->ic_regdomain.ecm ? AH_TRUE : AH_FALSE,
	     ic->ic_regdomain.location != 'I' ? AH_TRUE : AH_FALSE);
}
6077 
6078 static void
6079 ath_mapsku(u_int32_t ath_rd, u_int32_t ath_cc, struct ieee80211_regdomain *rd)
6080 {
6081 	rd->isocc[0] = ' ';	/* XXX don't know */
6082 	rd->isocc[1] = ' ';
6083 
6084 	/* map Atheros sku's to SKU's */
6085 	switch (ath_rd) {
6086 	case 0:
6087 		if (ath_cc == CTRY_DEBUG) {
6088 			rd->regdomain = SKU_DEBUG;
6089 			rd->country = 0;
6090 			return;
6091 		}
6092 		break;
6093 	}
6094 	/* XXX net80211 types too small */
6095 	rd->regdomain = (uint16_t) ath_rd;
6096 	rd->country = (uint16_t) ath_cc;
6097 }
6098 
/*
 * Populate the initial net80211 channel list from the EEPROM
 * regulatory settings and record the EEPROM regdomain/country code
 * in the softc.  Called at attach time.
 *
 * Returns 0 on success or the error from getchannels().
 */
static int
ath_getchannels(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	int error;

	/*
	 * Convert HAL channels to ieee80211 ones.
	 */
	error = getchannels(sc, &ic->ic_nchans, ic->ic_channels,
	    CTRY_DEFAULT, AH_TRUE, AH_FALSE);
	/* NB: fetch these even on failure for the diagnostics below */
	(void) ath_hal_getregdomain(ah, &sc->sc_eerd);
	ath_hal_getcountrycode(ah, &sc->sc_eecc);	/* NB: cannot fail */
	if (error) {
		if_printf(ifp, "%s: unable to collect channel list from hal, "
		    "error %d\n", __func__, error);
		if (error == EINVAL) {
			if_printf(ifp, "%s: regdomain likely %u country code %u\n",
			    __func__, sc->sc_eerd, sc->sc_eecc);
		}
		return error;
	}
	ic->ic_regdomain.ecm = 1;
	ic->ic_regdomain.location = 'I';
	ath_mapsku(sc->sc_eerd, sc->sc_eecc, &ic->ic_regdomain);

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c ecm %u\n",
	    __func__, sc->sc_eerd, sc->sc_eecc,
	    ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
	    ic->ic_regdomain.location, ic->ic_regdomain.ecm);
	return 0;
}
6134 
/*
 * Callout handler marking the end of an LED blink cycle so a new
 * blink may be started by ath_led_event().
 */
static void
ath_led_done(void *arg)
{
	struct ath_softc *sc = arg;

	sc->sc_blinking = 0;
}
6142 
6143 /*
6144  * Turn the LED off: flip the pin and then set a timer so no
6145  * update will happen for the specified duration.
6146  */
6147 static void
6148 ath_led_off(void *arg)
6149 {
6150 	struct ath_softc *sc = arg;
6151 
6152 	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon);
6153 	callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc);
6154 }
6155 
6156 /*
6157  * Blink the LED according to the specified on/off times.
6158  */
6159 static void
6160 ath_led_blink(struct ath_softc *sc, int on, int off)
6161 {
6162 	DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off);
6163 	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon);
6164 	sc->sc_blinking = 1;
6165 	sc->sc_ledoff = off;
6166 	callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc);
6167 }
6168 
/*
 * Record a tx/rx event for the software LED and start a blink with
 * on/off times derived from the rate index, unless a blink cycle is
 * already in progress.
 */
static void
ath_led_event(struct ath_softc *sc, int rix)
{
	sc->sc_ledevent = ticks;	/* time of last event */
	if (sc->sc_blinking)		/* don't interrupt active blink */
		return;
	ath_led_blink(sc, sc->sc_hwmap[rix].ledon, sc->sc_hwmap[rix].ledoff);
}
6177 
/*
 * Fetch the HAL rate table for the given net80211 phy mode and
 * cache it in sc_rates[mode].
 *
 * Returns non-zero if a rate table was found, 0 for an unknown
 * mode or when the HAL has no table for it.
 */
static int
ath_rate_setup(struct ath_softc *sc, u_int mode)
{
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt;

	switch (mode) {
	case IEEE80211_MODE_11A:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A);
		break;
	case IEEE80211_MODE_HALF:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
		break;
	case IEEE80211_MODE_QUARTER:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
		break;
	case IEEE80211_MODE_11B:
		rt = ath_hal_getratetable(ah, HAL_MODE_11B);
		break;
	case IEEE80211_MODE_11G:
		rt = ath_hal_getratetable(ah, HAL_MODE_11G);
		break;
	case IEEE80211_MODE_TURBO_A:
		rt = ath_hal_getratetable(ah, HAL_MODE_108A);
#if HAL_ABI_VERSION < 0x07013100
		if (rt == NULL)		/* XXX bandaid for old hal's */
			rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
#endif
		break;
	case IEEE80211_MODE_TURBO_G:
		rt = ath_hal_getratetable(ah, HAL_MODE_108G);
		break;
	case IEEE80211_MODE_STURBO_A:
		rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
		break;
	case IEEE80211_MODE_11NA:
		rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
		break;
	case IEEE80211_MODE_11NG:
		rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
		break;
	default:
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
			__func__, mode);
		return 0;
	}
	sc->sc_rates[mode] = rt;
	return (rt != NULL);
}
6227 
/*
 * Switch the driver's current phy mode: rebuild the 802.11-rate ->
 * h/w-rate-index map (sc_rixmap), the per-rate-index radiotap/LED
 * table (sc_hwmap), and select the protection rate index.
 */
static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* NB: on/off times from the Atheros NDIS driver, w/ permission */
	static const struct {
		u_int		rate;		/* tx/rx 802.11 rate */
		u_int16_t	timeOn;		/* LED on time (ms) */
		u_int16_t	timeOff;	/* LED off time (ms) */
	} blinkrates[] = {
		{ 108,  40,  10 },
		{  96,  44,  11 },
		{  72,  50,  13 },
		{  48,  57,  14 },
		{  36,  67,  16 },
		{  24,  80,  20 },
		{  22, 100,  25 },
		{  18, 133,  34 },
		{  12, 160,  40 },
		{  10, 200,  50 },
		{   6, 240,  58 },
		{   4, 267,  66 },
		{   2, 400, 100 },
		{   0, 500, 130 },
		/* XXX half/quarter rates */
	};
	const HAL_RATE_TABLE *rt;
	int i, j;

	/* 0xff marks "no h/w rate" entries in the rix map */
	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	for (i = 0; i < rt->rateCount; i++) {
		uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy != IEEE80211_T_HT)
			sc->sc_rixmap[ieeerate] = i;
		else
			sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
	}
	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < N(sc->sc_hwmap); i++) {
		if (i >= rt->rateCount) {
			/* unused slots get the slowest blink cadence */
			sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
			sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
			continue;
		}
		sc->sc_hwmap[i].ieeerate =
			rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy == IEEE80211_T_HT)
			sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
		sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
		if (rt->info[i].shortPreamble ||
		    rt->info[i].phy == IEEE80211_T_OFDM)
			sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		/* NB: receive frames include FCS */
		sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags |
			IEEE80211_RADIOTAP_F_FCS;
		/* setup blink rate table to avoid per-packet lookup */
		for (j = 0; j < N(blinkrates)-1; j++)
			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
				break;
		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
		sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
		sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 */
	if (mode == IEEE80211_MODE_11G)
		sc->sc_protrix = ath_tx_findrix(rt, 2*2);
	else
		sc->sc_protrix = ath_tx_findrix(rt, 2*1);
	/* NB: caller is responsible for resetting rate control state */
#undef N
}
6307 
6308 #ifdef ATH_DEBUG
/*
 * Debug helper: dump each rx descriptor of a buffer.  When 'done'
 * is set the status string marks completed-ok (*) vs error (!).
 */
static void
ath_printrxbuf(struct ath_softc *sc, const struct ath_buf *bf,
	u_int ix, int done)
{
	const struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
	struct ath_hal *ah = sc->sc_ah;
	const struct ath_desc *ds;
	int i;

	for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
		printf("R[%2u] (DS.V:%p DS.P:%p) L:%08x D:%08x%s\n"
		       "      %08x %08x %08x %08x\n",
		    ix, ds, (const struct ath_desc *)bf->bf_daddr + i,
		    ds->ds_link, ds->ds_data,
		    !done ? "" : (rs->rs_status == 0) ? " *" : " !",
		    ds->ds_ctl0, ds->ds_ctl1,
		    ds->ds_hw[0], ds->ds_hw[1]);
		/* NB: magic selects a descriptor layout with extra hw words
		 * (presumably the 5416 family — confirm against the HAL) */
		if (ah->ah_magic == 0x20065416) {
			printf("        %08x %08x %08x %08x %08x %08x %08x\n",
			    ds->ds_hw[2], ds->ds_hw[3], ds->ds_hw[4],
			    ds->ds_hw[5], ds->ds_hw[6], ds->ds_hw[7],
			    ds->ds_hw[8]);
		}
	}
}
6334 
6335 static void
6336 ath_printtxbuf(struct ath_softc *sc, const struct ath_buf *bf,
6337 	u_int qnum, u_int ix, int done)
6338 {
6339 	const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
6340 	struct ath_hal *ah = sc->sc_ah;
6341 	const struct ath_desc *ds;
6342 	int i;
6343 
6344 	printf("Q%u[%3u]", qnum, ix);
6345 	for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
6346 		printf(" (DS.V:%p DS.P:%p) L:%08x D:%08x F:04%x%s\n"
6347 		       "        %08x %08x %08x %08x %08x %08x\n",
6348 		    ds, (const struct ath_desc *)bf->bf_daddr + i,
6349 		    ds->ds_link, ds->ds_data, bf->bf_txflags,
6350 		    !done ? "" : (ts->ts_status == 0) ? " *" : " !",
6351 		    ds->ds_ctl0, ds->ds_ctl1,
6352 		    ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3]);
6353 		if (ah->ah_magic == 0x20065416) {
6354 			printf("        %08x %08x %08x %08x %08x %08x %08x %08x\n",
6355 			    ds->ds_hw[4], ds->ds_hw[5], ds->ds_hw[6],
6356 			    ds->ds_hw[7], ds->ds_hw[8], ds->ds_hw[9],
6357 			    ds->ds_hw[10],ds->ds_hw[11]);
6358 			printf("        %08x %08x %08x %08x %08x %08x %08x %08x\n",
6359 			    ds->ds_hw[12],ds->ds_hw[13],ds->ds_hw[14],
6360 			    ds->ds_hw[15],ds->ds_hw[16],ds->ds_hw[17],
6361 			    ds->ds_hw[18], ds->ds_hw[19]);
6362 		}
6363 	}
6364 }
6365 #endif /* ATH_DEBUG */
6366 
/*
 * Interface watchdog: fires when transmit appears stuck.  Query the
 * HAL for a baseband/MAC hang signature for diagnostics, then reset
 * the device and bump the error/statistic counters.
 */
static void
ath_watchdog(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;

	/* ignore if the interface is down or the device is gone */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
		uint32_t hangs;

		if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
		    hangs != 0) {
			if_printf(ifp, "%s hang detected (0x%x)\n",
			    hangs & 0xff ? "bb" : "mac", hangs);
		} else
			if_printf(ifp, "device timeout\n");
		ath_reset(ifp);
		ifp->if_oerrors++;
		sc->sc_stats.ast_watchdog++;
	}
}
6386 
6387 #ifdef ATH_DIAGAPI
6388 /*
6389  * Diagnostic interface to the HAL.  This is used by various
6390  * tools to do things like retrieve register contents for
6391  * debugging.  The mechanism is intentionally opaque so that
6392  * it can change frequently w/o concern for compatiblity.
6393  */
6394 static int
6395 ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
6396 {
6397 	struct ath_hal *ah = sc->sc_ah;
6398 	u_int id = ad->ad_id & ATH_DIAG_ID;
6399 	void *indata = NULL;
6400 	void *outdata = NULL;
6401 	u_int32_t insize = ad->ad_in_size;
6402 	u_int32_t outsize = ad->ad_out_size;
6403 	int error = 0;
6404 
6405 	if (ad->ad_id & ATH_DIAG_IN) {
6406 		/*
6407 		 * Copy in data.
6408 		 */
6409 		indata = malloc(insize, M_TEMP, M_NOWAIT);
6410 		if (indata == NULL) {
6411 			error = ENOMEM;
6412 			goto bad;
6413 		}
6414 		error = copyin(ad->ad_in_data, indata, insize);
6415 		if (error)
6416 			goto bad;
6417 	}
6418 	if (ad->ad_id & ATH_DIAG_DYN) {
6419 		/*
6420 		 * Allocate a buffer for the results (otherwise the HAL
6421 		 * returns a pointer to a buffer where we can read the
6422 		 * results).  Note that we depend on the HAL leaving this
6423 		 * pointer for us to use below in reclaiming the buffer;
6424 		 * may want to be more defensive.
6425 		 */
6426 		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
6427 		if (outdata == NULL) {
6428 			error = ENOMEM;
6429 			goto bad;
6430 		}
6431 	}
6432 	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
6433 		if (outsize < ad->ad_out_size)
6434 			ad->ad_out_size = outsize;
6435 		if (outdata != NULL)
6436 			error = copyout(outdata, ad->ad_out_data,
6437 					ad->ad_out_size);
6438 	} else {
6439 		error = EINVAL;
6440 	}
6441 bad:
6442 	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
6443 		free(indata, M_TEMP);
6444 	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
6445 		free(outdata, M_TEMP);
6446 	return error;
6447 }
6448 #endif /* ATH_DIAGAPI */
6449 
/*
 * Network interface ioctl handler: interface flag changes, media
 * queries, driver statistics export and (optionally) the HAL
 * diagnostic API.  Unknown requests return EINVAL.
 */
static int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	const HAL_RATE_TABLE *rt;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ATH_LOCK(sc);
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call ath_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			ath_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid)
				ath_init(sc);	/* XXX lose error */
		} else {
			ath_stop_locked(ifp);
#ifdef notyet
			/* XXX must wakeup in places like ath_vap_delete */
			if (!sc->sc_invalid)
				ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
#endif
		}
		ATH_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGATHSTATS:
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.ast_tx_packets = ifp->if_opackets;
		sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
		sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
		sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
		rt = sc->sc_currates;
		/* XXX HT rates */
		sc->sc_stats.ast_tx_rate =
		    rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
		/* NB: returns directly; the stats copy is the ioctl result */
		return copyout(&sc->sc_stats,
		    ifr->ifr_data, sizeof (sc->sc_stats));
#ifdef ATH_DIAGAPI
	case SIOCGATHDIAG:
		error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
		break;
#endif
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
6524 
6525 static int
6526 ath_sysctl_slottime(SYSCTL_HANDLER_ARGS)
6527 {
6528 	struct ath_softc *sc = arg1;
6529 	u_int slottime = ath_hal_getslottime(sc->sc_ah);
6530 	int error;
6531 
6532 	error = sysctl_handle_int(oidp, &slottime, 0, req);
6533 	if (error || !req->newptr)
6534 		return error;
6535 	return !ath_hal_setslottime(sc->sc_ah, slottime) ? EINVAL : 0;
6536 }
6537 
6538 static int
6539 ath_sysctl_acktimeout(SYSCTL_HANDLER_ARGS)
6540 {
6541 	struct ath_softc *sc = arg1;
6542 	u_int acktimeout = ath_hal_getacktimeout(sc->sc_ah);
6543 	int error;
6544 
6545 	error = sysctl_handle_int(oidp, &acktimeout, 0, req);
6546 	if (error || !req->newptr)
6547 		return error;
6548 	return !ath_hal_setacktimeout(sc->sc_ah, acktimeout) ? EINVAL : 0;
6549 }
6550 
6551 static int
6552 ath_sysctl_ctstimeout(SYSCTL_HANDLER_ARGS)
6553 {
6554 	struct ath_softc *sc = arg1;
6555 	u_int ctstimeout = ath_hal_getctstimeout(sc->sc_ah);
6556 	int error;
6557 
6558 	error = sysctl_handle_int(oidp, &ctstimeout, 0, req);
6559 	if (error || !req->newptr)
6560 		return error;
6561 	return !ath_hal_setctstimeout(sc->sc_ah, ctstimeout) ? EINVAL : 0;
6562 }
6563 
6564 static int
6565 ath_sysctl_softled(SYSCTL_HANDLER_ARGS)
6566 {
6567 	struct ath_softc *sc = arg1;
6568 	int softled = sc->sc_softled;
6569 	int error;
6570 
6571 	error = sysctl_handle_int(oidp, &softled, 0, req);
6572 	if (error || !req->newptr)
6573 		return error;
6574 	softled = (softled != 0);
6575 	if (softled != sc->sc_softled) {
6576 		if (softled) {
6577 			/* NB: handle any sc_ledpin change */
6578 			ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin);
6579 			ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin,
6580 				!sc->sc_ledon);
6581 		}
6582 		sc->sc_softled = softled;
6583 	}
6584 	return 0;
6585 }
6586 
6587 static int
6588 ath_sysctl_ledpin(SYSCTL_HANDLER_ARGS)
6589 {
6590 	struct ath_softc *sc = arg1;
6591 	int ledpin = sc->sc_ledpin;
6592 	int error;
6593 
6594 	error = sysctl_handle_int(oidp, &ledpin, 0, req);
6595 	if (error || !req->newptr)
6596 		return error;
6597 	if (ledpin != sc->sc_ledpin) {
6598 		sc->sc_ledpin = ledpin;
6599 		if (sc->sc_softled) {
6600 			ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin);
6601 			ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin,
6602 				!sc->sc_ledon);
6603 		}
6604 	}
6605 	return 0;
6606 }
6607 
/*
 * Sysctl handler for the transmit antenna switch setting.  Valid
 * writes are HAL_ANT_VARIABLE through HAL_ANT_FIXED_B; the cached
 * sc_txantenna is updated for radiotap consistency.
 */
static int
ath_sysctl_txantenna(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int txantenna = ath_hal_getantennaswitch(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &txantenna, 0, req);
	if (!error && req->newptr) {
		/* XXX assumes 2 antenna ports */
		if (txantenna < HAL_ANT_VARIABLE || txantenna > HAL_ANT_FIXED_B)
			return EINVAL;
		ath_hal_setantennaswitch(sc->sc_ah, txantenna);
		/*
		 * NB: with the switch locked this isn't meaningful,
		 *     but set it anyway so things like radiotap get
		 *     consistent info in their data.
		 */
		sc->sc_txantenna = txantenna;
	}
	return error;
}
6630 
6631 static int
6632 ath_sysctl_rxantenna(SYSCTL_HANDLER_ARGS)
6633 {
6634 	struct ath_softc *sc = arg1;
6635 	u_int defantenna = ath_hal_getdefantenna(sc->sc_ah);
6636 	int error;
6637 
6638 	error = sysctl_handle_int(oidp, &defantenna, 0, req);
6639 	if (!error && req->newptr)
6640 		ath_hal_setdefantenna(sc->sc_ah, defantenna);
6641 	return error;
6642 }
6643 
6644 static int
6645 ath_sysctl_diversity(SYSCTL_HANDLER_ARGS)
6646 {
6647 	struct ath_softc *sc = arg1;
6648 	u_int diversity = ath_hal_getdiversity(sc->sc_ah);
6649 	int error;
6650 
6651 	error = sysctl_handle_int(oidp, &diversity, 0, req);
6652 	if (error || !req->newptr)
6653 		return error;
6654 	if (!ath_hal_setdiversity(sc->sc_ah, diversity))
6655 		return EINVAL;
6656 	sc->sc_diversity = diversity;
6657 	return 0;
6658 }
6659 
6660 static int
6661 ath_sysctl_diag(SYSCTL_HANDLER_ARGS)
6662 {
6663 	struct ath_softc *sc = arg1;
6664 	u_int32_t diag;
6665 	int error;
6666 
6667 	if (!ath_hal_getdiag(sc->sc_ah, &diag))
6668 		return EINVAL;
6669 	error = sysctl_handle_int(oidp, &diag, 0, req);
6670 	if (error || !req->newptr)
6671 		return error;
6672 	return !ath_hal_setdiag(sc->sc_ah, diag) ? EINVAL : 0;
6673 }
6674 
6675 static int
6676 ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS)
6677 {
6678 	struct ath_softc *sc = arg1;
6679 	struct ifnet *ifp = sc->sc_ifp;
6680 	u_int32_t scale;
6681 	int error;
6682 
6683 	(void) ath_hal_gettpscale(sc->sc_ah, &scale);
6684 	error = sysctl_handle_int(oidp, &scale, 0, req);
6685 	if (error || !req->newptr)
6686 		return error;
6687 	return !ath_hal_settpscale(sc->sc_ah, scale) ? EINVAL :
6688 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
6689 }
6690 
6691 static int
6692 ath_sysctl_tpc(SYSCTL_HANDLER_ARGS)
6693 {
6694 	struct ath_softc *sc = arg1;
6695 	u_int tpc = ath_hal_gettpc(sc->sc_ah);
6696 	int error;
6697 
6698 	error = sysctl_handle_int(oidp, &tpc, 0, req);
6699 	if (error || !req->newptr)
6700 		return error;
6701 	return !ath_hal_settpc(sc->sc_ah, tpc) ? EINVAL : 0;
6702 }
6703 
6704 static int
6705 ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS)
6706 {
6707 	struct ath_softc *sc = arg1;
6708 	struct ifnet *ifp = sc->sc_ifp;
6709 	struct ath_hal *ah = sc->sc_ah;
6710 	u_int rfkill = ath_hal_getrfkill(ah);
6711 	int error;
6712 
6713 	error = sysctl_handle_int(oidp, &rfkill, 0, req);
6714 	if (error || !req->newptr)
6715 		return error;
6716 	if (rfkill == ath_hal_getrfkill(ah))	/* unchanged */
6717 		return 0;
6718 	if (!ath_hal_setrfkill(ah, rfkill))
6719 		return EINVAL;
6720 	return (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
6721 }
6722 
/*
 * Sysctl handler: get/set the h/w RF silent configuration word.
 * The value is passed through to the HAL; on a successful update
 * the cached pin/polarity fields are refreshed from it.
 */
static int
ath_sysctl_rfsilent(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int rfsilent;
	int error;

	(void) ath_hal_getrfsilent(sc->sc_ah, &rfsilent);
	error = sysctl_handle_int(oidp, &rfsilent, 0, req);
	if (error || !req->newptr)
		return error;
	if (!ath_hal_setrfsilent(sc->sc_ah, rfsilent))
		return EINVAL;
	/*
	 * NB(review): the mask layout (GPIO pin in bits 0x1c, polarity
	 * in bit 0x2) is assumed to match the HAL's rfsilent encoding —
	 * confirm against ath_hal_getrfsilent()'s documentation.
	 */
	sc->sc_rfsilentpin = rfsilent & 0x1c;
	sc->sc_rfsilentpol = (rfsilent & 0x2) != 0;
	return 0;
}
6740 
6741 static int
6742 ath_sysctl_tpack(SYSCTL_HANDLER_ARGS)
6743 {
6744 	struct ath_softc *sc = arg1;
6745 	u_int32_t tpack;
6746 	int error;
6747 
6748 	(void) ath_hal_gettpack(sc->sc_ah, &tpack);
6749 	error = sysctl_handle_int(oidp, &tpack, 0, req);
6750 	if (error || !req->newptr)
6751 		return error;
6752 	return !ath_hal_settpack(sc->sc_ah, tpack) ? EINVAL : 0;
6753 }
6754 
6755 static int
6756 ath_sysctl_tpcts(SYSCTL_HANDLER_ARGS)
6757 {
6758 	struct ath_softc *sc = arg1;
6759 	u_int32_t tpcts;
6760 	int error;
6761 
6762 	(void) ath_hal_gettpcts(sc->sc_ah, &tpcts);
6763 	error = sysctl_handle_int(oidp, &tpcts, 0, req);
6764 	if (error || !req->newptr)
6765 		return error;
6766 	return !ath_hal_settpcts(sc->sc_ah, tpcts) ? EINVAL : 0;
6767 }
6768 
6769 static int
6770 ath_sysctl_intmit(SYSCTL_HANDLER_ARGS)
6771 {
6772 	struct ath_softc *sc = arg1;
6773 	int intmit, error;
6774 
6775 	intmit = ath_hal_getintmit(sc->sc_ah);
6776 	error = sysctl_handle_int(oidp, &intmit, 0, req);
6777 	if (error || !req->newptr)
6778 		return error;
6779 	return !ath_hal_setintmit(sc->sc_ah, intmit) ? EINVAL : 0;
6780 }
6781 
/*
 * Create the per-device sysctl tree (dev.ath.N.*).  Unconditional
 * knobs are registered first; capability-dependent knobs (diversity,
 * TPC, fast frames, RF silent, interference mitigation) are only
 * created when the HAL reports the corresponding h/w support.
 */
static void
ath_sysctlattach(struct ath_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
	struct ath_hal *ah = sc->sc_ah;

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"countrycode", CTLFLAG_RD, &sc->sc_eecc, 0,
		"EEPROM country code");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"regdomain", CTLFLAG_RD, &sc->sc_eerd, 0,
		"EEPROM regdomain code");
#ifdef	ATH_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLFLAG_RW, &sc->sc_debug, 0,
		"control debugging printfs");
#endif
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"slottime", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_slottime, "I", "802.11 slot time (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"acktimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_acktimeout, "I", "802.11 ACK timeout (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ctstimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_ctstimeout, "I", "802.11 CTS timeout (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_softled, "I", "enable/disable software LED support");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledpin", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_ledpin, "I", "GPIO pin connected to LED");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledon", CTLFLAG_RW, &sc->sc_ledon, 0,
		"setting to turn LED on");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0,
		"idle time for inactivity LED (ticks)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"txantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_txantenna, "I", "antenna switch");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"rxantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_rxantenna, "I", "default/rx antenna");
	/* only meaningful when the h/w supports antenna diversity */
	if (ath_hal_hasdiversity(ah))
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"diversity", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_diversity, "I", "antenna diversity");
	sc->sc_txintrperiod = ATH_TXINTR_PERIOD;
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"txintrperiod", CTLFLAG_RW, &sc->sc_txintrperiod, 0,
		"tx descriptor batching");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"diag", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_diag, "I", "h/w diagnostic control");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"tpscale", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_tpscale, "I", "tx power scaling");
	/* per-packet transmit power control knobs */
	if (ath_hal_hastpc(ah)) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpc", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpc, "I", "enable/disable per-packet TPC");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpack", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpack, "I", "tx power for ack frames");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpcts", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpcts, "I", "tx power for cts frames");
	}
	/* Atheros fast-frame staging thresholds */
	if (ath_hal_hasfastframes(sc->sc_ah)) {
		sc->sc_fftxqmin = ATH_FF_TXQMIN;
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"fftxqmin", CTLFLAG_RW, &sc->sc_fftxqmin, 0,
			"min frames before fast-frame staging");
		sc->sc_fftxqmax = ATH_FF_TXQMAX;
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"fftxqmax", CTLFLAG_RW, &sc->sc_fftxqmax, 0,
			"max queued frames before tail drop");
	}
	if (ath_hal_hasrfsilent(ah)) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"rfsilent", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_rfsilent, "I", "h/w RF silent config");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"rfkill", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_rfkill, "I", "enable/disable RF kill switch");
	}
	if (ath_hal_hasintmit(ah)) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"intmit", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_intmit, "I", "interference mitigation");
	}
	/* default: pass decrypt/MIC-error frames up when monitoring */
	sc->sc_monpass = HAL_RXERR_DECRYPT | HAL_RXERR_MIC;
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"monpass", CTLFLAG_RW, &sc->sc_monpass, 0,
		"mask of error frames to pass when monitoring");
}
6880 
/*
 * Attach the bpf tap point for radiotap-encapsulated 802.11 frames
 * and initialize the constant fields of the tx/rx radiotap headers.
 */
static void
ath_bpfattach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	bpfattach(ifp, DLT_IEEE802_11_RADIO,
		sizeof(struct ieee80211_frame) + sizeof(sc->sc_tx_th));
	/*
	 * Initialize constant fields.
	 * XXX make header lengths a multiple of 32-bits so subsequent
	 *     headers are properly aligned; this is a kludge to keep
	 *     certain applications happy.
	 *
	 * NB: the channel is setup each time we transition to the
	 *     RUN state to avoid filling it in for each frame.
	 */
	/* radiotap headers are little-endian on the wire; hence htole* */
	sc->sc_tx_th_len = roundup(sizeof(sc->sc_tx_th), sizeof(u_int32_t));
	sc->sc_tx_th.wt_ihdr.it_len = htole16(sc->sc_tx_th_len);
	sc->sc_tx_th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT);

	sc->sc_rx_th_len = roundup(sizeof(sc->sc_rx_th), sizeof(u_int32_t));
	sc->sc_rx_th.wr_ihdr.it_len = htole16(sc->sc_rx_th_len);
	sc->sc_rx_th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT);
}
6905 
/*
 * Transmit a frame using explicit parameters supplied by the caller
 * (the "raw xmit" path, e.g. injection via bpf) instead of the normal
 * rate control machinery.  The ibp_* fields of params select rates,
 * retry counts, tx power, priority/antenna and RTS/CTS protection.
 *
 * Returns 0 on success after handing the buffer to the h/w queue.
 * On the crypto failure path m0 is freed here; ath_tx_dmasetup() is
 * assumed to dispose of m0 on its own failures — TODO confirm.
 */
static int
ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
	struct ath_buf *bf, struct mbuf *m0,
	const struct ieee80211_bpf_params *params)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	int error, ismcast, ismrr;
	int keyix, hdrlen, pktlen, try0, txantenna;
	u_int8_t rix, cix, txrate, ctsrate, rate1, rate2, rate3;
	struct ieee80211_frame *wh;
	u_int flags, ctsduration;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	struct ath_desc *ds;
	u_int pri;

	wh = mtod(m0, struct ieee80211_frame *);
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	/* XXX honor IEEE80211_BPF_DATAPAD */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;

	if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			ath_freetx(m0);
			return EIO;
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		hdrlen += cip->ic_header;
		pktlen += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
			pktlen += cip->ic_miclen;
		keyix = k->wk_keyix;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		keyix = ni->ni_ucastkey.wk_keyix;
		if (keyix == IEEE80211_KEYIX_NONE)
			keyix = HAL_TXKEYIX_INVALID;
	} else
		keyix = HAL_TXKEYIX_INVALID;

	/* load the frame for DMA; may coalesce/copy the mbuf chain */
	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);
	bf->bf_node = ni;			/* NB: held reference */

	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	flags |= HAL_TXDESC_INTREQ;		/* force interrupt */
	if (params->ibp_flags & IEEE80211_BPF_RTS)
		flags |= HAL_TXDESC_RTSENA;
	else if (params->ibp_flags & IEEE80211_BPF_CTS)
		flags |= HAL_TXDESC_CTSENA;
	/* XXX leave ismcast to injector? */
	if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
		flags |= HAL_TXDESC_NOACK;

	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	rix = ath_tx_findrix(rt, params->ibp_rate0);
	txrate = rt->info[rix].rateCode;
	if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
		txrate |= rt->info[rix].shortPreamble;
	sc->sc_txrix = rix;
	try0 = params->ibp_try0;
	/* multi-rate retry is requested by a non-zero series-1 try count */
	ismrr = (params->ibp_try1 != 0);
	/* NB(review): antenna appears packed into the high bits of
	 * ibp_pri (pri in bits 0-1) — confirm against the injector ABI */
	txantenna = params->ibp_pri >> 2;
	if (txantenna == 0)			/* XXX? */
		txantenna = sc->sc_txantenna;
	ctsduration = 0;
	if (flags & (HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA)) {
		/* compute CTS rate and total protection duration */
		cix = ath_tx_findrix(rt, params->ibp_ctsrate);
		ctsrate = rt->info[cix].rateCode;
		if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) {
			ctsrate |= rt->info[cix].shortPreamble;
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].spAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_TRUE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].spAckDuration;
		} else {
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].lpAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_FALSE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].lpAckDuration;
		}
		ismrr = 0;			/* XXX */
	} else
		ctsrate = 0;
	pri = params->ibp_pri & 3;
	/*
	 * NB: we mark all packets as type PSPOLL so the h/w won't
	 * set the sequence number, duration, etc.
	 */
	atype = HAL_PKT_TYPE_PSPOLL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
			sc->sc_hwmap[rix].ieeerate, -1);

	/* feed a copy to any attached bpf taps with the tx radiotap header */
	if (bpf_peers_present(ifp->if_bpf)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		bpf_mtap2(ifp->if_bpf, &sc->sc_tx_th, sc->sc_tx_th_len, m0);
	}

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	ds = bf->bf_desc;
	/* XXX check return value? */
	ath_hal_setuptxdesc(ah, ds
		, pktlen		/* packet length */
		, hdrlen		/* header length */
		, atype			/* Atheros packet type */
		, params->ibp_power	/* txpower */
		, txrate, try0		/* series 0 rate/tries */
		, keyix			/* key cache index */
		, txantenna		/* antenna mode */
		, flags			/* flags */
		, ctsrate		/* rts/cts rate */
		, ctsduration		/* rts/cts duration */
	);
	bf->bf_txflags = flags;

	/* program the remaining rate series for multi-rate retry */
	if (ismrr) {
		rix = ath_tx_findrix(rt, params->ibp_rate1);
		rate1 = rt->info[rix].rateCode;
		if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
			rate1 |= rt->info[rix].shortPreamble;
		if (params->ibp_try2) {
			rix = ath_tx_findrix(rt, params->ibp_rate2);
			rate2 = rt->info[rix].rateCode;
			if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
				rate2 |= rt->info[rix].shortPreamble;
		} else
			rate2 = 0;
		if (params->ibp_try3) {
			rix = ath_tx_findrix(rt, params->ibp_rate3);
			rate3 = rt->info[rix].rateCode;
			if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
				rate3 |= rt->info[rix].shortPreamble;
		} else
			rate3 = 0;
		ath_hal_setupxtxdesc(ah, ds
			, rate1, params->ibp_try1	/* series 1 */
			, rate2, params->ibp_try2	/* series 2 */
			, rate3, params->ibp_try3	/* series 3 */
		);
	}

	/* NB: no buffered multicast in power save support */
	ath_tx_handoff(sc, sc->sc_ac2q[pri], bf);
	return 0;
}
7108 
/*
 * Raw packet transmit entry point (net80211's ic_raw_xmit hook).
 * Grabs a tx buffer and dispatches to either the normal tx path
 * (params == NULL) or the explicit-parameter raw path.
 *
 * The node reference and mbuf are consumed on every path; on start
 * failures the mbuf is presumed freed by ath_tx_start()/
 * ath_tx_raw_start() — TODO confirm, only the tx buffer and node
 * are reclaimed here.
 */
static int
ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_buf *bf;

	/* reject if the interface is down or the device is gone */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 */
	ATH_TXBUF_LOCK(sc);
	bf = STAILQ_FIRST(&sc->sc_txbuf);
	if (bf != NULL)
		STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
	ATH_TXBUF_UNLOCK(sc);
	if (bf == NULL) {
		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of xmit buffers\n",
			__func__);
		sc->sc_stats.ast_tx_qstop++;
		/* throttle the tx path until buffers are reclaimed */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}

	ifp->if_opackets++;
	sc->sc_stats.ast_tx_raw++;

	if (params == NULL) {
		/*
		 * Legacy path; interpret frame contents to decide
		 * precisely how to send the frame.
		 */
		if (ath_tx_start(sc, ni, bf, m))
			goto bad;
	} else {
		/*
		 * Caller supplied explicit parameters to use in
		 * sending the frame.
		 */
		if (ath_tx_raw_start(sc, ni, bf, m, params))
			goto bad;
	}
	/* arm the watchdog timer for this transmission */
	ifp->if_timer = 5;

	return 0;
bad:
	/* return the tx buffer to the free list and drop the node ref */
	ifp->if_oerrors++;
	ATH_TXBUF_LOCK(sc);
	STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
	ATH_TXBUF_UNLOCK(sc);
	ieee80211_free_node(ni);
	return EIO;		/* XXX */
}
7170 
7171 /*
7172  * Announce various information on device/driver attach.
7173  */
7174 static void
7175 ath_announce(struct ath_softc *sc)
7176 {
7177 #define	HAL_MODE_DUALBAND	(HAL_MODE_11A|HAL_MODE_11B)
7178 	struct ifnet *ifp = sc->sc_ifp;
7179 	struct ath_hal *ah = sc->sc_ah;
7180 	u_int modes, cc;
7181 
7182 	if_printf(ifp, "mac %d.%d phy %d.%d",
7183 		ah->ah_macVersion, ah->ah_macRev,
7184 		ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
7185 	/*
7186 	 * Print radio revision(s).  We check the wireless modes
7187 	 * to avoid falsely printing revs for inoperable parts.
7188 	 * Dual-band radio revs are returned in the 5Ghz rev number.
7189 	 */
7190 	ath_hal_getcountrycode(ah, &cc);
7191 	modes = ath_hal_getwirelessmodes(ah, cc);
7192 	if ((modes & HAL_MODE_DUALBAND) == HAL_MODE_DUALBAND) {
7193 		if (ah->ah_analog5GhzRev && ah->ah_analog2GhzRev)
7194 			printf(" 5ghz radio %d.%d 2ghz radio %d.%d",
7195 				ah->ah_analog5GhzRev >> 4,
7196 				ah->ah_analog5GhzRev & 0xf,
7197 				ah->ah_analog2GhzRev >> 4,
7198 				ah->ah_analog2GhzRev & 0xf);
7199 		else
7200 			printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4,
7201 				ah->ah_analog5GhzRev & 0xf);
7202 	} else
7203 		printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4,
7204 			ah->ah_analog5GhzRev & 0xf);
7205 	printf("\n");
7206 	if (bootverbose) {
7207 		int i;
7208 		for (i = 0; i <= WME_AC_VO; i++) {
7209 			struct ath_txq *txq = sc->sc_ac2q[i];
7210 			if_printf(ifp, "Use hw queue %u for %s traffic\n",
7211 				txq->axq_qnum, ieee80211_wme_acnames[i]);
7212 		}
7213 		if_printf(ifp, "Use hw queue %u for CAB traffic\n",
7214 			sc->sc_cabq->axq_qnum);
7215 		if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
7216 	}
7217 	if (ath_rxbuf != ATH_RXBUF)
7218 		if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
7219 	if (ath_txbuf != ATH_TXBUF)
7220 		if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
7221 #undef HAL_MODE_DUALBAND
7222 }
7223