xref: /freebsd/sys/dev/mwl/if_mwl.c (revision 7850fa71f55a16f414bb21163d80a03a5ab34522)
1 /*-
2  * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3  * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer,
11  *    without modification.
12  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14  *    redistribution must be conditioned upon including a substantially
15  *    similar Disclaimer requirement for further binary redistribution.
16  *
17  * NO WARRANTY
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGES.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * Driver for the Marvell 88W8363 Wireless LAN controller.
36  */
37 
38 #include "opt_inet.h"
39 #include "opt_mwl.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/kernel.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/errno.h>
52 #include <sys/callout.h>
53 #include <sys/bus.h>
54 #include <sys/endian.h>
55 #include <sys/kthread.h>
56 #include <sys/taskqueue.h>
57 
58 #include <machine/bus.h>
59 
60 #include <net/if.h>
61 #include <net/if_dl.h>
62 #include <net/if_media.h>
63 #include <net/if_types.h>
64 #include <net/if_arp.h>
65 #include <net/ethernet.h>
66 #include <net/if_llc.h>
67 
68 #include <net/bpf.h>
69 
70 #include <net80211/ieee80211_var.h>
71 #include <net80211/ieee80211_regdomain.h>
72 
73 #ifdef INET
74 #include <netinet/in.h>
75 #include <netinet/if_ether.h>
76 #endif /* INET */
77 
78 #include <dev/mwl/if_mwlvar.h>
79 #include <dev/mwl/mwldiag.h>
80 
/*
 * Idiomatic field shorthands: MS(v,x) extracts field x from v
 * (mask with x, then shift down by x_S); SM(v,x) inserts v into
 * field x (shift up by x_S, then mask with x).
 */
82 #define	MS(v,x)	(((v) & x) >> x##_S)
83 #define	SM(v,x)	(((v) << x##_S) & x)
84 
85 static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
86 		    const char name[IFNAMSIZ], int unit, int opmode,
87 		    int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
88 		    const uint8_t mac[IEEE80211_ADDR_LEN]);
89 static void	mwl_vap_delete(struct ieee80211vap *);
90 static int	mwl_setupdma(struct mwl_softc *);
91 static int	mwl_hal_reset(struct mwl_softc *sc);
92 static int	mwl_init_locked(struct mwl_softc *);
93 static void	mwl_init(void *);
94 static void	mwl_stop_locked(struct ifnet *, int);
95 static int	mwl_reset(struct ieee80211vap *, u_long);
96 static void	mwl_stop(struct ifnet *, int);
97 static void	mwl_start(struct ifnet *);
98 static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
99 			const struct ieee80211_bpf_params *);
100 static int	mwl_media_change(struct ifnet *);
101 static void	mwl_watchdog(struct ifnet *);
102 static int	mwl_ioctl(struct ifnet *, u_long, caddr_t);
103 static void	mwl_radar_proc(void *, int);
104 static void	mwl_chanswitch_proc(void *, int);
105 static void	mwl_bawatchdog_proc(void *, int);
106 static int	mwl_key_alloc(struct ieee80211vap *,
107 			struct ieee80211_key *,
108 			ieee80211_keyix *, ieee80211_keyix *);
109 static int	mwl_key_delete(struct ieee80211vap *,
110 			const struct ieee80211_key *);
111 static int	mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
112 			const uint8_t mac[IEEE80211_ADDR_LEN]);
113 static int	mwl_mode_init(struct mwl_softc *);
114 static void	mwl_update_mcast(struct ifnet *);
115 static void	mwl_update_promisc(struct ifnet *);
116 static void	mwl_updateslot(struct ifnet *);
117 static int	mwl_beacon_setup(struct ieee80211vap *);
118 static void	mwl_beacon_update(struct ieee80211vap *, int);
119 #ifdef MWL_HOST_PS_SUPPORT
120 static void	mwl_update_ps(struct ieee80211vap *, int);
121 static int	mwl_set_tim(struct ieee80211_node *, int);
122 #endif
123 static int	mwl_dma_setup(struct mwl_softc *);
124 static void	mwl_dma_cleanup(struct mwl_softc *);
125 static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
126 		    const uint8_t [IEEE80211_ADDR_LEN]);
127 static void	mwl_node_cleanup(struct ieee80211_node *);
128 static void	mwl_node_drain(struct ieee80211_node *);
129 static void	mwl_node_getsignal(const struct ieee80211_node *,
130 			int8_t *, int8_t *);
131 static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
132 			struct ieee80211_mimo_info *);
133 static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
134 static void	mwl_rx_proc(void *, int);
135 static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
136 static int	mwl_tx_setup(struct mwl_softc *, int, int);
137 static int	mwl_wme_update(struct ieee80211com *);
138 static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
139 static void	mwl_tx_cleanup(struct mwl_softc *);
140 static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
141 static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
142 			     struct mwl_txbuf *, struct mbuf *);
143 static void	mwl_tx_proc(void *, int);
144 static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
145 static void	mwl_draintxq(struct mwl_softc *);
146 static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
147 static void	mwl_recv_action(struct ieee80211_node *,
148 			const uint8_t *, const uint8_t *);
149 static int	mwl_addba_request(struct ieee80211_node *,
150 			struct ieee80211_tx_ampdu *, int dialogtoken,
151 			int baparamset, int batimeout);
152 static int	mwl_addba_response(struct ieee80211_node *,
153 			struct ieee80211_tx_ampdu *, int status,
154 			int baparamset, int batimeout);
155 static void	mwl_addba_stop(struct ieee80211_node *,
156 			struct ieee80211_tx_ampdu *);
157 static int	mwl_startrecv(struct mwl_softc *);
158 static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
159 			struct ieee80211_channel *);
160 static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
161 static void	mwl_scan_start(struct ieee80211com *);
162 static void	mwl_scan_end(struct ieee80211com *);
163 static void	mwl_set_channel(struct ieee80211com *);
164 static int	mwl_peerstadb(struct ieee80211_node *,
165 			int aid, int staid, MWL_HAL_PEERINFO *pi);
166 static int	mwl_localstadb(struct ieee80211vap *);
167 static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
168 static int	allocstaid(struct mwl_softc *sc, int aid);
169 static void	delstaid(struct mwl_softc *sc, int staid);
170 static void	mwl_newassoc(struct ieee80211_node *, int);
171 static void	mwl_agestations(void *);
172 static int	mwl_setregdomain(struct ieee80211com *,
173 			struct ieee80211_regdomain *, int,
174 			struct ieee80211_channel []);
175 static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
176 			struct ieee80211_channel []);
177 static int	mwl_getchannels(struct mwl_softc *);
178 
179 static void	mwl_sysctlattach(struct mwl_softc *);
180 static void	mwl_announce(struct mwl_softc *);
181 
182 SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");
183 
184 static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
185 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
186 	    0, "rx descriptors allocated");
187 static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
188 SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf,
189 	    0, "rx buffers allocated");
190 TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf);
191 static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
192 SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf,
193 	    0, "tx buffers allocated");
194 TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf);
195 static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
196 SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce,
197 	    0, "tx buffers to send at once");
198 TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce);
199 static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
200 SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota,
201 	    0, "max rx buffers to process per interrupt");
202 TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota);
203 static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
204 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow,
205 	    0, "min free rx buffers before restarting traffic");
206 TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow);
207 
208 #ifdef MWL_DEBUG
209 static	int mwl_debug = 0;
210 SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug,
211 	    0, "control debugging printfs");
212 TUNABLE_INT("hw.mwl.debug", &mwl_debug);
213 enum {
214 	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
215 	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
216 	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
217 	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
218 	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
219 	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
220 	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
221 	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
222 	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
223 	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
224 	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
225 	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
226 	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
227 	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
228 	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
229 	MWL_DEBUG_ANY		= 0xffffffff
230 };
231 #define	IS_BEACON(wh) \
232     ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
233 	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
234 #define	IFF_DUMPPKTS_RECV(sc, wh) \
235     (((sc->sc_debug & MWL_DEBUG_RECV) && \
236       ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
237      (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
238 #define	IFF_DUMPPKTS_XMIT(sc) \
239 	((sc->sc_debug & MWL_DEBUG_XMIT) || \
240 	 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
241 #define	DPRINTF(sc, m, fmt, ...) do {				\
242 	if (sc->sc_debug & (m))					\
243 		printf(fmt, __VA_ARGS__);			\
244 } while (0)
245 #define	KEYPRINTF(sc, hk, mac) do {				\
246 	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
247 		mwl_keyprint(sc, __func__, hk, mac);		\
248 } while (0)
249 static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
250 static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
251 #else
252 #define	IFF_DUMPPKTS_RECV(sc, wh) \
253 	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
254 #define	IFF_DUMPPKTS_XMIT(sc) \
255 	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
256 #define	DPRINTF(sc, m, fmt, ...) do {				\
257 	(void) sc;						\
258 } while (0)
259 #define	KEYPRINTF(sc, k, mac) do {				\
260 	(void) sc;						\
261 } while (0)
262 #endif
263 
264 MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
265 
266 /*
267  * Each packet has fixed front matter: a 2-byte length
268  * of the payload, followed by a 4-address 802.11 header
269  * (regardless of the actual header and always w/o any
270  * QoS header).  The payload then follows.
271  */
struct mwltxrec {
	uint16_t fwlen;		/* payload length passed to the firmware */
	struct ieee80211_frame_addr4 wh; /* 4-address 802.11 header (always) */
} __packed;
276 
277 /*
278  * Read/Write shorthands for accesses to BAR 0.  Note
279  * that all BAR 1 operations are done in the "hal" and
280  * there should be no reference to them here.
281  */
282 static __inline uint32_t
283 RD4(struct mwl_softc *sc, bus_size_t off)
284 {
285 	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
286 }
287 
288 static __inline void
289 WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
290 {
291 	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
292 }
293 
294 int
295 mwl_attach(uint16_t devid, struct mwl_softc *sc)
296 {
297 	struct ifnet *ifp;
298 	struct ieee80211com *ic;
299 	struct mwl_hal *mh;
300 	int error = 0;
301 
302 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
303 
304 	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
305 	if (ifp == NULL) {
306 		device_printf(sc->sc_dev, "can not if_alloc()\n");
307 		return ENOSPC;
308 	}
309 	ic = ifp->if_l2com;
310 
311 	/* set these up early for if_printf use */
312 	if_initname(ifp, device_get_name(sc->sc_dev),
313 		device_get_unit(sc->sc_dev));
314 
315 	mh = mwl_hal_attach(sc->sc_dev, devid,
316 	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
317 	if (mh == NULL) {
318 		if_printf(ifp, "unable to attach HAL\n");
319 		error = EIO;
320 		goto bad;
321 	}
322 	sc->sc_mh = mh;
323 	/*
324 	 * Load firmware so we can get setup.  We arbitrarily
325 	 * pick station firmware; we'll re-load firmware as
326 	 * needed so setting up the wrong mode isn't a big deal.
327 	 */
328 	if (mwl_hal_fwload(mh, NULL) != 0) {
329 		if_printf(ifp, "unable to setup builtin firmware\n");
330 		error = EIO;
331 		goto bad1;
332 	}
333 	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
334 		if_printf(ifp, "unable to fetch h/w specs\n");
335 		error = EIO;
336 		goto bad1;
337 	}
338 	error = mwl_getchannels(sc);
339 	if (error != 0)
340 		goto bad1;
341 
342 	sc->sc_txantenna = 0;		/* h/w default */
343 	sc->sc_rxantenna = 0;		/* h/w default */
344 	sc->sc_invalid = 0;		/* ready to go, enable int handling */
345 	sc->sc_ageinterval = MWL_AGEINTERVAL;
346 
347 	/*
348 	 * Allocate tx+rx descriptors and populate the lists.
349 	 * We immediately push the information to the firmware
350 	 * as otherwise it gets upset.
351 	 */
352 	error = mwl_dma_setup(sc);
353 	if (error != 0) {
354 		if_printf(ifp, "failed to setup descriptors: %d\n", error);
355 		goto bad1;
356 	}
357 	error = mwl_setupdma(sc);	/* push to firmware */
358 	if (error != 0)			/* NB: mwl_setupdma prints msg */
359 		goto bad1;
360 
361 	callout_init(&sc->sc_timer, CALLOUT_MPSAFE);
362 
363 	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
364 		taskqueue_thread_enqueue, &sc->sc_tq);
365 	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
366 		"%s taskq", ifp->if_xname);
367 
368 	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
369 	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
370 	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
371 	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);
372 
373 	/* NB: insure BK queue is the lowest priority h/w queue */
374 	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
375 		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
376 			ieee80211_wme_acnames[WME_AC_BK]);
377 		error = EIO;
378 		goto bad2;
379 	}
380 	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
381 	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
382 	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
383 		/*
384 		 * Not enough hardware tx queues to properly do WME;
385 		 * just punt and assign them all to the same h/w queue.
386 		 * We could do a better job of this if, for example,
387 		 * we allocate queues when we switch from station to
388 		 * AP mode.
389 		 */
390 		if (sc->sc_ac2q[WME_AC_VI] != NULL)
391 			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
392 		if (sc->sc_ac2q[WME_AC_BE] != NULL)
393 			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
394 		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
395 		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
396 		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
397 	}
398 	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);
399 
400 	ifp->if_softc = sc;
401 	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
402 	ifp->if_start = mwl_start;
403 	ifp->if_watchdog = mwl_watchdog;
404 	ifp->if_ioctl = mwl_ioctl;
405 	ifp->if_init = mwl_init;
406 	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
407 	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
408 	IFQ_SET_READY(&ifp->if_snd);
409 
410 	ic->ic_ifp = ifp;
411 	/* XXX not right but it's not used anywhere important */
412 	ic->ic_phytype = IEEE80211_T_OFDM;
413 	ic->ic_opmode = IEEE80211_M_STA;
414 	ic->ic_caps =
415 		  IEEE80211_C_STA		/* station mode supported */
416 		| IEEE80211_C_HOSTAP		/* hostap mode */
417 		| IEEE80211_C_MONITOR		/* monitor mode */
418 #if 0
419 		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
420 		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
421 #endif
422 		| IEEE80211_C_WDS		/* WDS supported */
423 		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
424 		| IEEE80211_C_SHSLOT		/* short slot time supported */
425 		| IEEE80211_C_WME		/* WME/WMM supported */
426 		| IEEE80211_C_BURST		/* xmit bursting supported */
427 		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
428 		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
429 		| IEEE80211_C_TXFRAG		/* handle tx frags */
430 		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
431 		| IEEE80211_C_DFS		/* DFS supported */
432 		;
433 
434 	ic->ic_htcaps =
435 		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
436 		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
437 		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
438 		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
439 		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
440 #if MWL_AGGR_SIZE == 7935
441 		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
442 #else
443 		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
444 #endif
445 #if 0
446 		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
447 		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
448 #endif
449 		/* s/w capabilities */
450 		| IEEE80211_HTC_HT		/* HT operation */
451 		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
452 		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
453 		| IEEE80211_HTC_SMPS		/* SMPS available */
454 		;
455 
456 	/*
457 	 * Mark h/w crypto support.
458 	 * XXX no way to query h/w support.
459 	 */
460 	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
461 			  |  IEEE80211_CRYPTO_AES_CCM
462 			  |  IEEE80211_CRYPTO_TKIP
463 			  |  IEEE80211_CRYPTO_TKIPMIC
464 			  ;
465 	/*
466 	 * Transmit requires space in the packet for a special
467 	 * format transmit record and optional padding between
468 	 * this record and the payload.  Ask the net80211 layer
469 	 * to arrange this when encapsulating packets so we can
470 	 * add it efficiently.
471 	 */
472 	ic->ic_headroom = sizeof(struct mwltxrec) -
473 		sizeof(struct ieee80211_frame);
474 
475 	/* call MI attach routine. */
476 	ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
477 	ic->ic_setregdomain = mwl_setregdomain;
478 	ic->ic_getradiocaps = mwl_getradiocaps;
479 	/* override default methods */
480 	ic->ic_raw_xmit = mwl_raw_xmit;
481 	ic->ic_newassoc = mwl_newassoc;
482 	ic->ic_updateslot = mwl_updateslot;
483 	ic->ic_update_mcast = mwl_update_mcast;
484 	ic->ic_update_promisc = mwl_update_promisc;
485 	ic->ic_wme.wme_update = mwl_wme_update;
486 
487 	ic->ic_node_alloc = mwl_node_alloc;
488 	sc->sc_node_cleanup = ic->ic_node_cleanup;
489 	ic->ic_node_cleanup = mwl_node_cleanup;
490 	sc->sc_node_drain = ic->ic_node_drain;
491 	ic->ic_node_drain = mwl_node_drain;
492 	ic->ic_node_getsignal = mwl_node_getsignal;
493 	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;
494 
495 	ic->ic_scan_start = mwl_scan_start;
496 	ic->ic_scan_end = mwl_scan_end;
497 	ic->ic_set_channel = mwl_set_channel;
498 
499 	sc->sc_recv_action = ic->ic_recv_action;
500 	ic->ic_recv_action = mwl_recv_action;
501 	sc->sc_addba_request = ic->ic_addba_request;
502 	ic->ic_addba_request = mwl_addba_request;
503 	sc->sc_addba_response = ic->ic_addba_response;
504 	ic->ic_addba_response = mwl_addba_response;
505 	sc->sc_addba_stop = ic->ic_addba_stop;
506 	ic->ic_addba_stop = mwl_addba_stop;
507 
508 	ic->ic_vap_create = mwl_vap_create;
509 	ic->ic_vap_delete = mwl_vap_delete;
510 
511 	ieee80211_radiotap_attach(ic,
512 	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
513 		MWL_TX_RADIOTAP_PRESENT,
514 	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
515 		MWL_RX_RADIOTAP_PRESENT);
516 	/*
517 	 * Setup dynamic sysctl's now that country code and
518 	 * regdomain are available from the hal.
519 	 */
520 	mwl_sysctlattach(sc);
521 
522 	if (bootverbose)
523 		ieee80211_announce(ic);
524 	mwl_announce(sc);
525 	return 0;
526 bad2:
527 	mwl_dma_cleanup(sc);
528 bad1:
529 	mwl_hal_detach(mh);
530 bad:
531 	if_free(ifp);
532 	sc->sc_invalid = 1;
533 	return error;
534 }
535 
/*
 * Detach the device: stop the interface, then tear down the
 * net80211, DMA, tx queue, and hal state in the order described
 * below.  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	mwl_stop(ifp, 1);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	mwl_dma_cleanup(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	if_free(ifp);

	return 0;
}
566 
567 /*
568  * MAC address handling for multiple BSS on the same radio.
569  * The first vap uses the MAC address from the EEPROM.  For
570  * subsequent vap's we set the U/L bit (bit 1) in the MAC
571  * address and use the next six bits as an index.
572  */
573 static void
574 assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
575 {
576 	int i;
577 
578 	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
579 		/* NB: we only do this if h/w supports multiple bssid */
580 		for (i = 0; i < 32; i++)
581 			if ((sc->sc_bssidmask & (1<<i)) == 0)
582 				break;
583 		if (i != 0)
584 			mac[0] |= (i << 2)|0x2;
585 	} else
586 		i = 0;
587 	sc->sc_bssidmask |= 1<<i;
588 	if (i == 0)
589 		sc->sc_nbssid0++;
590 }
591 
592 static void
593 reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
594 {
595 	int i = mac[0] >> 2;
596 	if (i != 0 || --sc->sc_nbssid0 == 0)
597 		sc->sc_bssidmask &= ~(1<<i);
598 }
599 
/*
 * Create a vap of the requested opmode.  AP and station vaps get a
 * backing "hal vap" for h/w state; WDS vaps borrow the hal vap of
 * an existing AP vap and monitor vaps need none.  Returns the new
 * vap or NULL on failure (unsupported opmode, no resources, or no
 * AP vap present for WDS).
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic,
	const char name[IFNAMSIZ], int unit, int opmode, int flags,
	const uint8_t bssid[IEEE80211_ADDR_LEN],
	const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	/* NB: local copy since assign_address may modify the address */
	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			/* undo the bssid slot accounting on failure */
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		/* opmode not supported by this driver */
		return NULL;
	}

	mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (mvp == NULL) {
		if (hvap != NULL) {
			/* release h/w vap and address slot claimed above */
			mwl_hal_delvap(hvap);
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
		}
		/* XXX msg */
		return NULL;
	}
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
	if (hvap != NULL)
		IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);

	/* update per-opmode vap accounting */
	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
736 
/*
 * Destroy a vap: interrupts are quiesced while h/w state is torn
 * down, the hal vap and sta db entry are released, and the vap
 * accounting used to select the overall operating mode is updated.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ifnet *parent = vap->iv_ic->ic_ifp;
	struct mwl_softc *sc = parent->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (parent->if_drv_flags & IFF_DRV_RUNNING) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	/* re-enable interrupts if we disabled them above */
	if (parent->if_drv_flags & IFF_DRV_RUNNING)
		mwl_hal_intrset(mh, sc->sc_imask);
}
777 
778 void
779 mwl_suspend(struct mwl_softc *sc)
780 {
781 	struct ifnet *ifp = sc->sc_ifp;
782 
783 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
784 		__func__, ifp->if_flags);
785 
786 	mwl_stop(ifp, 1);
787 }
788 
789 void
790 mwl_resume(struct mwl_softc *sc)
791 {
792 	struct ifnet *ifp = sc->sc_ifp;
793 
794 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
795 		__func__, ifp->if_flags);
796 
797 	if (ifp->if_flags & IFF_UP)
798 		mwl_init(sc);
799 }
800 
801 void
802 mwl_shutdown(void *arg)
803 {
804 	struct mwl_softc *sc = arg;
805 
806 	mwl_stop(sc->sc_ifp, 1);
807 }
808 
809 /*
810  * Interrupt handler.  Most of the actual processing is deferred.
811  */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	/* NB: rx/tx/BA work is deferred to taskqueue threads */
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;	/* NB: intentionally ignored */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;	/* NB: intentionally ignored */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
867 
868 static void
869 mwl_radar_proc(void *arg, int pending)
870 {
871 	struct mwl_softc *sc = arg;
872 	struct ifnet *ifp = sc->sc_ifp;
873 	struct ieee80211com *ic = ifp->if_l2com;
874 
875 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
876 	    __func__, pending);
877 
878 	sc->sc_stats.mst_radardetect++;
879 	/* XXX stop h/w BA streams? */
880 
881 	IEEE80211_LOCK(ic);
882 	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
883 	IEEE80211_UNLOCK(ic);
884 }
885 
886 static void
887 mwl_chanswitch_proc(void *arg, int pending)
888 {
889 	struct mwl_softc *sc = arg;
890 	struct ifnet *ifp = sc->sc_ifp;
891 	struct ieee80211com *ic = ifp->if_l2com;
892 
893 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
894 	    __func__, pending);
895 
896 	IEEE80211_LOCK(ic);
897 	sc->sc_csapending = 0;
898 	ieee80211_csa_completeswitch(ic);
899 	IEEE80211_UNLOCK(ic);
900 }
901 
/*
 * Tear down one BA stream flagged by the h/w watchdog.
 * NOTE(review): data[0]/data[1] look like the node and ampdu tx
 * state stashed when the stream was created -- confirm against
 * the mwl_hal_bastream setup code.
 */
static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
	struct ieee80211_node *ni = sp->data[0];

	/* send DELBA and drop the stream */
	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}
910 
911 static void
912 mwl_bawatchdog_proc(void *arg, int pending)
913 {
914 	struct mwl_softc *sc = arg;
915 	struct mwl_hal *mh = sc->sc_mh;
916 	const MWL_HAL_BASTREAM *sp;
917 	uint8_t bitmap, n;
918 
919 	sc->sc_stats.mst_bawatchdog++;
920 
921 	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
922 		DPRINTF(sc, MWL_DEBUG_AMPDU,
923 		    "%s: could not get bitmap\n", __func__);
924 		sc->sc_stats.mst_bawatchdog_failed++;
925 		return;
926 	}
927 	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
928 	if (bitmap == 0xff) {
929 		n = 0;
930 		/* disable all ba streams */
931 		for (bitmap = 0; bitmap < 8; bitmap++) {
932 			sp = mwl_hal_bastream_lookup(mh, bitmap);
933 			if (sp != NULL) {
934 				mwl_bawatchdog(sp);
935 				n++;
936 			}
937 		}
938 		if (n == 0) {
939 			DPRINTF(sc, MWL_DEBUG_AMPDU,
940 			    "%s: no BA streams found\n", __func__);
941 			sc->sc_stats.mst_bawatchdog_empty++;
942 		}
943 	} else if (bitmap != 0xaa) {
944 		/* disable a single ba stream */
945 		sp = mwl_hal_bastream_lookup(mh, bitmap);
946 		if (sp != NULL) {
947 			mwl_bawatchdog(sp);
948 		} else {
949 			DPRINTF(sc, MWL_DEBUG_AMPDU,
950 			    "%s: no BA stream %d\n", __func__, bitmap);
951 			sc->sc_stats.mst_bawatchdog_notfound++;
952 		}
953 	}
954 }
955 
956 /*
957  * Convert net80211 channel to a HAL channel.
958  */
static void
mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
{
	hc->channel = chan->ic_ieee;

	/* NB: clear all flag bitfields at once through the overlaid word */
	*(uint32_t *)&hc->channelFlags = 0;
	if (IEEE80211_IS_CHAN_2GHZ(chan))
		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
	else if (IEEE80211_IS_CHAN_5GHZ(chan))
		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
	if (IEEE80211_IS_CHAN_HT40(chan)) {
		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
		/* record which side of the control channel the extension is */
		if (IEEE80211_IS_CHAN_HT40U(chan))
			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
		else
			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
	} else
		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
	/* XXX 10MHz channels */
}
979 
980 /*
981  * Inform firmware of our tx/rx dma setup.  The BAR 0
982  * writes below are for compatibility with older firmware.
983  * For current firmware we send this information with a
984  * cmd block via mwl_hal_sethwdma.
985  */
static int
mwl_setupdma(struct mwl_softc *sc)
{
	int error, i;

	/* rx ring base; write both read and write pointers for old f/w */
	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);

	/* tx (wcb) ring bases, one per non-ack queue */
	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
		struct mwl_txq *txq = &sc->sc_txq[i];
		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
	}
	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;

	/* current firmware gets the same info via a command block */
	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to setup tx/rx dma; hal status %u\n", error);
		/* XXX */
	}
	return error;
}
1011 
1012 /*
1013  * Inform firmware of tx rate parameters.
1014  * Called after a channel change.
1015  */
1016 static int
1017 mwl_setcurchanrates(struct mwl_softc *sc)
1018 {
1019 	struct ifnet *ifp = sc->sc_ifp;
1020 	struct ieee80211com *ic = ifp->if_l2com;
1021 	const struct ieee80211_rateset *rs;
1022 	MWL_HAL_TXRATE rates;
1023 
1024 	memset(&rates, 0, sizeof(rates));
1025 	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1026 	/* rate used to send management frames */
1027 	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1028 	/* rate used to send multicast frames */
1029 	rates.McastRate = rates.MgtRate;
1030 
1031 	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1032 }
1033 
1034 /*
1035  * Inform firmware of tx rate parameters.  Called whenever
1036  * user-settable params change and after a channel change.
1037  */
static int
mwl_setrates(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ieee80211_node *ni = vap->iv_bss;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	MWL_HAL_TXRATE rates;

	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));

	/*
	 * Update the h/w rate map.
	 * NB: 0x80 for MCS is passed through unchanged
	 */
	memset(&rates, 0, sizeof(rates));
	/* rate used to send management frames */
	rates.MgtRate = tp->mgmtrate;
	/* rate used to send multicast frames */
	rates.McastRate = tp->mcastrate;

	/* while here calculate EAPOL fixed rate cookie */
	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));

	/* a user-fixed unicast rate forces RATE_FIXED; else f/w adapts */
	return mwl_hal_settxrate(mvp->mv_hvap,
	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
		RATE_FIXED : RATE_AUTO, &rates);
}
1065 
1066 /*
1067  * Setup a fixed xmit rate cookie for EAPOL frames.
1068  */
1069 static void
1070 mwl_seteapolformat(struct ieee80211vap *vap)
1071 {
1072 	struct mwl_vap *mvp = MWL_VAP(vap);
1073 	struct ieee80211_node *ni = vap->iv_bss;
1074 	enum ieee80211_phymode mode;
1075 	uint8_t rate;
1076 
1077 	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1078 
1079 	mode = ieee80211_chan2mode(ni->ni_chan);
1080 	/*
1081 	 * Use legacy rates when operating a mixed HT+non-HT bss.
1082 	 * NB: this may violate POLA for sta and wds vap's.
1083 	 */
1084 	if (mode == IEEE80211_MODE_11NA &&
1085 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1086 		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1087 	else if (mode == IEEE80211_MODE_11NG &&
1088 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1089 		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1090 	else
1091 		rate = vap->iv_txparms[mode].mgmtrate;
1092 
1093 	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1094 }
1095 
1096 /*
1097  * Map SKU+country code to region code for radar bin'ing.
1098  */
1099 static int
1100 mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1101 {
1102 	switch (rd->regdomain) {
1103 	case SKU_FCC:
1104 	case SKU_FCC3:
1105 		return DOMAIN_CODE_FCC;
1106 	case SKU_CA:
1107 		return DOMAIN_CODE_IC;
1108 	case SKU_ETSI:
1109 	case SKU_ETSI2:
1110 	case SKU_ETSI3:
1111 		if (rd->country == CTRY_SPAIN)
1112 			return DOMAIN_CODE_SPAIN;
1113 		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1114 			return DOMAIN_CODE_FRANCE;
1115 		/* XXX force 1.3.1 radar type */
1116 		return DOMAIN_CODE_ETSI_131;
1117 	case SKU_JAPAN:
1118 		return DOMAIN_CODE_MKK;
1119 	case SKU_ROW:
1120 		return DOMAIN_CODE_DGT;	/* Taiwan */
1121 	case SKU_APAC:
1122 	case SKU_APAC2:
1123 	case SKU_APAC3:
1124 		return DOMAIN_CODE_AUS;	/* Australia */
1125 	}
1126 	/* XXX KOREA? */
1127 	return DOMAIN_CODE_FCC;			/* XXX? */
1128 }
1129 
/*
 * Push global (vap-independent) settings to the firmware:
 * antenna config, radio/preamble, wmm, current channel, rate
 * adaptation mode, burst optimization and region code.
 * NB: always returns 1 (success); callers test for 0.
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1154 
/*
 * Bring the interface up with the softc lock held: stop any
 * previous activity, reset the h/w, start the receive path and
 * enable interrupts.  Returns 0 on success or an errno.
 */
static int
mwl_init_locked(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop_locked(ifp, 0);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		if_printf(ifp, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.  Disabled sources are kept in the
	 * #if 0 blocks so the full set is documented here.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	/* mark running before unmasking interrupts */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	mwl_hal_intrset(mh, sc->sc_imask);

	return 0;
}
1214 
1215 static void
1216 mwl_init(void *arg)
1217 {
1218 	struct mwl_softc *sc = arg;
1219 	struct ifnet *ifp = sc->sc_ifp;
1220 	struct ieee80211com *ic = ifp->if_l2com;
1221 	int error = 0;
1222 
1223 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1224 		__func__, ifp->if_flags);
1225 
1226 	MWL_LOCK(sc);
1227 	error = mwl_init_locked(sc);
1228 	MWL_UNLOCK(sc);
1229 
1230 	if (error == 0)
1231 		ieee80211_start_all(ic);	/* start all vap's */
1232 }
1233 
/*
 * Shut the interface down with the softc lock held; safe to
 * call whether or not the interface is running.
 */
static void
mwl_stop_locked(struct ifnet *ifp, int disable)
{
	struct mwl_softc *sc = ifp->if_softc;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		ifp->if_timer = 0;
		mwl_draintxq(sc);
	}
}
1252 
1253 static void
1254 mwl_stop(struct ifnet *ifp, int disable)
1255 {
1256 	struct mwl_softc *sc = ifp->if_softc;
1257 
1258 	MWL_LOCK(sc);
1259 	mwl_stop_locked(ifp, disable);
1260 	MWL_UNLOCK(sc);
1261 }
1262 
/*
 * Reload per-vap firmware state for the given target state:
 * rates, rts threshold, short GI, HT protection and (for ap/ibss
 * in RUN) the beacon frame.  Returns 0 or an errno from
 * mwl_beacon_setup.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1291 
1292 /*
1293  * Reset the hardware w/o losing operational state.
 * Used to reset or reload hardware state for a vap.
1295  */
static int
mwl_reset(struct ieee80211vap *vap, u_long cmd)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	int error = 0;

	/* NB: hvap is NULL for WDS, MONITOR, etc.; nothing to do then */
	if (hvap != NULL) {
		struct ieee80211com *ic = vap->iv_ic;
		struct ifnet *ifp = ic->ic_ifp;
		struct mwl_softc *sc = ifp->if_softc;
		struct mwl_hal *mh = sc->sc_mh;

		/* XXX handle DWDS sta vap change */
		/* XXX do we need to disable interrupts? */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
		error = mwl_reset_vap(vap, vap->iv_state);
		mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable */
	}
	return error;
}
1316 
1317 /*
1318  * Allocate a tx buffer for sending a frame.  The
1319  * packet is assumed to have the WME AC stored so
1320  * we can use it to select the appropriate h/w queue.
1321  */
1322 static struct mwl_txbuf *
1323 mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1324 {
1325 	struct mwl_txbuf *bf;
1326 
1327 	/*
1328 	 * Grab a TX buffer and associated resources.
1329 	 */
1330 	MWL_TXQ_LOCK(txq);
1331 	bf = STAILQ_FIRST(&txq->free);
1332 	if (bf != NULL) {
1333 		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1334 		txq->nfree--;
1335 	}
1336 	MWL_TXQ_UNLOCK(txq);
1337 	if (bf == NULL)
1338 		DPRINTF(sc, MWL_DEBUG_XMIT,
1339 		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1340 	return bf;
1341 }
1342 
1343 /*
1344  * Return a tx buffer to the queue it came from.  Note there
1345  * are two cases because we must preserve the order of buffers
1346  * as it reflects the fixed order of descriptors in memory
1347  * (the firmware pre-fetches descriptors so we cannot reorder).
1348  */
1349 static void
1350 mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1351 {
1352 	bf->bf_m = NULL;
1353 	bf->bf_node = NULL;
1354 	MWL_TXQ_LOCK(txq);
1355 	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1356 	txq->nfree++;
1357 	MWL_TXQ_UNLOCK(txq);
1358 }
1359 
1360 static void
1361 mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1362 {
1363 	bf->bf_m = NULL;
1364 	bf->bf_node = NULL;
1365 	MWL_TXQ_LOCK(txq);
1366 	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1367 	txq->nfree++;
1368 	MWL_TXQ_UNLOCK(txq);
1369 }
1370 
/*
 * Transmit entry point: drain the interface send queue, mapping
 * each frame to the h/w queue chosen by its WME classification,
 * and kick the firmware once per mwl_txcoalesce frames (and once
 * at the end) rather than per packet.
 */
static void
mwl_start(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	nqueued = 0;
	for (;;) {
		bf = NULL;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			/* no-drop policy: stall the queue instead */
			sc->sc_stats.mst_tx_qstop++;
			/* XXX blocks other traffic */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
#else
			/* default policy: drop the frame and keep going */
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			ifp->if_oerrors++;
			/* NB: head insert preserves descriptor order */
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1453 
/*
 * Raw (bpf-injected) transmit entry point.  Returns 0 on success
 * or an errno; on failure the mbuf and node reference are released.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		/* XXX blocks other traffic */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		ifp->if_oerrors++;
		/* NB: head insert preserves descriptor order */
		mwl_puttxbuf_head(txq, bf);

		ieee80211_free_node(ni);
		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1513 
1514 static int
1515 mwl_media_change(struct ifnet *ifp)
1516 {
1517 	struct ieee80211vap *vap = ifp->if_softc;
1518 	int error;
1519 
1520 	error = ieee80211_media_change(ifp);
1521 	/* NB: only the fixed rate can change and that doesn't need a reset */
1522 	if (error == ENETRESET) {
1523 		mwl_setrates(vap);
1524 		error = 0;
1525 	}
1526 	return error;
1527 }
1528 
1529 #ifdef MWL_DEBUG
/*
 * Dump a key cache entry to the console (debug builds only).
 * NOTE(review): ciphers[] is indexed by hk->keyTypeId; assumes
 * the KEY_TYPE_ID_* values are 0..2 -- verify against the hal
 * header if new cipher types are added.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		/* TKIP also carries separate rx/tx MIC keys */
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
1555 #endif
1556 
1557 /*
1558  * Allocate a key cache slot for a unicast key.  The
1559  * firmware handles key allocation and every station is
1560  * guaranteed key space so we are always successful.
1561  */
static int
mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{
	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;

	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
		/* group key: must live in the vap's global key table */
		if (!(&vap->iv_nw_keys[0] <= k &&
		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
			/* should not happen */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
				"%s: bogus group key\n", __func__);
			return 0;
		}
		/* give the caller what they requested */
		*keyix = *rxkeyix = k - vap->iv_nw_keys;
	} else {
		/*
		 * Firmware handles key allocation.
		 */
		*keyix = *rxkeyix = 0;
	}
	return 1;
}
1587 
1588 /*
1589  * Delete a key entry allocated by mwl_key_alloc.
1590  */
static int
mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	MWL_HAL_KEYVAL hk;
	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vap's key state lives on the parent ap hvap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}

	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
	    __func__, k->wk_keyix);

	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	/* map net80211 cipher to the hal key type id */
	switch (k->wk_cipher->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
}
1634 
1635 static __inline int
1636 addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1637 {
1638 	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1639 		if (k->wk_flags & IEEE80211_KEY_XMIT)
1640 			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1641 		if (k->wk_flags & IEEE80211_KEY_RECV)
1642 			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1643 		return 1;
1644 	} else
1645 		return 0;
1646 }
1647 
1648 /*
1649  * Set the key cache contents for the specified key.  Key cache
1650  * slot(s) must already have been allocated by mwl_key_alloc.
1651  */
static int
mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vap's key state lives on the parent ap hvap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	/* build the hal key block from the net80211 key */
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 *     just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS
		 * node setup so we will plumb the key to the wrong mac
		 * address (it'll be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1752 
/* unaligned little endian access */
/* NB: byte-at-a-time reads so the pointer need not be aligned */
#define LE_READ_2(p)				\
	((uint16_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8)))
#define LE_READ_4(p)				\
	((uint32_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8) |	\
	  (((const uint8_t *)(p))[2] << 16) |	\
	  (((const uint8_t *)(p))[3] << 24)))
1764 
1765 /*
1766  * Set the multicast filter contents into the hardware.
1767  * XXX f/w has no support; just defer to the os.
1768  */
static void
mwl_setmcastfilter(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
#if 0
	/* disabled: kept as a template should f/w filtering appear */
	struct ether_multi *enm;
	struct ether_multistep estep;
	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
	uint8_t *mp;
	int nmc;

	mp = macs;
	nmc = 0;
	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* XXX Punt on ranges. */
		if (nmc == MWL_HAL_MCAST_MAX ||
		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
			ifp->if_flags |= IFF_ALLMULTI;
			return;
		}
		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
		mp += IEEE80211_ADDR_LEN, nmc++;
		ETHER_NEXT_MULTI(estep, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
#else
	/* XXX no mcast filter support; we get everything */
	ifp->if_flags |= IFF_ALLMULTI;
#endif
}
1801 
1802 static int
1803 mwl_mode_init(struct mwl_softc *sc)
1804 {
1805 	struct ifnet *ifp = sc->sc_ifp;
1806 	struct ieee80211com *ic = ifp->if_l2com;
1807 	struct mwl_hal *mh = sc->sc_mh;
1808 
1809 	/*
1810 	 * NB: Ignore promisc in hostap mode; it's set by the
1811 	 * bridge.  This is wrong but we have no way to
1812 	 * identify internal requests (from the bridge)
1813 	 * versus external requests such as for tcpdump.
1814 	 */
1815 	mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
1816 	    ic->ic_opmode != IEEE80211_M_HOSTAP);
1817 	mwl_setmcastfilter(sc);
1818 
1819 	return 0;
1820 }
1821 
1822 /*
1823  * Callback from the 802.11 layer after a multicast state change.
1824  */
1825 static void
1826 mwl_update_mcast(struct ifnet *ifp)
1827 {
1828 	struct mwl_softc *sc = ifp->if_softc;
1829 
1830 	mwl_setmcastfilter(sc);
1831 }
1832 
1833 /*
1834  * Callback from the 802.11 layer after a promiscuous mode change.
1835  * Note this interface does not check the operating mode as this
1836  * is an internal callback and we are expected to honor the current
1837  * state (e.g. this is used for setting the interface in promiscuous
1838  * mode when operating in hostap mode to do ACS).
1839  */
1840 static void
1841 mwl_update_promisc(struct ifnet *ifp)
1842 {
1843 	struct mwl_softc *sc = ifp->if_softc;
1844 
1845 	mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
1846 }
1847 
1848 /*
1849  * Callback from the 802.11 layer to update the slot time
1850  * based on the current setting.  We use it to notify the
1851  * firmware of ERP changes and the f/w takes care of things
1852  * like slot time and preamble.
1853  */
static void
mwl_updateslot(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;
	int prot;

	/* NB: can be called early; suppress needless cmds */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/*
	 * Calculate the ERP flags.  The firmware will use
	 * this to carry out the appropriate measures.
	 */
	prot = 0;
	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
		if (ic->ic_flags & IEEE80211_F_USEPROT)
			prot |= IEEE80211_ERP_USE_PROTECTION;
		if (ic->ic_flags & IEEE80211_F_USEBARKER)
			prot |= IEEE80211_ERP_LONG_PREAMBLE;
	}

	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
	    ic->ic_flags);

	mwl_hal_setgprot(mh, prot);
}
1888 
1889 /*
1890  * Setup the beacon frame.
1891  */
static int
mwl_beacon_setup(struct ieee80211vap *vap)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *ni = vap->iv_bss;
	struct ieee80211_beacon_offsets bo;
	struct mbuf *m;

	/* build the frame once; the f/w re-sends it itself */
	m = ieee80211_beacon_alloc(ni, &bo);
	if (m == NULL)
		return ENOBUFS;
	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
	/* NB: f/w keeps its own copy so the mbuf can go immediately */
	m_free(m);

	return 0;
}
1908 
1909 /*
1910  * Update the beacon frame in response to a change.
1911  */
1912 static void
1913 mwl_beacon_update(struct ieee80211vap *vap, int item)
1914 {
1915 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1916 	struct ieee80211com *ic = vap->iv_ic;
1917 
1918 	KASSERT(hvap != NULL, ("no beacon"));
1919 	switch (item) {
1920 	case IEEE80211_BEACON_ERP:
1921 		mwl_updateslot(ic->ic_ifp);
1922 		break;
1923 	case IEEE80211_BEACON_HTINFO:
1924 		mwl_hal_setnprotmode(hvap,
1925 		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1926 		break;
1927 	case IEEE80211_BEACON_CAPS:
1928 	case IEEE80211_BEACON_WME:
1929 	case IEEE80211_BEACON_APPIE:
1930 	case IEEE80211_BEACON_CSA:
1931 		break;
1932 	case IEEE80211_BEACON_TIM:
1933 		/* NB: firmware always forms TIM */
1934 		return;
1935 	}
1936 	/* XXX retain beacon frame and update */
1937 	mwl_beacon_setup(vap);
1938 }
1939 
1940 static void
1941 mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1942 {
1943 	bus_addr_t *paddr = (bus_addr_t*) arg;
1944 	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1945 	*paddr = segs->ds_addr;
1946 }
1947 
1948 #ifdef MWL_HOST_PS_SUPPORT
1949 /*
1950  * Handle power save station occupancy changes.
1951  */
1952 static void
1953 mwl_update_ps(struct ieee80211vap *vap, int nsta)
1954 {
1955 	struct mwl_vap *mvp = MWL_VAP(vap);
1956 
1957 	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1958 		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1959 	mvp->mv_last_ps_sta = nsta;
1960 }
1961 
1962 /*
1963  * Handle associated station power save state changes.
1964  */
1965 static int
1966 mwl_set_tim(struct ieee80211_node *ni, int set)
1967 {
1968 	struct ieee80211vap *vap = ni->ni_vap;
1969 	struct mwl_vap *mvp = MWL_VAP(vap);
1970 
1971 	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1972 		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1973 		    IEEE80211_AID(ni->ni_associd), set);
1974 		return 1;
1975 	} else
1976 		return 0;
1977 }
1978 #endif /* MWL_HOST_PS_SUPPORT */
1979 
1980 static int
1981 mwl_desc_setup(struct mwl_softc *sc, const char *name,
1982 	struct mwl_descdma *dd,
1983 	int nbuf, size_t bufsize, int ndesc, size_t descsize)
1984 {
1985 	struct ifnet *ifp = sc->sc_ifp;
1986 	uint8_t *ds;
1987 	int error;
1988 
1989 	DPRINTF(sc, MWL_DEBUG_RESET,
1990 	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
1991 	    __func__, name, nbuf, (uintmax_t) bufsize,
1992 	    ndesc, (uintmax_t) descsize);
1993 
1994 	dd->dd_name = name;
1995 	dd->dd_desc_len = nbuf * ndesc * descsize;
1996 
1997 	/*
1998 	 * Setup DMA descriptor area.
1999 	 */
2000 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
2001 		       PAGE_SIZE, 0,		/* alignment, bounds */
2002 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2003 		       BUS_SPACE_MAXADDR,	/* highaddr */
2004 		       NULL, NULL,		/* filter, filterarg */
2005 		       dd->dd_desc_len,		/* maxsize */
2006 		       1,			/* nsegments */
2007 		       dd->dd_desc_len,		/* maxsegsize */
2008 		       BUS_DMA_ALLOCNOW,	/* flags */
2009 		       NULL,			/* lockfunc */
2010 		       NULL,			/* lockarg */
2011 		       &dd->dd_dmat);
2012 	if (error != 0) {
2013 		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2014 		return error;
2015 	}
2016 
2017 	/* allocate descriptors */
2018 	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
2019 	if (error != 0) {
2020 		if_printf(ifp, "unable to create dmamap for %s descriptors, "
2021 			"error %u\n", dd->dd_name, error);
2022 		goto fail0;
2023 	}
2024 
2025 	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2026 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2027 				 &dd->dd_dmamap);
2028 	if (error != 0) {
2029 		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2030 			"error %u\n", nbuf * ndesc, dd->dd_name, error);
2031 		goto fail1;
2032 	}
2033 
2034 	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2035 				dd->dd_desc, dd->dd_desc_len,
2036 				mwl_load_cb, &dd->dd_desc_paddr,
2037 				BUS_DMA_NOWAIT);
2038 	if (error != 0) {
2039 		if_printf(ifp, "unable to map %s descriptors, error %u\n",
2040 			dd->dd_name, error);
2041 		goto fail2;
2042 	}
2043 
2044 	ds = dd->dd_desc;
2045 	memset(ds, 0, dd->dd_desc_len);
2046 	DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2047 	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2048 	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
2049 
2050 	return 0;
2051 fail2:
2052 	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2053 fail1:
2054 	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
2055 fail0:
2056 	bus_dma_tag_destroy(dd->dd_dmat);
2057 	memset(dd, 0, sizeof(*dd));
2058 	return error;
2059 #undef DS2PHYS
2060 }
2061 
/*
 * Undo mwl_desc_setup: unload the map, free the descriptor
 * memory, destroy map and tag (in that order), then clear dd so
 * stale pointers cannot be reused.  NB: assumes dd was fully set
 * up; partial failures are handled inside mwl_desc_setup itself.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2072 
2073 /*
2074  * Construct a tx q's free list.  The order of entries on
2075  * the list must reflect the physical layout of tx descriptors
2076  * because the firmware pre-fetches descriptors.
2077  *
2078  * XXX might be better to use indices into the buffer array.
2079  */
2080 static void
2081 mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2082 {
2083 	struct mwl_txbuf *bf;
2084 	int i;
2085 
2086 	bf = txq->dma.dd_bufptr;
2087 	STAILQ_INIT(&txq->free);
2088 	for (i = 0; i < mwl_txbuf; i++, bf++)
2089 		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2090 	txq->nfree = i;
2091 }
2092 
2093 #define	DS2PHYS(_dd, _ds) \
2094 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2095 
/*
 * Set up tx DMA state for one queue: allocate the h/w descriptor
 * area, the parallel array of driver tx buffer structures, and a
 * dmamap per buffer; then build the free list.  Returns 0 or an
 * errno.  On failure, partially created state is reclaimed by
 * mwl_txdma_cleanup (the caller invokes mwl_dma_cleanup).
 */
static int
mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ifnet *ifp = sc->sc_ifp;
	int error, bsize, i;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;

	error = mwl_desc_setup(sc, "tx", &txq->dma,
			mwl_txbuf, sizeof(struct mwl_txbuf),
			MWL_TXDESC, sizeof(struct mwl_txdesc));
	if (error != 0)
		return error;

	/* allocate and setup tx buffers */
	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		if_printf(ifp, "malloc of %u tx buffers failed\n",
			mwl_txbuf);
		return ENOMEM;
	}
	txq->dma.dd_bufptr = bf;

	/* each buffer owns MWL_TXDESC consecutive h/w descriptors */
	ds = txq->dma.dd_desc;
	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			if_printf(ifp, "unable to create dmamap for tx "
				"buffer %u, error %u\n", i, error);
			return error;
		}
	}
	mwl_txq_reset(sc, txq);
	return 0;
}
2135 
2136 static void
2137 mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2138 {
2139 	struct mwl_txbuf *bf;
2140 	int i;
2141 
2142 	bf = txq->dma.dd_bufptr;
2143 	for (i = 0; i < mwl_txbuf; i++, bf++) {
2144 		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2145 		KASSERT(bf->bf_node == NULL, ("node on free list"));
2146 		if (bf->bf_dmamap != NULL)
2147 			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2148 	}
2149 	STAILQ_INIT(&txq->free);
2150 	txq->nfree = 0;
2151 	if (txq->dma.dd_bufptr != NULL) {
2152 		free(txq->dma.dd_bufptr, M_MWLDEV);
2153 		txq->dma.dd_bufptr = NULL;
2154 	}
2155 	if (txq->dma.dd_desc_len != 0)
2156 		mwl_desc_cleanup(sc, &txq->dma);
2157 }
2158 
2159 static int
2160 mwl_rxdma_setup(struct mwl_softc *sc)
2161 {
2162 	struct ifnet *ifp = sc->sc_ifp;
2163 	int error, jumbosize, bsize, i;
2164 	struct mwl_rxbuf *bf;
2165 	struct mwl_jumbo *rbuf;
2166 	struct mwl_rxdesc *ds;
2167 	caddr_t data;
2168 
2169 	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2170 			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2171 			1, sizeof(struct mwl_rxdesc));
2172 	if (error != 0)
2173 		return error;
2174 
2175 	/*
2176 	 * Receive is done to a private pool of jumbo buffers.
2177 	 * This allows us to attach to mbuf's and avoid re-mapping
2178 	 * memory on each rx we post.  We allocate a large chunk
2179 	 * of memory and manage it in the driver.  The mbuf free
2180 	 * callback method is used to reclaim frames after sending
2181 	 * them up the stack.  By default we allocate 2x the number of
2182 	 * rx descriptors configured so we have some slop to hold
2183 	 * us while frames are processed.
2184 	 */
2185 	if (mwl_rxbuf < 2*mwl_rxdesc) {
2186 		if_printf(ifp,
2187 		    "too few rx dma buffers (%d); increasing to %d\n",
2188 		    mwl_rxbuf, 2*mwl_rxdesc);
2189 		mwl_rxbuf = 2*mwl_rxdesc;
2190 	}
2191 	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2192 	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2193 
2194 	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2195 		       PAGE_SIZE, 0,		/* alignment, bounds */
2196 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2197 		       BUS_SPACE_MAXADDR,	/* highaddr */
2198 		       NULL, NULL,		/* filter, filterarg */
2199 		       sc->sc_rxmemsize,	/* maxsize */
2200 		       1,			/* nsegments */
2201 		       sc->sc_rxmemsize,	/* maxsegsize */
2202 		       BUS_DMA_ALLOCNOW,	/* flags */
2203 		       NULL,			/* lockfunc */
2204 		       NULL,			/* lockarg */
2205 		       &sc->sc_rxdmat);
2206 	error = bus_dmamap_create(sc->sc_rxdmat, BUS_DMA_NOWAIT, &sc->sc_rxmap);
2207 	if (error != 0) {
2208 		if_printf(ifp, "could not create rx DMA map\n");
2209 		return error;
2210 	}
2211 
2212 	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2213 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2214 				 &sc->sc_rxmap);
2215 	if (error != 0) {
2216 		if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2217 		    (uintmax_t) sc->sc_rxmemsize);
2218 		return error;
2219 	}
2220 
2221 	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2222 				sc->sc_rxmem, sc->sc_rxmemsize,
2223 				mwl_load_cb, &sc->sc_rxmem_paddr,
2224 				BUS_DMA_NOWAIT);
2225 	if (error != 0) {
2226 		if_printf(ifp, "could not load rx DMA map\n");
2227 		return error;
2228 	}
2229 
2230 	/*
2231 	 * Allocate rx buffers and set them up.
2232 	 */
2233 	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2234 	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2235 	if (bf == NULL) {
2236 		if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2237 		return error;
2238 	}
2239 	sc->sc_rxdma.dd_bufptr = bf;
2240 
2241 	STAILQ_INIT(&sc->sc_rxbuf);
2242 	ds = sc->sc_rxdma.dd_desc;
2243 	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2244 		bf->bf_desc = ds;
2245 		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2246 		/* pre-assign dma buffer */
2247 		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2248 		/* NB: tail is intentional to preserve descriptor order */
2249 		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2250 	}
2251 
2252 	/*
2253 	 * Place remainder of dma memory buffers on the free list.
2254 	 */
2255 	SLIST_INIT(&sc->sc_rxfree);
2256 	for (; i < mwl_rxbuf; i++) {
2257 		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2258 		rbuf = MWL_JUMBO_DATA2BUF(data);
2259 		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2260 		sc->sc_nrxfree++;
2261 	}
2262 	MWL_RXFREE_INIT(sc);
2263 	return 0;
2264 }
2265 #undef DS2PHYS
2266 
/*
 * Undo mwl_rxdma_setup: unload and release the jumbo buffer
 * region, free the rx buffer array, tear down the rx descriptor
 * DMA state, and destroy the rxfree lock.  Each step is guarded
 * by a NULL/zero check so this can run after a partial setup.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmap != NULL)
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxmap != NULL) {
		bus_dmamap_destroy(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmap = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
	MWL_RXFREE_DESTROY(sc);
}
2288 
2289 static int
2290 mwl_dma_setup(struct mwl_softc *sc)
2291 {
2292 	int error, i;
2293 
2294 	error = mwl_rxdma_setup(sc);
2295 	if (error != 0)
2296 		return error;
2297 
2298 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2299 		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2300 		if (error != 0) {
2301 			mwl_dma_cleanup(sc);
2302 			return error;
2303 		}
2304 	}
2305 	return 0;
2306 }
2307 
2308 static void
2309 mwl_dma_cleanup(struct mwl_softc *sc)
2310 {
2311 	int i;
2312 
2313 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2314 		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2315 	mwl_rxdma_cleanup(sc);
2316 }
2317 
2318 static struct ieee80211_node *
2319 mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2320 {
2321 	struct ieee80211com *ic = vap->iv_ic;
2322 	struct mwl_softc *sc = ic->ic_ifp->if_softc;
2323 	const size_t space = sizeof(struct mwl_node);
2324 	struct mwl_node *mn;
2325 
2326 	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2327 	if (mn == NULL) {
2328 		/* XXX stat+msg */
2329 		return NULL;
2330 	}
2331 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2332 	return &mn->mn_node;
2333 }
2334 
/*
 * Reclaim driver state attached to a node.  If the node holds a
 * station id, first remove its entry from the firmware station
 * db via whichever hal vap handle installed it, release the
 * staid, then chain to the saved net80211 cleanup method.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		/* NB: the comment below sits between "}" and "else";
		 * the else-if still binds to this if (legal C). */
		if (mn->mn_hvap != NULL) {
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	sc->sc_node_cleanup(ni);
}
2368 
2369 /*
2370  * Reclaim rx dma buffers from packets sitting on the ampdu
2371  * reorder queue for a station.  We replace buffers with a
2372  * system cluster (if available).
2373  */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
	/*
	 * NB: the body below is compiled out, so this function is
	 * currently a no-op.  The disabled code was written
	 * against an older mbuf/cluster API (MEXTREMOVE,
	 * pool_cache_get_paddr) and would need rework before it
	 * could be enabled.
	 */
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place.  Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}
2422 
2423 /*
2424  * Callback to reclaim resources.  We first let the
2425  * net80211 layer do it's thing, then if we are still
2426  * blocked by a lack of rx dma buffers we walk the ampdu
2427  * reorder q's to reclaim buffers by copying to a system
2428  * cluster.
2429  */
2430 static void
2431 mwl_node_drain(struct ieee80211_node *ni)
2432 {
2433 	struct ieee80211com *ic = ni->ni_ic;
2434         struct mwl_softc *sc = ic->ic_ifp->if_softc;
2435 	struct mwl_node *mn = MWL_NODE(ni);
2436 
2437 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2438 	    __func__, ni, ni->ni_vap, mn->mn_staid);
2439 
2440 	/* NB: call up first to age out ampdu q's */
2441 	sc->sc_node_drain(ni);
2442 
2443 	/* XXX better to not check low water mark? */
2444 	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2445 	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2446 		uint8_t tid;
2447 		/*
2448 		 * Walk the reorder q and reclaim rx dma buffers by copying
2449 		 * the packet contents into clusters.
2450 		 */
2451 		for (tid = 0; tid < WME_NUM_TID; tid++) {
2452 			struct ieee80211_rx_ampdu *rap;
2453 
2454 			rap = &ni->ni_rx_ampdu[tid];
2455 			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2456 				continue;
2457 			if (rap->rxa_qframes)
2458 				mwl_ampdu_rxdma_reclaim(rap);
2459 		}
2460 	}
2461 }
2462 
/*
 * Return rssi/noise for a node.  Rssi comes from the net80211
 * per-node estimate; noise is a fixed -95 dBm placeholder (the
 * per-node h/w noise floor in mn_ai is not yet smoothed/used,
 * per the XXX below).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2478 
2479 /*
2480  * Convert Hardware per-antenna rssi info to common format:
2481  * Let a1, a2, a3 represent the amplitudes per chain
2482  * Let amax represent max[a1, a2, a3]
2483  * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2484  * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2485  * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2486  * maintain some extra precision.
2487  *
2488  * Values are stored in .5 db format capped at 127.
2489  */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
/*
 * CVT converts a per-chain amplitude to a per-chain rssi in
 * .5 dB units capped at 127 (see the block comment above).
 * NB: assumes the per-chain values index logdbtbl, i.e. are
 * < 32 -- TODO confirm against the f/w antenna-info range.
 */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx); the scale factor keeps extra precision */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* strongest of the three chains is the reference */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->rssi[1], mn->mn_ai.rssi_b);
	CVT(mi->rssi[2], mn->mn_ai.rssi_c);

	mi->noise[0] = mn->mn_ai.nf_a;
	mi->noise[1] = mn->mn_ai.nf_b;
	mi->noise[2] = mn->mn_ai.nf_c;
#undef CVT
}
2523 
2524 static __inline void *
2525 mwl_getrxdma(struct mwl_softc *sc)
2526 {
2527 	struct mwl_jumbo *buf;
2528 	void *data;
2529 
2530 	/*
2531 	 * Allocate from jumbo pool.
2532 	 */
2533 	MWL_RXFREE_LOCK(sc);
2534 	buf = SLIST_FIRST(&sc->sc_rxfree);
2535 	if (buf == NULL) {
2536 		DPRINTF(sc, MWL_DEBUG_ANY,
2537 		    "%s: out of rx dma buffers\n", __func__);
2538 		sc->sc_stats.mst_rx_nodmabuf++;
2539 		data = NULL;
2540 	} else {
2541 		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2542 		sc->sc_nrxfree--;
2543 		data = MWL_JUMBO_BUF2DATA(buf);
2544 	}
2545 	MWL_RXFREE_UNLOCK(sc);
2546 	return data;
2547 }
2548 
2549 static __inline void
2550 mwl_putrxdma(struct mwl_softc *sc, void *data)
2551 {
2552 	struct mwl_jumbo *buf;
2553 
2554 	/* XXX bounds check data */
2555 	MWL_RXFREE_LOCK(sc);
2556 	buf = MWL_JUMBO_DATA2BUF(data);
2557 	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2558 	sc->sc_nrxfree++;
2559 	MWL_RXFREE_UNLOCK(sc);
2560 }
2561 
/*
 * (Re)initialize an rx descriptor: make sure it has a dma buffer
 * (replenishing from the jumbo pool if needed) and hand ownership
 * back to the firmware.  Returns ENOMEM when no dma buffer is
 * available; in that case the descriptor is marked so the f/w
 * skips it and will not advance past it.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2600 
/*
 * mbuf external-storage free callback: return the rx dma buffer
 * to the jumbo pool and, if rx processing was throttled for want
 * of buffers, re-enable interrupts once the pool refills past
 * the mwl_rxdmalow low-water mark.
 */
static void
mwl_ext_free(void *data, void *arg)
{
	struct mwl_softc *sc = arg;

	/* XXX bounds check data */
	mwl_putrxdma(sc, data);
	/*
	 * If we were previously blocked by a lack of rx dma buffers
	 * check if we now have enough to restart rx interrupt handling.
	 * NB: we know we are called at splvm which is above splnet.
	 */
	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
		sc->sc_rxblocked = 0;
		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
	}
}
2618 
/*
 * 802.11 BlockAckReq (BAR) control frame header.  Only the fixed
 * leading fields are declared; the trailing BAR control, sequence
 * control, and FCS are not needed for header-size computation.
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];	/* frame control */
	u_int8_t	i_dur[2];	/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2626 
2627 /*
2628  * Like ieee80211_anyhdrsize, but handles BAR frames
2629  * specially so the logic below to piece the 802.11
2630  * header together works.
2631  */
2632 static __inline int
2633 mwl_anyhdrsize(const void *data)
2634 {
2635 	const struct ieee80211_frame *wh = data;
2636 
2637 	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2638 		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2639 		case IEEE80211_FC0_SUBTYPE_CTS:
2640 		case IEEE80211_FC0_SUBTYPE_ACK:
2641 			return sizeof(struct ieee80211_frame_ack);
2642 		case IEEE80211_FC0_SUBTYPE_BAR:
2643 			return sizeof(struct mwl_frame_bar);
2644 		}
2645 		return sizeof(struct ieee80211_frame_min);
2646 	} else
2647 		return ieee80211_hdrsize(data);
2648 }
2649 
2650 static void
2651 mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2652 {
2653 	const struct ieee80211_frame *wh;
2654 	struct ieee80211_node *ni;
2655 
2656 	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2657 	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2658 	if (ni != NULL) {
2659 		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2660 		ieee80211_free_node(ni);
2661 	}
2662 }
2663 
2664 /*
2665  * Convert hardware signal strength to rssi.  The value
2666  * provided by the device has the noise floor added in;
2667  * we need to compensate for this but we don't have that
2668  * so we use a fixed value.
2669  *
2670  * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2671  * offset is already set as part of the initial gain.  This
2672  * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2673  */
static __inline int
cvtrssi(uint8_t ssi)
{
	int v;

	/* XXX hack guess until we have a real noise floor */
	v = 2 * (87 - ((int) ssi + 8));	/* NB: .5 dBm units */
	if (v < 0)
		return 0;
	if (v > 127)
		return 127;
	return v;
}
2682 
/*
 * Rx processing task.  Walk the rx descriptor ring reclaiming
 * completed frames: each frame's dma buffer is attached to an
 * mbuf (mwl_ext_free returns it to the jumbo pool later) and
 * replaced from the free pool, the 802.11 header delivered by
 * the f/w is pieced back into canonical form, and the frame is
 * dispatched to net80211.  At most mwl_rxquota frames are
 * handled per invocation; if the jumbo pool runs dry the rx
 * interrupt is masked until mwl_ext_free refills the pool.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX */
	/* resume where the previous invocation stopped */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor the f/w still owns */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			ifp->if_ierrors++;
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
		    data, sc, 0, EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			if (IEEE80211_DIR_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_WEP | IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		ifp->if_ipackets++;

		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	/* remember where to pick up next time */
	sc->sc_rxnext = bf;

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(ifp);
	}
#undef IEEE80211_DIR_DSTODS
}
2918 
2919 static void
2920 mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
2921 {
2922 	struct mwl_txbuf *bf, *bn;
2923 	struct mwl_txdesc *ds;
2924 
2925 	MWL_TXQ_LOCK_INIT(sc, txq);
2926 	txq->qnum = qnum;
2927 	txq->txpri = 0;	/* XXX */
2928 #if 0
2929 	/* NB: q setup by mwl_txdma_setup XXX */
2930 	STAILQ_INIT(&txq->free);
2931 #endif
2932 	STAILQ_FOREACH(bf, &txq->free, bf_list) {
2933 		bf->bf_txq = txq;
2934 
2935 		ds = bf->bf_desc;
2936 		bn = STAILQ_NEXT(bf, bf_list);
2937 		if (bn == NULL)
2938 			bn = STAILQ_FIRST(&txq->free);
2939 		ds->pPhysNext = htole32(bn->bf_daddr);
2940 	}
2941 	STAILQ_INIT(&txq->active);
2942 }
2943 
2944 /*
2945  * Setup a hardware data transmit queue for the specified
2946  * access control.  We record the mapping from ac's
2947  * to h/w queues for use by mwl_tx_start.
2948  */
2949 static int
2950 mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2951 {
2952 #define	N(a)	(sizeof(a)/sizeof(a[0]))
2953 	struct mwl_txq *txq;
2954 
2955 	if (ac >= N(sc->sc_ac2q)) {
2956 		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2957 			ac, N(sc->sc_ac2q));
2958 		return 0;
2959 	}
2960 	if (mvtype >= MWL_NUM_TX_QUEUES) {
2961 		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2962 			mvtype, MWL_NUM_TX_QUEUES);
2963 		return 0;
2964 	}
2965 	txq = &sc->sc_txq[mvtype];
2966 	mwl_txq_init(sc, txq, mvtype);
2967 	sc->sc_ac2q[ac] = txq;
2968 	return 1;
2969 #undef N
2970 }
2971 
2972 /*
2973  * Update WME parameters for a transmit queue.
2974  */
2975 static int
2976 mwl_txq_update(struct mwl_softc *sc, int ac)
2977 {
2978 #define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2979 	struct ifnet *ifp = sc->sc_ifp;
2980 	struct ieee80211com *ic = ifp->if_l2com;
2981 	struct mwl_txq *txq = sc->sc_ac2q[ac];
2982 	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
2983 	struct mwl_hal *mh = sc->sc_mh;
2984 	int aifs, cwmin, cwmax, txoplim;
2985 
2986 	aifs = wmep->wmep_aifsn;
2987 	/* XXX in sta mode need to pass log values for cwmin/max */
2988 	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2989 	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2990 	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
2991 
2992 	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
2993 		device_printf(sc->sc_dev, "unable to update hardware queue "
2994 			"parameters for %s traffic!\n",
2995 			ieee80211_wme_acnames[ac]);
2996 		return 0;
2997 	}
2998 	return 1;
2999 #undef MWL_EXPONENT_TO_VALUE
3000 }
3001 
3002 /*
3003  * Callback from the 802.11 layer to update WME parameters.
3004  */
3005 static int
3006 mwl_wme_update(struct ieee80211com *ic)
3007 {
3008 	struct mwl_softc *sc = ic->ic_ifp->if_softc;
3009 
3010 	return !mwl_txq_update(sc, WME_AC_BE) ||
3011 	    !mwl_txq_update(sc, WME_AC_BK) ||
3012 	    !mwl_txq_update(sc, WME_AC_VI) ||
3013 	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3014 }
3015 
3016 /*
3017  * Reclaim resources for a setup queue.
3018  */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* XXX hal work? */
	/*
	 * Only the queue lock is torn down here; buffer/descriptor
	 * reclamation is presumably done elsewhere (sc is unused) —
	 * TODO confirm against mwl_txdma_cleanup.
	 */
	MWL_TXQ_LOCK_DESTROY(txq);
}
3025 
3026 /*
3027  * Reclaim all tx queue resources.
3028  */
3029 static void
3030 mwl_tx_cleanup(struct mwl_softc *sc)
3031 {
3032 	int i;
3033 
3034 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3035 		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
3036 }
3037 
/*
 * DMA-map the mbuf chain m0 for transmit using tx buffer bf.
 * On success bf->bf_segs/bf->bf_nseg describe the segments,
 * bf->bf_m holds the (possibly replaced) mbuf chain, and 0 is
 * returned.  On any failure the mbuf chain is freed and an errno
 * is returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		/* NB: marker value checked below; real count set on reload */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_DONTWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_DONTWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		/* NB: collapse/defrag returned a new chain; adopt it */
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	/* flush CPU writes so the device sees the frame contents */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3099 
/*
 * Map a legacy IEEE rate value to the h/w rate index used in the
 * tx descriptor Format field; unknown rates map to index 0.
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int legacyrates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ix;

	for (ix = 0; ix < (int)(sizeof(legacyrates)/sizeof(legacyrates[0])); ix++)
		if (legacyrates[ix] == rate)
			return ix;
	return 0;
}
3120 
3121 /*
3122  * Calculate fixed tx rate information per client state;
3123  * this value is suitable for writing to the Format field
3124  * of a tx descriptor.
3125  */
3126 static uint16_t
3127 mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
3128 {
3129 	uint16_t fmt;
3130 
3131 	fmt = SM(3, EAGLE_TXD_ANTENNA)
3132 	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
3133 		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
3134 	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
3135 		fmt |= EAGLE_TXD_FORMAT_HT
3136 		    /* NB: 0x80 implicitly stripped from ucastrate */
3137 		    | SM(rate, EAGLE_TXD_RATE);
3138 		/* XXX short/long GI may be wrong; re-check */
3139 		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
3140 			fmt |= EAGLE_TXD_CHW_40
3141 			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
3142 			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3143 		} else {
3144 			fmt |= EAGLE_TXD_CHW_20
3145 			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
3146 			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3147 		}
3148 	} else {			/* legacy rate */
3149 		fmt |= EAGLE_TXD_FORMAT_LEGACY
3150 		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
3151 		    | EAGLE_TXD_CHW_20
3152 		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
3153 		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
3154 			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
3155 	}
3156 	return fmt;
3157 }
3158 
/*
 * Format a frame for transmit and hand it to the firmware:
 * build the f/w header (2-byte payload length + 4-address 802.11
 * header), DMA-map the mbuf, fill in the tx descriptor, and link
 * the buffer onto its queue's active list marked FW_OWNED.
 * On success returns 0 and the node reference is recorded in
 * bf->bf_node; on error the mbuf is freed, an errno is returned,
 * and the node reference is NOT released here.
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	copyhdrlen = hdrlen;
	pktlen = m0->m_pkthdr.len;
	/*
	 * Extract the QoS control field (little-endian, straight from
	 * the frame) for descriptor use below.
	 * NB: copyhdrlen is computed but not consumed in this function.
	 */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		if (IEEE80211_DIR_DSTODS(wh)) {
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
			copyhdrlen -= sizeof(qos);
		} else
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe *) wh)->i_qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		/* NB: leading space was verified above so this can't fail */
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/*
			 * Route the frame to the queue of a matching BA
			 * stream (if any); otherwise use the default queue.
			 * NB: EAPOL frames will never have qos set
			 */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	MWL_TXQ_LOCK(txq);
	/* hand ownership to the firmware and queue for completion */
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ifp->if_opackets++;
	ifp->if_timer = 5;
	MWL_TXQ_UNLOCK(txq);

	return 0;
#undef	IEEE80211_DIR_DSTODS
}
3407 
3408 static __inline int
3409 mwl_cvtlegacyrix(int rix)
3410 {
3411 #define	N(x)	(sizeof(x)/sizeof(x[0]))
3412 	static const int ieeerates[] =
3413 	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
3414 	return (rix < N(ieeerates) ? ieeerates[rix] : 0);
3415 #undef N
3416 }
3417 
3418 /*
3419  * Process completed xmit descriptors from the specified queue.
3420  */
3421 static int
3422 mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3423 {
3424 #define	EAGLE_TXD_STATUS_MCAST \
3425 	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
3426 	struct ifnet *ifp = sc->sc_ifp;
3427 	struct ieee80211com *ic = ifp->if_l2com;
3428 	struct mwl_txbuf *bf;
3429 	struct mwl_txdesc *ds;
3430 	struct ieee80211_node *ni;
3431 	struct mwl_node *an;
3432 	int nreaped;
3433 	uint32_t status;
3434 
3435 	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
3436 	for (nreaped = 0;; nreaped++) {
3437 		MWL_TXQ_LOCK(txq);
3438 		bf = STAILQ_FIRST(&txq->active);
3439 		if (bf == NULL) {
3440 			MWL_TXQ_UNLOCK(txq);
3441 			break;
3442 		}
3443 		ds = bf->bf_desc;
3444 		MWL_TXDESC_SYNC(txq, ds,
3445 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3446 		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3447 			MWL_TXQ_UNLOCK(txq);
3448 			break;
3449 		}
3450 		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3451 		MWL_TXQ_UNLOCK(txq);
3452 
3453 #ifdef MWL_DEBUG
3454 		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3455 			mwl_printtxbuf(bf, txq->qnum, nreaped);
3456 #endif
3457 		ni = bf->bf_node;
3458 		if (ni != NULL) {
3459 			an = MWL_NODE(ni);
3460 			status = le32toh(ds->Status);
3461 			if (status & EAGLE_TXD_STATUS_OK) {
3462 				uint16_t Format = le16toh(ds->Format);
3463 				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);
3464 
3465 				sc->sc_stats.mst_ant_tx[txant]++;
3466 				if (status & EAGLE_TXD_STATUS_OK_RETRY)
3467 					sc->sc_stats.mst_tx_retries++;
3468 				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3469 					sc->sc_stats.mst_tx_mretries++;
3470 				if (txq->qnum >= MWL_WME_AC_VO)
3471 					ic->ic_wme.wme_hipri_traffic++;
3472 				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
3473 				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3474 					ni->ni_txrate = mwl_cvtlegacyrix(
3475 					    ni->ni_txrate);
3476 				} else
3477 					ni->ni_txrate |= IEEE80211_RATE_MCS;
3478 				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
3479 			} else {
3480 				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3481 					sc->sc_stats.mst_tx_linkerror++;
3482 				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3483 					sc->sc_stats.mst_tx_xretries++;
3484 				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3485 					sc->sc_stats.mst_tx_aging++;
3486 				if (bf->bf_m->m_flags & M_FF)
3487 					sc->sc_stats.mst_ff_txerr++;
3488 			}
3489 			/*
3490 			 * Do any tx complete callback.  Note this must
3491 			 * be done before releasing the node reference.
3492 			 * XXX no way to figure out if frame was ACK'd
3493 			 */
3494 			if (bf->bf_m->m_flags & M_TXCB) {
3495 				/* XXX strip fw len in case header inspected */
3496 				m_adj(bf->bf_m, sizeof(uint16_t));
3497 				ieee80211_process_callback(ni, bf->bf_m,
3498 					(status & EAGLE_TXD_STATUS_OK) == 0);
3499 			}
3500 			/*
3501 			 * Reclaim reference to node.
3502 			 *
3503 			 * NB: the node may be reclaimed here if, for example
3504 			 *     this is a DEAUTH message that was sent and the
3505 			 *     node was timed out due to inactivity.
3506 			 */
3507 			ieee80211_free_node(ni);
3508 		}
3509 		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3510 
3511 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3512 		    BUS_DMASYNC_POSTWRITE);
3513 		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3514 		m_freem(bf->bf_m);
3515 
3516 		mwl_puttxbuf_tail(txq, bf);
3517 	}
3518 	return nreaped;
3519 #undef EAGLE_TXD_STATUS_MCAST
3520 }
3521 
3522 /*
3523  * Deferred processing of transmit interrupt; special-cased
3524  * for four hardware queues, 0-3.
3525  */
3526 static void
3527 mwl_tx_proc(void *arg, int npending)
3528 {
3529 	struct mwl_softc *sc = arg;
3530 	struct ifnet *ifp = sc->sc_ifp;
3531 	int nreaped;
3532 
3533 	/*
3534 	 * Process each active queue.
3535 	 */
3536 	nreaped = 0;
3537 	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3538 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3539 	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3540 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3541 	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3542 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3543 	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3544 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3545 
3546 	if (nreaped != 0) {
3547 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3548 		ifp->if_timer = 0;
3549 		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
3550 			/* NB: kick fw; the tx thread may have been preempted */
3551 			mwl_hal_txstart(sc->sc_mh, 0);
3552 			mwl_start(ifp);
3553 		}
3554 	}
3555 }
3556 
/*
 * Drain (discard) every frame queued on a single transmit queue:
 * unload DMA mappings, release node references, free the mbufs,
 * and return each buffer to the queue's free list.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;		/* frame counter, used only for debug output */

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ifnet *ifp = sc->sc_ifp;
			struct ieee80211com *ic = ifp->if_l2com;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3601 
3602 /*
3603  * Drain the transmit queues and reclaim resources.
3604  */
3605 static void
3606 mwl_draintxq(struct mwl_softc *sc)
3607 {
3608 	struct ifnet *ifp = sc->sc_ifp;
3609 	int i;
3610 
3611 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3612 		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3613 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3614 	ifp->if_timer = 0;
3615 }
3616 
3617 #ifdef MWL_DIAGAPI
3618 /*
3619  * Reset the transmit queues to a pristine state after a fw download.
3620  */
3621 static void
3622 mwl_resettxq(struct mwl_softc *sc)
3623 {
3624 	int i;
3625 
3626 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3627 		mwl_txq_reset(sc, &sc->sc_txq[i]);
3628 }
3629 #endif /* MWL_DIAGAPI */
3630 
3631 /*
3632  * Clear the transmit queues of any frames submitted for the
3633  * specified vap.  This is done when the vap is deleted so we
3634  * don't potentially reference the vap after it is gone.
3635  * Note we cannot remove the frames; we only reclaim the node
3636  * reference.
3637  */
3638 static void
3639 mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3640 {
3641 	struct mwl_txq *txq;
3642 	struct mwl_txbuf *bf;
3643 	int i;
3644 
3645 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3646 		txq = &sc->sc_txq[i];
3647 		MWL_TXQ_LOCK(txq);
3648 		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3649 			struct ieee80211_node *ni = bf->bf_node;
3650 			if (ni != NULL && ni->ni_vap == vap) {
3651 				bf->bf_node = NULL;
3652 				ieee80211_free_node(ni);
3653 			}
3654 		}
3655 		MWL_TXQ_UNLOCK(txq);
3656 	}
3657 }
3658 
3659 static void
3660 mwl_recv_action(struct ieee80211_node *ni, const uint8_t *frm, const uint8_t *efrm)
3661 {
3662 	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3663 	const struct ieee80211_action *ia;
3664 
3665 	ia = (const struct ieee80211_action *) frm;
3666 	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3667 	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3668 		const struct ieee80211_action_ht_mimopowersave *mps =
3669 		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3670 
3671 		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3672 		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3673 		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3674 	} else
3675 		sc->sc_recv_action(ni, frm, efrm);
3676 }
3677 
/*
 * Intercept the net80211 ADDBA request path: before letting the
 * request go out, reserve a firmware BA stream slot for this node
 * and fetch the current sequence number.  Returning 0 refuses
 * aggregation (no frame is sent); otherwise we chain to the saved
 * net80211 handler.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 * NB: slots are probed highest index first; the chain
		 *     length is set by the MWL_MAXBA build option.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, WME_AC_TO_TID(tap->txa_ac), ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3749 
/*
 * Intercept the net80211 ADDBA response path: on a successful
 * exchange create the firmware BA stream that was pre-allocated
 * in mwl_addba_request; on failure (NAK or f/w create error)
 * release the pre-allocated stream.  Always chains to the saved
 * net80211 handler except on the f/w-create error path.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, AC %d\n",
		    __func__, tap->txa_ac);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d AC %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_ac, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_ac, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d AC %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_ac, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3816 
3817 static void
3818 mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3819 {
3820 	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3821 	struct mwl_bastate *bas;
3822 
3823 	bas = tap->txa_private;
3824 	if (bas != NULL) {
3825 		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3826 		    __func__, bas->bastream);
3827 		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3828 		mwl_bastream_free(bas);
3829 		tap->txa_private = NULL;
3830 	}
3831 	sc->sc_addba_stop(ni, tap);
3832 }
3833 
3834 /*
3835  * Setup the rx data structures.  This should only be
3836  * done once or we may get out of sync with the firmware.
3837  */
3838 static int
3839 mwl_startrecv(struct mwl_softc *sc)
3840 {
3841 	if (!sc->sc_recvsetup) {
3842 		struct mwl_rxbuf *bf, *prev;
3843 		struct mwl_rxdesc *ds;
3844 
3845 		prev = NULL;
3846 		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
3847 			int error = mwl_rxbuf_init(sc, bf);
3848 			if (error != 0) {
3849 				DPRINTF(sc, MWL_DEBUG_RECV,
3850 					"%s: mwl_rxbuf_init failed %d\n",
3851 					__func__, error);
3852 				return error;
3853 			}
3854 			if (prev != NULL) {
3855 				ds = prev->bf_desc;
3856 				ds->pPhysNext = htole32(bf->bf_daddr);
3857 			}
3858 			prev = bf;
3859 		}
3860 		if (prev != NULL) {
3861 			ds = prev->bf_desc;
3862 			ds->pPhysNext =
3863 			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
3864 		}
3865 		sc->sc_recvsetup = 1;
3866 	}
3867 	mwl_mode_init(sc);		/* set filters, etc. */
3868 	return 0;
3869 }
3870 
3871 static MWL_HAL_APMODE
3872 mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3873 {
3874 	MWL_HAL_APMODE mode;
3875 
3876 	if (IEEE80211_IS_CHAN_HT(chan)) {
3877 		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3878 			mode = AP_MODE_N_ONLY;
3879 		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3880 			mode = AP_MODE_AandN;
3881 		else if (vap->iv_flags & IEEE80211_F_PUREG)
3882 			mode = AP_MODE_GandN;
3883 		else
3884 			mode = AP_MODE_BandGandN;
3885 	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3886 		if (vap->iv_flags & IEEE80211_F_PUREG)
3887 			mode = AP_MODE_G_ONLY;
3888 		else
3889 			mode = AP_MODE_MIXED;
3890 	} else if (IEEE80211_IS_CHAN_B(chan))
3891 		mode = AP_MODE_B_ONLY;
3892 	else if (IEEE80211_IS_CHAN_A(chan))
3893 		mode = AP_MODE_A_ONLY;
3894 	else
3895 		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3896 	return mode;
3897 }
3898 
3899 static int
3900 mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3901 {
3902 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3903 	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3904 }
3905 
3906 /*
3907  * Set/change channels.
3908  */
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
	struct mwl_hal *mh = sc->sc_mh;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	MWL_HAL_CHANNEL hchan;
	int maxtxpow;

	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	/*
	 * Convert to a HAL channel description with
	 * the flags constrained to reflect the current
	 * operating mode.
	 */
	mwl_mapchan(&hchan, chan);
	mwl_hal_intrset(mh, 0);		/* disable interrupts */
#if 0
	mwl_draintxq(sc);		/* clear pending tx frames */
#endif
	mwl_hal_setchannel(mh, &hchan);
	/*
	 * Tx power is cap'd by the regulatory setting and
	 * possibly a user-set limit.  We pass the min of
	 * these to the hal to apply them to the cal data
	 * for this channel.
	 * XXX min bound?
	 */
	maxtxpow = 2*chan->ic_maxregpower;	/* NB: 2x = half-dBm scale */
	if (maxtxpow > ic->ic_txpowlimit)
		maxtxpow = ic->ic_txpowlimit;
	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);	/* back to dBm */
	/* NB: potentially change mcast/mgt rates */
	mwl_setcurchanrates(sc);

	/*
	 * Update internal state.
	 */
	/* radiotap tx/rx headers record the new channel */
	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
	if (IEEE80211_IS_CHAN_A(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
	} else {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
	}
	sc->sc_curchan = hchan;
	mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable interrupts */

	return 0;
}
3966 
3967 static void
3968 mwl_scan_start(struct ieee80211com *ic)
3969 {
3970 	struct ifnet *ifp = ic->ic_ifp;
3971 	struct mwl_softc *sc = ifp->if_softc;
3972 
3973 	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
3974 }
3975 
3976 static void
3977 mwl_scan_end(struct ieee80211com *ic)
3978 {
3979 	struct ifnet *ifp = ic->ic_ifp;
3980 	struct mwl_softc *sc = ifp->if_softc;
3981 
3982 	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
3983 }
3984 
3985 static void
3986 mwl_set_channel(struct ieee80211com *ic)
3987 {
3988 	struct ifnet *ifp = ic->ic_ifp;
3989 	struct mwl_softc *sc = ifp->if_softc;
3990 
3991 	(void) mwl_chan_set(sc, ic->ic_curchan);
3992 }
3993 
3994 /*
3995  * Handle a channel switch request.  We inform the firmware
3996  * and mark the global state to suppress various actions.
3997  * NB: we issue only one request to the fw; we may be called
3998  * multiple times if there are multiple vap's.
3999  */
static void
mwl_startcsa(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct mwl_softc *sc = ic->ic_ifp->if_softc;
	MWL_HAL_CHANNEL hchan;

	/* NB: only one f/w request even if multiple vaps call in */
	if (sc->sc_csapending)
		return;

	mwl_mapchan(&hchan, ic->ic_csa_newchan);
	/* 1 =>'s quiet channel */
	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
	sc->sc_csapending = 1;
}
4015 
4016 /*
4017  * Plumb any static WEP key for the station.  This is
4018  * necessary as we must propagate the key from the
4019  * global key table of the vap to each sta db entry.
4020  */
static void
mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	/*
	 * Only act when running static WEP: privacy on, WPA off,
	 * and a valid default tx key that is actually present in
	 * the vap's global key table.
	 */
	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
		IEEE80211_F_PRIVACY &&
	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
		(void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
}
4030 
/*
 * Create/overwrite the firmware station db entry for a peer and
 * (re)plumb any static WEP key.  pi may be NULL to install a
 * placeholder entry (e.g. pre-association).
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
4064 
4065 static void
4066 mwl_setglobalkeys(struct ieee80211vap *vap)
4067 {
4068 	struct ieee80211_key *wk;
4069 
4070 	wk = &vap->iv_nw_keys[0];
4071 	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4072 		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4073 			(void) mwl_key_set(vap, wk, vap->iv_myaddr);
4074 }
4075 
4076 /*
4077  * Convert a legacy rate set to a firmware bitmask.
4078  */
4079 static uint32_t
4080 get_rate_bitmap(const struct ieee80211_rateset *rs)
4081 {
4082 	uint32_t rates;
4083 	int i;
4084 
4085 	rates = 0;
4086 	for (i = 0; i < rs->rs_nrates; i++)
4087 		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4088 		case 2:	  rates |= 0x001; break;
4089 		case 4:	  rates |= 0x002; break;
4090 		case 11:  rates |= 0x004; break;
4091 		case 22:  rates |= 0x008; break;
4092 		case 44:  rates |= 0x010; break;
4093 		case 12:  rates |= 0x020; break;
4094 		case 18:  rates |= 0x040; break;
4095 		case 24:  rates |= 0x080; break;
4096 		case 36:  rates |= 0x100; break;
4097 		case 48:  rates |= 0x200; break;
4098 		case 72:  rates |= 0x400; break;
4099 		case 96:  rates |= 0x800; break;
4100 		case 108: rates |= 0x1000; break;
4101 		}
4102 	return rates;
4103 }
4104 
4105 /*
4106  * Construct an HT firmware bitmask from an HT rate set.
4107  */
4108 static uint32_t
4109 get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4110 {
4111 	uint32_t rates;
4112 	int i;
4113 
4114 	rates = 0;
4115 	for (i = 0; i < rs->rs_nrates; i++) {
4116 		if (rs->rs_rates[i] < 16)
4117 			rates |= 1<<rs->rs_rates[i];
4118 	}
4119 	return rates;
4120 }
4121 
4122 /*
4123  * Craft station database entry for station.
4124  * NB: use host byte order here, the hal handles byte swapping.
4125  */
static MWL_HAL_PEERINFO *
mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
{
	const struct ieee80211vap *vap = ni->ni_vap;

	memset(pi, 0, sizeof(*pi));
	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
	pi->CapInfo = ni->ni_capinfo;
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* HT capabilities, etc */
		pi->HTCapabilitiesInfo = ni->ni_htcap;
		/* XXX pi.HTCapabilitiesInfo */
	        pi->MacHTParamInfo = ni->ni_htparam;
		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
		pi->AddHtInfo.OpMode = ni->ni_htopmode;
		pi->AddHtInfo.stbc = ni->ni_htstbc;

		/* constrain according to local configuration */
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
		/* NB: strip the 40MHz cap unless the node's width is 40 */
		if (ni->ni_chw != 40)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
	}
	return pi;
}
4155 
4156 /*
4157  * Re-create the local sta db entry for a vap to ensure
4158  * up to date WME state is pushed to the firmware.  Because
4159  * this resets crypto state this must be followed by a
4160  * reload of any keys in the global key table.
4161  */
static int
mwl_localstadb(struct ieee80211vap *vap)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *bss;
	MWL_HAL_PEERINFO pi;
	int error;

	switch (vap->iv_opmode) {
	case IEEE80211_M_STA:
		bss = vap->iv_bss;
		/* NB: peer info is only meaningful once associated (RUN) */
		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
		    vap->iv_state == IEEE80211_S_RUN ?
			mkpeerinfo(&pi, bss) : NULL,
		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
		    bss->ni_ies.wme_ie != NULL ?
			WME(bss->ni_ies.wme_ie)->wme_info : 0);
		/* NB: newstation clobbers crypto state; reload keys */
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	case IEEE80211_M_HOSTAP:
		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	default:
		error = 0;
		break;
	}
	return error;
#undef WME
}
4196 
/*
 * net80211 state machine hook.  Do device-side work both before
 * and after the parent (net80211) state change is processed.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	callout_stop(&sc->sc_timer);	/* stop the station age timer */
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* first DWDS vap to reach RUN enables f/w DWDS */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/* NOTE(review): this decrements on any other transition,
		 * not only when leaving RUN -- verify sc_ndwdsvaps cannot
		 * underflow for a DWDS vap that never reached RUN. */
		mwl_hal_setdwds(mh, 0);
bad:	/* NB: also reached on the successful RUN path */
	return error;
}
4348 
4349 /*
4350  * Manage station id's; these are separate from AID's
4351  * as AID's may have values out of the range of possible
4352  * station id's acceptable to the firmware.
4353  */
/*
 * Allocate a firmware station id, preferring the AID when it is
 * in range and unused.
 */
static int
allocstaid(struct mwl_softc *sc, int aid)
{
	int staid;

	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
		/* NB: don't use 0 */
		/* NOTE(review): if the map is completely full the loop
		 * falls out with staid == MWL_MAXSTAID; verify callers
		 * cannot exhaust the id space. */
		for (staid = 1; staid < MWL_MAXSTAID; staid++)
			if (isclr(sc->sc_staid, staid))
				break;
	} else
		staid = aid;
	setbit(sc->sc_staid, staid);
	return staid;
}
4369 
/* Release a previously allocated station id back to the pool. */
static void
delstaid(struct mwl_softc *sc, int staid)
{
	clrbit(sc->sc_staid, staid);
}
4375 
4376 /*
4377  * Setup driver-specific state for a newly associated node.
4378  * Note that we're called also on a re-associate, the isnew
4379  * param tells us if this is the first time or not.
4380  */
4381 static void
4382 mwl_newassoc(struct ieee80211_node *ni, int isnew)
4383 {
4384 	struct ieee80211vap *vap = ni->ni_vap;
4385         struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4386 	struct mwl_node *mn = MWL_NODE(ni);
4387 	MWL_HAL_PEERINFO pi;
4388 	uint16_t aid;
4389 	int error;
4390 
4391 	aid = IEEE80211_AID(ni->ni_associd);
4392 	if (isnew) {
4393 		mn->mn_staid = allocstaid(sc, aid);
4394 		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4395 	} else {
4396 		mn = MWL_NODE(ni);
4397 		/* XXX reset BA stream? */
4398 	}
4399 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4400 	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4401 	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4402 	if (error != 0) {
4403 		DPRINTF(sc, MWL_DEBUG_NODE,
4404 		    "%s: error %d creating sta db entry\n",
4405 		    __func__, error);
4406 		/* XXX how to deal with error? */
4407 	}
4408 }
4409 
4410 /*
4411  * Periodically poke the firmware to age out station state
4412  * (power save queues, pending tx aggregates).
4413  */
static void
mwl_agestations(void *arg)
{
	struct mwl_softc *sc = arg;

	/* prod the f/w; station aging happens on the firmware side */
	mwl_hal_setkeepalive(sc->sc_mh);
	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
}
4423 
4424 static const struct mwl_hal_channel *
4425 findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4426 {
4427 	int i;
4428 
4429 	for (i = 0; i < ci->nchannels; i++) {
4430 		const struct mwl_hal_channel *hc = &ci->channels[i];
4431 		if (hc->ieee == ieee)
4432 			return hc;
4433 	}
4434 	return NULL;
4435 }
4436 
/*
 * ic_setregdomain hook: validate a proposed channel list against
 * the hal's calibration data and clamp each channel's max tx power.
 */
static int
mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
	int nchan, struct ieee80211_channel chans[])
{
	struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_CHANNELINFO *ci;
	int i;

	for (i = 0; i < nchan; i++) {
		struct ieee80211_channel *c = &chans[i];
		const struct mwl_hal_channel *hc;

		/* fetch the hal channel table for this band/width */
		if (IEEE80211_IS_CHAN_2GHZ(c)) {
			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
			    IEEE80211_IS_CHAN_HT40(c) ?
				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
			    IEEE80211_IS_CHAN_HT40(c) ?
				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
		} else {
			if_printf(ic->ic_ifp,
			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
			return EINVAL;
		}
		/*
		 * Verify channel has cal data and cap tx power.
		 */
		hc = findhalchannel(ci, c->ic_ieee);
		if (hc != NULL) {
			if (c->ic_maxpower > 2*hc->maxTxPow)
				c->ic_maxpower = 2*hc->maxTxPow;
			goto next;
		}
		if (IEEE80211_IS_CHAN_HT40(c)) {
			/*
			 * Look for the extension channel since the
			 * hal table only has the primary channel.
			 */
			hc = findhalchannel(ci, c->ic_extieee);
			if (hc != NULL) {
				if (c->ic_maxpower > 2*hc->maxTxPow)
					c->ic_maxpower = 2*hc->maxTxPow;
				goto next;
			}
		}
		/* no cal data anywhere: reject the whole list */
		if_printf(ic->ic_ifp,
		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
		    __func__, c->ic_ieee, c->ic_extieee,
		    c->ic_freq, c->ic_flags);
		return EINVAL;
	next:
		;
	}
	return 0;
}
4495 
4496 #define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4497 #define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4498 
4499 static void
4500 addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
4501 {
4502 	c->ic_freq = freq;
4503 	c->ic_flags = flags;
4504 	c->ic_ieee = ieee;
4505 	c->ic_minpower = 0;
4506 	c->ic_maxpower = 2*txpow;
4507 	c->ic_maxregpower = txpow;
4508 }
4509 
4510 static const struct ieee80211_channel *
4511 findchannel(const struct ieee80211_channel chans[], int nchans,
4512 	int freq, int flags)
4513 {
4514 	const struct ieee80211_channel *c;
4515 	int i;
4516 
4517 	for (i = 0; i < nchans; i++) {
4518 		c = &chans[i];
4519 		if (c->ic_freq == freq && c->ic_flags == flags)
4520 			return c;
4521 	}
4522 	return NULL;
4523 }
4524 
/*
 * Append HT40 channel pairs.  Relies on the HT20 entries having
 * been added to chans[] already (they are located via findchannel).
 */
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	const struct ieee80211_channel *extc;
	const struct mwl_hal_channel *hc;
	int i;

	c = &chans[*nchans];

	/* NB: HT is re-expressed below via the HT40U/HT40D flags */
	flags &= ~IEEE80211_CHAN_HT;
	for (i = 0; i < ci->nchannels; i++) {
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * extension channel above and then insert the pair.
		 */
		hc = &ci->channels[i];
		extc = findchannel(chans, *nchans, hc->freq+20,
		    flags | IEEE80211_CHAN_HT20);
		if (extc != NULL) {
			if (*nchans >= maxchans)
				break;
			/* primary channel, extension above */
			addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
			    hc->ieee, hc->maxTxPow);
			c->ic_extieee = extc->ic_ieee;
			c++, (*nchans)++;
			if (*nchans >= maxchans)
				break;
			/* extension channel, primary below */
			addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
			    extc->ic_ieee, hc->maxTxPow);
			c->ic_extieee = hc->ieee;
			c++, (*nchans)++;
		}
	}
}
4561 
/*
 * Append one channel entry per hal channel, plus extra entries
 * for the legacy/HT20 variants a channel supports.  The c[-1]
 * manipulation retags the slot just written while c[0] receives
 * a copy for the next variant.
 */
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	c = &chans[*nchans];

	for (i = 0; i < ci->nchannels; i++) {
		const struct mwl_hal_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;
		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channels have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			/* copy the just-added entry forward, retag it as 11b */
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channels have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channels have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}
4609 
static void
getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
	struct ieee80211_channel chans[])
{
	const MWL_HAL_CHANNELINFO *ci;

	/*
	 * Use the channel info from the hal to craft the
	 * channel list.  Note that we pass back an unsorted
	 * list; the caller is required to sort it for us
	 * (if desired).
	 *
	 * NB: the 20MHz entries must be added before the HT40
	 * pairs; addht40channels locates the HT20 extension
	 * entries added here via findchannel.
	 */
	*nchans = 0;
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
}
4636 
4637 static void
4638 mwl_getradiocaps(struct ieee80211com *ic,
4639 	int maxchans, int *nchans, struct ieee80211_channel chans[])
4640 {
4641 	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4642 
4643 	getchannels(sc, maxchans, nchans, chans);
4644 }
4645 
static int
mwl_getchannels(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	/*
	 * Use the channel info from the hal to craft the
	 * channel list for net80211.  Note that we pass up
	 * an unsorted list; net80211 will sort it for us.
	 */
	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
	ic->ic_nchans = 0;
	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);

	/* default regulatory state; userland may override later */
	ic->ic_regdomain.regdomain = SKU_DEBUG;
	ic->ic_regdomain.country = CTRY_DEFAULT;
	ic->ic_regdomain.location = 'I';
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
	ic->ic_regdomain.isocc[1] = ' ';
	/* fail if the hal produced no usable channels */
	return (ic->ic_nchans == 0 ? EIO : 0);
}
4668 #undef IEEE80211_CHAN_HTA
4669 #undef IEEE80211_CHAN_HTG
4670 
4671 #ifdef MWL_DEBUG
/*
 * Dump one rx descriptor for debugging.
 */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	/* "*" => f/w reports ok, "!" => error status (device-owned only) */
	printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (const struct mwl_desc *)bf->bf_daddr,
	    le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData),
	    ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2))
4688 
/*
 * Dump one tx descriptor for debugging.
 */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:%p)\n",
	    ds, (const struct mwl_txdesc *)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !",
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
/* raw hex dump of the descriptor, disabled by default */
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4731 #endif /* MWL_DEBUG */
4732 
4733 #if 0
/*
 * Walk a tx queue's active list and print each descriptor
 * (debug aid; the enclosing #if 0 keeps it compiled out).
 */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* sync so we observe what the device wrote */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4752 #endif
4753 
/*
 * Transmit watchdog: probe the firmware with a keepalive to
 * distinguish a wedged firmware from a plain tx timeout.
 */
static void
mwl_watchdog(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
		/* nonzero keepalive result suggests the f/w is hung */
		if (mwl_hal_setkeepalive(sc->sc_mh))
			if_printf(ifp, "transmit timeout (firmware hung?)\n");
		else
			if_printf(ifp, "transmit timeout\n");
#if 0
		mwl_reset(ifp);
mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
#endif
		ifp->if_oerrors++;
		sc->sc_stats.mst_watchdog++;
	}
}
4772 
4773 #ifdef MWL_DIAGAPI
4774 /*
4775  * Diagnostic interface to the HAL.  This is used by various
4776  * tools to do things like retrieve register contents for
4777  * debugging.  The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatibility.
4779  */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		/* copy out at most what the caller asked for */
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
					md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* NB: only reclaim buffers we allocated ourselves */
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4834 
/*
 * Diagnostic reset: optionally reload firmware, re-fetch h/w
 * specs, and rebuild tx/rx dma state.  Caller holds the softc lock.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	/* md_id == 0 requests a full firmware reload */
	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4865 #endif /* MWL_DIAGAPI */
4866 
4867 static int
4868 mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
4869 {
4870 #define	IS_RUNNING(ifp) \
4871 	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
4872 	struct mwl_softc *sc = ifp->if_softc;
4873 	struct ieee80211com *ic = ifp->if_l2com;
4874 	struct ifreq *ifr = (struct ifreq *)data;
4875 	int error = 0, startall;
4876 
4877 	switch (cmd) {
4878 	case SIOCSIFFLAGS:
4879 		MWL_LOCK(sc);
4880 		startall = 0;
4881 		if (IS_RUNNING(ifp)) {
4882 			/*
4883 			 * To avoid rescanning another access point,
4884 			 * do not call mwl_init() here.  Instead,
4885 			 * only reflect promisc mode settings.
4886 			 */
4887 			mwl_mode_init(sc);
4888 		} else if (ifp->if_flags & IFF_UP) {
4889 			/*
4890 			 * Beware of being called during attach/detach
4891 			 * to reset promiscuous mode.  In that case we
4892 			 * will still be marked UP but not RUNNING.
4893 			 * However trying to re-init the interface
4894 			 * is the wrong thing to do as we've already
4895 			 * torn down much of our state.  There's
4896 			 * probably a better way to deal with this.
4897 			 */
4898 			if (!sc->sc_invalid) {
4899 				mwl_init_locked(sc);	/* XXX lose error */
4900 				startall = 1;
4901 			}
4902 		} else
4903 			mwl_stop_locked(ifp, 1);
4904 		MWL_UNLOCK(sc);
4905 		if (startall)
4906 			ieee80211_start_all(ic);
4907 		break;
4908 	case SIOCGMVSTATS:
4909 		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
4910 		/* NB: embed these numbers to get a consistent view */
4911 		sc->sc_stats.mst_tx_packets = ifp->if_opackets;
4912 		sc->sc_stats.mst_rx_packets = ifp->if_ipackets;
4913 		/*
4914 		 * NB: Drop the softc lock in case of a page fault;
4915 		 * we'll accept any potential inconsisentcy in the
4916 		 * statistics.  The alternative is to copy the data
4917 		 * to a local structure.
4918 		 */
4919 		return copyout(&sc->sc_stats,
4920 				ifr->ifr_data, sizeof (sc->sc_stats));
4921 #ifdef MWL_DIAGAPI
4922 	case SIOCGMVDIAG:
4923 		/* XXX check privs */
4924 		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
4925 	case SIOCGMVRESET:
4926 		/* XXX check privs */
4927 		MWL_LOCK(sc);
4928 		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
4929 		MWL_UNLOCK(sc);
4930 		break;
4931 #endif /* MWL_DIAGAPI */
4932 	case SIOCGIFMEDIA:
4933 		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
4934 		break;
4935 	case SIOCGIFADDR:
4936 		error = ether_ioctl(ifp, cmd, data);
4937 		break;
4938 	default:
4939 		error = EINVAL;
4940 		break;
4941 	}
4942 	return error;
4943 #undef IS_RUNNING
4944 }
4945 
4946 #ifdef	MWL_DEBUG
4947 static int
4948 mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4949 {
4950 	struct mwl_softc *sc = arg1;
4951 	int debug, error;
4952 
4953 	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4954 	error = sysctl_handle_int(oidp, &debug, 0, req);
4955 	if (error || !req->newptr)
4956 		return error;
4957 	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4958 	sc->sc_debug = debug & 0x00ffffff;
4959 	return 0;
4960 }
4961 #endif /* MWL_DEBUG */
4962 
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	/* Hang a read-write "debug" knob off the device's sysctl node. */
	struct sysctl_ctx_list *sctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *soid = device_get_sysctl_tree(sc->sc_dev);

	sc->sc_debug = mwl_debug;	/* seed from the tunable */
	SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
4976 
4977 /*
4978  * Announce various information on device/driver attach.
4979  */
4980 static void
4981 mwl_announce(struct mwl_softc *sc)
4982 {
4983 	struct ifnet *ifp = sc->sc_ifp;
4984 
4985 	if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
4986 		sc->sc_hwspecs.hwVersion,
4987 		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
4988 		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
4989 		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
4990 		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
4991 		sc->sc_hwspecs.regionCode);
4992 	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;
4993 
4994 	if (bootverbose) {
4995 		int i;
4996 		for (i = 0; i <= WME_AC_VO; i++) {
4997 			struct mwl_txq *txq = sc->sc_ac2q[i];
4998 			if_printf(ifp, "Use hw queue %u for %s traffic\n",
4999 				txq->qnum, ieee80211_wme_acnames[i]);
5000 		}
5001 	}
5002 	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
5003 		if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
5004 	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
5005 		if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
5006 	if (bootverbose || mwl_txbuf != MWL_TXBUF)
5007 		if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
5008 	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
5009 		if_printf(ifp, "multi-bss support\n");
5010 #ifdef MWL_TX_NODROP
5011 	if (bootverbose)
5012 		if_printf(ifp, "no tx drop\n");
5013 #endif
5014 }
5015