xref: /freebsd/sys/dev/mwl/if_mwl.c (revision 4abd7edcbde21ba7a089c7d1a0bba8f87ebece06)
1 /*-
2  * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3  * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer,
11  *    without modification.
12  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14  *    redistribution must be conditioned upon including a substantially
15  *    similar Disclaimer requirement for further binary redistribution.
16  *
17  * NO WARRANTY
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGES.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * Driver for the Marvell 88W8363 Wireless LAN controller.
36  */
37 
38 #include "opt_inet.h"
39 #include "opt_mwl.h"
40 #include "opt_wlan.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/sysctl.h>
45 #include <sys/mbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <sys/kernel.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/errno.h>
53 #include <sys/callout.h>
54 #include <sys/bus.h>
55 #include <sys/endian.h>
56 #include <sys/kthread.h>
57 #include <sys/taskqueue.h>
58 
59 #include <machine/bus.h>
60 
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65 #include <net/if_types.h>
66 #include <net/if_arp.h>
67 #include <net/ethernet.h>
68 #include <net/if_llc.h>
69 
70 #include <net/bpf.h>
71 
72 #include <net80211/ieee80211_var.h>
73 #include <net80211/ieee80211_regdomain.h>
74 
75 #ifdef INET
76 #include <netinet/in.h>
77 #include <netinet/if_ether.h>
78 #endif /* INET */
79 
80 #include <dev/mwl/if_mwlvar.h>
81 #include <dev/mwl/mwldiag.h>
82 
83 /* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
84 #define	MS(v,x)	(((v) & x) >> x##_S)
85 #define	SM(v,x)	(((v) << x##_S) & x)
86 
87 static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
88 		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
89 		    const uint8_t [IEEE80211_ADDR_LEN],
90 		    const uint8_t [IEEE80211_ADDR_LEN]);
91 static void	mwl_vap_delete(struct ieee80211vap *);
92 static int	mwl_setupdma(struct mwl_softc *);
93 static int	mwl_hal_reset(struct mwl_softc *sc);
94 static int	mwl_init_locked(struct mwl_softc *);
95 static void	mwl_init(void *);
96 static void	mwl_stop_locked(struct ifnet *, int);
97 static int	mwl_reset(struct ieee80211vap *, u_long);
98 static void	mwl_stop(struct ifnet *, int);
99 static void	mwl_start(struct ifnet *);
100 static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
101 			const struct ieee80211_bpf_params *);
102 static int	mwl_media_change(struct ifnet *);
103 static void	mwl_watchdog(void *);
104 static int	mwl_ioctl(struct ifnet *, u_long, caddr_t);
105 static void	mwl_radar_proc(void *, int);
106 static void	mwl_chanswitch_proc(void *, int);
107 static void	mwl_bawatchdog_proc(void *, int);
108 static int	mwl_key_alloc(struct ieee80211vap *,
109 			struct ieee80211_key *,
110 			ieee80211_keyix *, ieee80211_keyix *);
111 static int	mwl_key_delete(struct ieee80211vap *,
112 			const struct ieee80211_key *);
113 static int	mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
114 			const uint8_t mac[IEEE80211_ADDR_LEN]);
115 static int	mwl_mode_init(struct mwl_softc *);
116 static void	mwl_update_mcast(struct ifnet *);
117 static void	mwl_update_promisc(struct ifnet *);
118 static void	mwl_updateslot(struct ifnet *);
119 static int	mwl_beacon_setup(struct ieee80211vap *);
120 static void	mwl_beacon_update(struct ieee80211vap *, int);
121 #ifdef MWL_HOST_PS_SUPPORT
122 static void	mwl_update_ps(struct ieee80211vap *, int);
123 static int	mwl_set_tim(struct ieee80211_node *, int);
124 #endif
125 static int	mwl_dma_setup(struct mwl_softc *);
126 static void	mwl_dma_cleanup(struct mwl_softc *);
127 static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
128 		    const uint8_t [IEEE80211_ADDR_LEN]);
129 static void	mwl_node_cleanup(struct ieee80211_node *);
130 static void	mwl_node_drain(struct ieee80211_node *);
131 static void	mwl_node_getsignal(const struct ieee80211_node *,
132 			int8_t *, int8_t *);
133 static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
134 			struct ieee80211_mimo_info *);
135 static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
136 static void	mwl_rx_proc(void *, int);
137 static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
138 static int	mwl_tx_setup(struct mwl_softc *, int, int);
139 static int	mwl_wme_update(struct ieee80211com *);
140 static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
141 static void	mwl_tx_cleanup(struct mwl_softc *);
142 static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
143 static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
144 			     struct mwl_txbuf *, struct mbuf *);
145 static void	mwl_tx_proc(void *, int);
146 static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
147 static void	mwl_draintxq(struct mwl_softc *);
148 static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
149 static int	mwl_recv_action(struct ieee80211_node *,
150 			const struct ieee80211_frame *,
151 			const uint8_t *, const uint8_t *);
152 static int	mwl_addba_request(struct ieee80211_node *,
153 			struct ieee80211_tx_ampdu *, int dialogtoken,
154 			int baparamset, int batimeout);
155 static int	mwl_addba_response(struct ieee80211_node *,
156 			struct ieee80211_tx_ampdu *, int status,
157 			int baparamset, int batimeout);
158 static void	mwl_addba_stop(struct ieee80211_node *,
159 			struct ieee80211_tx_ampdu *);
160 static int	mwl_startrecv(struct mwl_softc *);
161 static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
162 			struct ieee80211_channel *);
163 static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
164 static void	mwl_scan_start(struct ieee80211com *);
165 static void	mwl_scan_end(struct ieee80211com *);
166 static void	mwl_set_channel(struct ieee80211com *);
167 static int	mwl_peerstadb(struct ieee80211_node *,
168 			int aid, int staid, MWL_HAL_PEERINFO *pi);
169 static int	mwl_localstadb(struct ieee80211vap *);
170 static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
171 static int	allocstaid(struct mwl_softc *sc, int aid);
172 static void	delstaid(struct mwl_softc *sc, int staid);
173 static void	mwl_newassoc(struct ieee80211_node *, int);
174 static void	mwl_agestations(void *);
175 static int	mwl_setregdomain(struct ieee80211com *,
176 			struct ieee80211_regdomain *, int,
177 			struct ieee80211_channel []);
178 static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
179 			struct ieee80211_channel []);
180 static int	mwl_getchannels(struct mwl_softc *);
181 
182 static void	mwl_sysctlattach(struct mwl_softc *);
183 static void	mwl_announce(struct mwl_softc *);
184 
SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");

/*
 * Driver-wide knobs exported as hw.mwl.* sysctls.  Entries that also
 * have a TUNABLE_INT may additionally be set from the loader at boot.
 */
static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
	    0, "rx descriptors allocated");
static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf);
static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf);
static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce,
	    0, "tx buffers to send at once");
TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce);
static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota,
	    0, "max rx buffers to process per interrupt");
TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota);
static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow,
	    0, "min free rx buffers before restarting traffic");
TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow);
210 
211 #ifdef MWL_DEBUG
212 static	int mwl_debug = 0;
213 SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug,
214 	    0, "control debugging printfs");
215 TUNABLE_INT("hw.mwl.debug", &mwl_debug);
216 enum {
217 	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
218 	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
219 	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
220 	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
221 	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
222 	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
223 	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
224 	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
225 	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
226 	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
227 	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
228 	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
229 	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
230 	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
231 	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
232 	MWL_DEBUG_ANY		= 0xffffffff
233 };
234 #define	IS_BEACON(wh) \
235     ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
236 	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
237 #define	IFF_DUMPPKTS_RECV(sc, wh) \
238     (((sc->sc_debug & MWL_DEBUG_RECV) && \
239       ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
240      (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
241 #define	IFF_DUMPPKTS_XMIT(sc) \
242 	((sc->sc_debug & MWL_DEBUG_XMIT) || \
243 	 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
244 #define	DPRINTF(sc, m, fmt, ...) do {				\
245 	if (sc->sc_debug & (m))					\
246 		printf(fmt, __VA_ARGS__);			\
247 } while (0)
248 #define	KEYPRINTF(sc, hk, mac) do {				\
249 	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
250 		mwl_keyprint(sc, __func__, hk, mac);		\
251 } while (0)
252 static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
253 static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
254 #else
255 #define	IFF_DUMPPKTS_RECV(sc, wh) \
256 	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
257 #define	IFF_DUMPPKTS_XMIT(sc) \
258 	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
259 #define	DPRINTF(sc, m, fmt, ...) do {				\
260 	(void) sc;						\
261 } while (0)
262 #define	KEYPRINTF(sc, k, mac) do {				\
263 	(void) sc;						\
264 } while (0)
265 #endif
266 
267 static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
268 
269 /*
270  * Each packet has fixed front matter: a 2-byte length
271  * of the payload, followed by a 4-address 802.11 header
272  * (regardless of the actual header and always w/o any
273  * QoS header).  The payload then follows.
274  */
275 struct mwltxrec {
276 	uint16_t fwlen;
277 	struct ieee80211_frame_addr4 wh;
278 } __packed;
279 
280 /*
281  * Read/Write shorthands for accesses to BAR 0.  Note
282  * that all BAR 1 operations are done in the "hal" and
283  * there should be no reference to them here.
284  */
#ifdef MWL_DEBUG
/* Read a 32-bit device register in BAR 0 (debug builds only). */
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
#endif
292 
/* Write a 32-bit value to a device register in BAR 0. */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
298 
/*
 * Device attach: bring up the hal, load firmware and fetch h/w
 * capabilities, allocate tx/rx DMA state and the taskqueue used
 * for deferred interrupt work, set up the WME tx queues, and
 * register capabilities/methods with net80211.  Returns 0 on
 * success or an errno; on failure the partially constructed
 * state is unwound (see the bad* labels) and sc_invalid is set
 * so a shared interrupt line is ignored.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "cannot if_alloc()\n");
		return ENOSPC;
	}
	ic = ifp->if_l2com;

	/*
	 * Setup the RX free list lock early, so it can be consistently
	 * removed.
	 */
	MWL_RXFREE_INIT(sc);

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		if_printf(ifp, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		if_printf(ifp, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		if_printf(ifp, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to setup descriptors: %d\n", error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, CALLOUT_MPSAFE);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);

	/* single kthread services all deferred work (rx/tx/radar/...) */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = mwl_start;
	ifp->if_ioctl = mwl_ioctl;
	ifp->if_init = mwl_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	/* call MI attach routine. */
	ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;

	/* save and interpose net80211 node methods */
	ic->ic_node_alloc = mwl_node_alloc;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	/* save and interpose A-MPDU/BA action handlers */
	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:	/* tx queue setup failed; release DMA state */
	mwl_dma_cleanup(sc);
bad1:	/* hal attached but later setup failed */
	mwl_hal_detach(mh);
bad:
	MWL_RXFREE_DESTROY(sc);
	if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}
548 
/*
 * Device detach: stop the hardware and unwind everything done by
 * mwl_attach.  Teardown ordering is significant; see the NB below.
 * Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	mwl_stop(ifp, 1);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	if_free(ifp);

	return 0;
}
581 
582 /*
583  * MAC address handling for multiple BSS on the same radio.
584  * The first vap uses the MAC address from the EEPROM.  For
585  * subsequent vap's we set the U/L bit (bit 1) in the MAC
586  * address and use the next six bits as an index.
587  */
588 static void
589 assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
590 {
591 	int i;
592 
593 	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
594 		/* NB: we only do this if h/w supports multiple bssid */
595 		for (i = 0; i < 32; i++)
596 			if ((sc->sc_bssidmask & (1<<i)) == 0)
597 				break;
598 		if (i != 0)
599 			mac[0] |= (i << 2)|0x2;
600 	} else
601 		i = 0;
602 	sc->sc_bssidmask |= 1<<i;
603 	if (i == 0)
604 		sc->sc_nbssid0++;
605 }
606 
607 static void
608 reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
609 {
610 	int i = mac[0] >> 2;
611 	if (i != 0 || --sc->sc_nbssid0 == 0)
612 		sc->sc_bssidmask &= ~(1<<i);
613 }
614 
/*
 * Create and attach a vap of the given opmode.  AP, mesh and station
 * vaps are backed by a hal vap (mwl_hal_newvap) and, unless
 * IEEE80211_CLONE_MACADDR is set, get a uniquified MAC address from
 * assign_address.  WDS vaps have no hal vap of their own and borrow
 * the hal vap of an existing AP vap; monitor vaps need no hal state.
 * Returns the new vap or NULL on failure (unsupported opmode or no
 * resources), with any address/hal state rolled back.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			/* undo the address assignment made above */
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;
	}

	mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (mvp == NULL) {
		/* roll back hal vap + address before bailing */
		if (hvap != NULL) {
			mwl_hal_delvap(hvap);
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
		}
		/* XXX msg */
		return NULL;
	}
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
	if (hvap != NULL)
		IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;	/* save for chaining */
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;	/* save for chaining */
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
754 
/*
 * Reclaim a vap created by mwl_vap_create.  Interrupts are masked
 * while the vap is torn down; for AP/mesh/station vaps the local
 * sta db entry and hal vap are released and the MAC address index
 * returned to the pool.  Frames the vap still has queued for
 * transmit are flushed before the interrupt mask is restored.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ifnet *parent = vap->iv_ic->ic_ifp;
	struct mwl_softc *sc = parent->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (parent->if_drv_flags & IFF_DRV_RUNNING) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	if (parent->if_drv_flags & IFF_DRV_RUNNING)
		mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable interrupts */
}
796 
797 void
798 mwl_suspend(struct mwl_softc *sc)
799 {
800 	struct ifnet *ifp = sc->sc_ifp;
801 
802 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
803 		__func__, ifp->if_flags);
804 
805 	mwl_stop(ifp, 1);
806 }
807 
808 void
809 mwl_resume(struct mwl_softc *sc)
810 {
811 	struct ifnet *ifp = sc->sc_ifp;
812 
813 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
814 		__func__, ifp->if_flags);
815 
816 	if (ifp->if_flags & IFF_UP)
817 		mwl_init(sc);
818 }
819 
820 void
821 mwl_shutdown(void *arg)
822 {
823 	struct mwl_softc *sc = arg;
824 
825 	mwl_stop(sc->sc_ifp, 1);
826 }
827 
828 /*
829  * Interrupt handler.  Most of the actual processing is deferred.
830  */
/*
 * Primary interrupt service routine.  Reads (and thereby clears) the
 * ISR via the hal, then dispatches on the status bits: rx/tx
 * completion, the BA watchdog, radar and channel-switch events are
 * deferred to the taskqueue; command completion is handled inline;
 * the remaining bits are counted or intentionally ignored.
 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;	/* NB: intentionally unhandled */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;	/* NB: intentionally unhandled */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
886 
887 static void
888 mwl_radar_proc(void *arg, int pending)
889 {
890 	struct mwl_softc *sc = arg;
891 	struct ifnet *ifp = sc->sc_ifp;
892 	struct ieee80211com *ic = ifp->if_l2com;
893 
894 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
895 	    __func__, pending);
896 
897 	sc->sc_stats.mst_radardetect++;
898 	/* XXX stop h/w BA streams? */
899 
900 	IEEE80211_LOCK(ic);
901 	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
902 	IEEE80211_UNLOCK(ic);
903 }
904 
905 static void
906 mwl_chanswitch_proc(void *arg, int pending)
907 {
908 	struct mwl_softc *sc = arg;
909 	struct ifnet *ifp = sc->sc_ifp;
910 	struct ieee80211com *ic = ifp->if_l2com;
911 
912 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
913 	    __func__, pending);
914 
915 	IEEE80211_LOCK(ic);
916 	sc->sc_csapending = 0;
917 	ieee80211_csa_completeswitch(ic);
918 	IEEE80211_UNLOCK(ic);
919 }
920 
/*
 * Tear down a stalled BA stream flagged by the f/w watchdog.
 * NB(review): sp->data[0] holds the node and sp->data[1] is
 * passed as the ampdu tx state -- presumably stashed when the
 * stream was created; confirm against the bastream setup code.
 */
static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
	struct ieee80211_node *ni = sp->data[0];

	/* send DELBA and drop the stream */
	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}
929 
/*
 * Deferred (taskqueue) handler for the firmware BA watchdog
 * interrupt: fetch a bitmap identifying stalled BlockAck
 * stream(s) and tear them down so they can be rebuilt.
 */
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	/* NOTE(review): 0xff appears to mean "all streams" and 0xaa
	 * "no stream"; these are magic f/w values -- confirm against
	 * the firmware interface documentation. */
	if (bitmap == 0xff) {
		n = 0;
		/* disable all ba streams */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/* disable a single ba stream */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}
974 
975 /*
976  * Convert net80211 channel to a HAL channel.
977  */
static void
mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
{
	hc->channel = chan->ic_ieee;

	/* NB: clear every flag bit with one store; channelFlags is a
	 * bitfield struct overlaid on a uint32_t-sized word */
	*(uint32_t *)&hc->channelFlags = 0;
	if (IEEE80211_IS_CHAN_2GHZ(chan))
		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
	else if (IEEE80211_IS_CHAN_5GHZ(chan))
		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
	if (IEEE80211_IS_CHAN_HT40(chan)) {
		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
		/* tell the f/w which side the extension channel is on */
		if (IEEE80211_IS_CHAN_HT40U(chan))
			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
		else
			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
	} else
		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
	/* XXX 10MHz channels */
}
998 
999 /*
1000  * Inform firmware of our tx/rx dma setup.  The BAR 0
1001  * writes below are for compatibility with older firmware.
1002  * For current firmware we send this information with a
1003  * cmd block via mwl_hal_sethwdma.
1004  */
static int
mwl_setupdma(struct mwl_softc *sc)
{
	int error, i;

	/* rx ring: start both read and write pointers at the first desc */
	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);

	/* tx rings; NB: the trailing MWL_NUM_ACK_QUEUES are not mapped */
	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
		struct mwl_txq *txq = &sc->sc_txq[i];
		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
	}
	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;

	/* push the same layout via a cmd block for current firmware */
	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to setup tx/rx dma; hal status %u\n", error);
		/* XXX */
	}
	return error;
}
1030 
1031 /*
1032  * Inform firmware of tx rate parameters.
1033  * Called after a channel change.
1034  */
1035 static int
1036 mwl_setcurchanrates(struct mwl_softc *sc)
1037 {
1038 	struct ifnet *ifp = sc->sc_ifp;
1039 	struct ieee80211com *ic = ifp->if_l2com;
1040 	const struct ieee80211_rateset *rs;
1041 	MWL_HAL_TXRATE rates;
1042 
1043 	memset(&rates, 0, sizeof(rates));
1044 	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1045 	/* rate used to send management frames */
1046 	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1047 	/* rate used to send multicast frames */
1048 	rates.McastRate = rates.MgtRate;
1049 
1050 	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1051 }
1052 
1053 /*
1054  * Inform firmware of tx rate parameters.  Called whenever
1055  * user-settable params change and after a channel change.
1056  */
static int
mwl_setrates(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ieee80211_node *ni = vap->iv_bss;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	MWL_HAL_TXRATE rates;

	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));

	/*
	 * Update the h/w rate map.
	 * NB: 0x80 for MCS is passed through unchanged
	 */
	memset(&rates, 0, sizeof(rates));
	/* rate used to send management frames */
	rates.MgtRate = tp->mgmtrate;
	/* rate used to send multicast frames */
	rates.McastRate = tp->mcastrate;

	/* while here calculate EAPOL fixed rate cookie */
	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));

	/* a fixed unicast rate selects RATE_FIXED, else f/w rate control */
	return mwl_hal_settxrate(mvp->mv_hvap,
	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
		RATE_FIXED : RATE_AUTO, &rates);
}
1084 
1085 /*
1086  * Setup a fixed xmit rate cookie for EAPOL frames.
1087  */
1088 static void
1089 mwl_seteapolformat(struct ieee80211vap *vap)
1090 {
1091 	struct mwl_vap *mvp = MWL_VAP(vap);
1092 	struct ieee80211_node *ni = vap->iv_bss;
1093 	enum ieee80211_phymode mode;
1094 	uint8_t rate;
1095 
1096 	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1097 
1098 	mode = ieee80211_chan2mode(ni->ni_chan);
1099 	/*
1100 	 * Use legacy rates when operating a mixed HT+non-HT bss.
1101 	 * NB: this may violate POLA for sta and wds vap's.
1102 	 */
1103 	if (mode == IEEE80211_MODE_11NA &&
1104 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1105 		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1106 	else if (mode == IEEE80211_MODE_11NG &&
1107 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1108 		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1109 	else
1110 		rate = vap->iv_txparms[mode].mgmtrate;
1111 
1112 	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1113 }
1114 
1115 /*
1116  * Map SKU+country code to region code for radar bin'ing.
1117  */
1118 static int
1119 mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1120 {
1121 	switch (rd->regdomain) {
1122 	case SKU_FCC:
1123 	case SKU_FCC3:
1124 		return DOMAIN_CODE_FCC;
1125 	case SKU_CA:
1126 		return DOMAIN_CODE_IC;
1127 	case SKU_ETSI:
1128 	case SKU_ETSI2:
1129 	case SKU_ETSI3:
1130 		if (rd->country == CTRY_SPAIN)
1131 			return DOMAIN_CODE_SPAIN;
1132 		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1133 			return DOMAIN_CODE_FRANCE;
1134 		/* XXX force 1.3.1 radar type */
1135 		return DOMAIN_CODE_ETSI_131;
1136 	case SKU_JAPAN:
1137 		return DOMAIN_CODE_MKK;
1138 	case SKU_ROW:
1139 		return DOMAIN_CODE_DGT;	/* Taiwan */
1140 	case SKU_APAC:
1141 	case SKU_APAC2:
1142 	case SKU_APAC3:
1143 		return DOMAIN_CODE_AUS;	/* Australia */
1144 	}
1145 	/* XXX KOREA? */
1146 	return DOMAIN_CODE_FCC;			/* XXX? */
1147 }
1148 
/*
 * Push vap-independent state to the firmware after a (re)start:
 * antennas, radio/preamble, WMM, channel, rate adaptation mode,
 * burst optimization, region code, and ampdu rate-mode tuning.
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	/* region code selects the radar detection algorithm */
	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	/* NB: individual hal call status is ignored; always "success" */
	return 1;
}
1173 
/*
 * Bring the device up: stop any previous activity, push global
 * state to the firmware, start the receive path, then enable
 * interrupts and arm the watchdog.  Caller holds the softc lock.
 * Returns 0 on success or an errno.
 */
static int
mwl_init_locked(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop_locked(ifp, 0);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		if_printf(ifp, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	/* mark running before unmasking interrupts / arming watchdog */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1234 
1235 static void
1236 mwl_init(void *arg)
1237 {
1238 	struct mwl_softc *sc = arg;
1239 	struct ifnet *ifp = sc->sc_ifp;
1240 	struct ieee80211com *ic = ifp->if_l2com;
1241 	int error = 0;
1242 
1243 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1244 		__func__, ifp->if_flags);
1245 
1246 	MWL_LOCK(sc);
1247 	error = mwl_init_locked(sc);
1248 	MWL_UNLOCK(sc);
1249 
1250 	if (error == 0)
1251 		ieee80211_start_all(ic);	/* start all vap's */
1252 }
1253 
1254 static void
1255 mwl_stop_locked(struct ifnet *ifp, int disable)
1256 {
1257 	struct mwl_softc *sc = ifp->if_softc;
1258 
1259 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1260 		__func__, sc->sc_invalid, ifp->if_flags);
1261 
1262 	MWL_LOCK_ASSERT(sc);
1263 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1264 		/*
1265 		 * Shutdown the hardware and driver.
1266 		 */
1267 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1268 		callout_stop(&sc->sc_watchdog);
1269 		sc->sc_tx_timer = 0;
1270 		mwl_draintxq(sc);
1271 	}
1272 }
1273 
/* Locked wrapper around mwl_stop_locked. */
static void
mwl_stop(struct ifnet *ifp, int disable)
{
	struct mwl_softc *sc = ifp->if_softc;

	MWL_LOCK(sc);
	mwl_stop_locked(ifp, disable);
	MWL_UNLOCK(sc);
}
1283 
/*
 * Re-push per-vap state (tx rates, RTS threshold, short GI,
 * HT protection) to the firmware; for beacon-generating modes
 * in RUN state also rebuild ap mode state and the beacon frame.
 * Returns 0 on success or the mwl_beacon_setup error.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1313 
1314 /*
1315  * Reset the hardware w/o losing operational state.
 * Used to reset or reload hardware state for a vap.
1317  */
static int
mwl_reset(struct ieee80211vap *vap, u_long cmd)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	int error = 0;

	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
		struct ieee80211com *ic = vap->iv_ic;
		struct ifnet *ifp = ic->ic_ifp;
		struct mwl_softc *sc = ifp->if_softc;
		struct mwl_hal *mh = sc->sc_mh;

		/* XXX handle DWDS sta vap change */
		/* XXX do we need to disable interrupts? */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
		error = mwl_reset_vap(vap, vap->iv_state);
		mwl_hal_intrset(mh, sc->sc_imask);	/* restore mask */
	}
	return error;
}
1338 
1339 /*
1340  * Allocate a tx buffer for sending a frame.  The
1341  * packet is assumed to have the WME AC stored so
1342  * we can use it to select the appropriate h/w queue.
1343  */
1344 static struct mwl_txbuf *
1345 mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1346 {
1347 	struct mwl_txbuf *bf;
1348 
1349 	/*
1350 	 * Grab a TX buffer and associated resources.
1351 	 */
1352 	MWL_TXQ_LOCK(txq);
1353 	bf = STAILQ_FIRST(&txq->free);
1354 	if (bf != NULL) {
1355 		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1356 		txq->nfree--;
1357 	}
1358 	MWL_TXQ_UNLOCK(txq);
1359 	if (bf == NULL)
1360 		DPRINTF(sc, MWL_DEBUG_XMIT,
1361 		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1362 	return bf;
1363 }
1364 
1365 /*
1366  * Return a tx buffer to the queue it came from.  Note there
1367  * are two cases because we must preserve the order of buffers
1368  * as it reflects the fixed order of descriptors in memory
1369  * (the firmware pre-fetches descriptors so we cannot reorder).
1370  */
1371 static void
1372 mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1373 {
1374 	bf->bf_m = NULL;
1375 	bf->bf_node = NULL;
1376 	MWL_TXQ_LOCK(txq);
1377 	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1378 	txq->nfree++;
1379 	MWL_TXQ_UNLOCK(txq);
1380 }
1381 
1382 static void
1383 mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1384 {
1385 	bf->bf_m = NULL;
1386 	bf->bf_node = NULL;
1387 	MWL_TXQ_LOCK(txq);
1388 	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1389 	txq->nfree++;
1390 	MWL_TXQ_UNLOCK(txq);
1391 }
1392 
/*
 * Transmit start: drain the interface send queue, mapping each
 * frame to the h/w queue chosen by the 802.11 WME classification,
 * and periodically poke the firmware to fetch new descriptors.
 */
static void
mwl_start(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	nqueued = 0;
	for (;;) {
		bf = NULL;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			/* XXX blocks other traffic */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 * NOTE(review): m is not freed here on failure;
		 * presumably mwl_tx_start disposes of it -- confirm.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			ifp->if_oerrors++;
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1475 
/*
 * Transmit a raw (caller-formed) 802.11 frame.  Mirrors the
 * mwl_start tx path for a single frame handed in by net80211.
 * Returns 0 on success or an errno; the node reference and
 * mbuf are reclaimed on failure.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		/* XXX blocks other traffic */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 * NOTE(review): m is not freed here on failure;
	 * presumably mwl_tx_start disposes of it -- confirm.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		ifp->if_oerrors++;
		mwl_puttxbuf_head(txq, bf);

		ieee80211_free_node(ni);
		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1535 
1536 static int
1537 mwl_media_change(struct ifnet *ifp)
1538 {
1539 	struct ieee80211vap *vap = ifp->if_softc;
1540 	int error;
1541 
1542 	error = ieee80211_media_change(ifp);
1543 	/* NB: only the fixed rate can change and that doesn't need a reset */
1544 	if (error == ENETRESET) {
1545 		mwl_setrates(vap);
1546 		error = 0;
1547 	}
1548 	return error;
1549 }
1550 
1551 #ifdef MWL_DEBUG
/* Debug dump of a HAL key record (key bytes, mac, TKIP MICs, flags). */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	/* NB: assumes keyTypeId is one of the three known ids */
	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
1577 #endif
1578 
1579 /*
1580  * Allocate a key cache slot for a unicast key.  The
1581  * firmware handles key allocation and every station is
1582  * guaranteed key space so we are always successful.
1583  */
static int
mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{
	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;

	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
		/* group keys must come from the vap's own key array */
		if (!(&vap->iv_nw_keys[0] <= k &&
		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
			/* should not happen */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
				"%s: bogus group key\n", __func__);
			return 0;
		}
		/* give the caller what they requested */
		*keyix = *rxkeyix = k - vap->iv_nw_keys;
	} else {
		/*
		 * Firmware handles key allocation.
		 */
		*keyix = *rxkeyix = 0;
	}
	return 1;
}
1609 
1610 /*
1611  * Delete a key entry allocated by mwl_key_alloc.
1612  */
static int
mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	MWL_HAL_KEYVAL hk;
	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* NB: WDS vaps use the parent ap's h/w vap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}

	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
	    __func__, k->wk_keyix);

	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	/* map net80211 cipher to the f/w key type */
	switch (k->wk_cipher->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
}
1656 
1657 static __inline int
1658 addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1659 {
1660 	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1661 		if (k->wk_flags & IEEE80211_KEY_XMIT)
1662 			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1663 		if (k->wk_flags & IEEE80211_KEY_RECV)
1664 			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1665 		return 1;
1666 	} else
1667 		return 0;
1668 }
1669 
1670 /*
1671  * Set the key cache contents for the specified key.  Key cache
1672  * slot(s) must already have been allocated by mwl_key_alloc.
1673  */
static int
mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* NB: WDS vaps use the parent ap's h/w vap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	/* build the HAL key record from the net80211 key */
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 *     just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
			/* XXX plumb to local sta db too for static key wep */
			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
		}
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS
		 * node setup so we will plumb the key to the wrong mac
		 * address (it'll be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1778 
1779 /* unaligned little endian access */
1780 #define LE_READ_2(p)				\
1781 	((uint16_t)				\
1782 	 ((((const uint8_t *)(p))[0]      ) |	\
1783 	  (((const uint8_t *)(p))[1] <<  8)))
1784 #define LE_READ_4(p)				\
1785 	((uint32_t)				\
1786 	 ((((const uint8_t *)(p))[0]      ) |	\
1787 	  (((const uint8_t *)(p))[1] <<  8) |	\
1788 	  (((const uint8_t *)(p))[2] << 16) |	\
1789 	  (((const uint8_t *)(p))[3] << 24)))
1790 
1791 /*
1792  * Set the multicast filter contents into the hardware.
1793  * XXX f/w has no support; just defer to the os.
1794  */
static void
mwl_setmcastfilter(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
#if 0
	/* NB: dead code -- retained as a template should the f/w
	 * ever grow a real multicast filter */
	struct ether_multi *enm;
	struct ether_multistep estep;
	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
	uint8_t *mp;
	int nmc;

	mp = macs;
	nmc = 0;
	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* XXX Punt on ranges. */
		if (nmc == MWL_HAL_MCAST_MAX ||
		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
			ifp->if_flags |= IFF_ALLMULTI;
			return;
		}
		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
		mp += IEEE80211_ADDR_LEN, nmc++;
		ETHER_NEXT_MULTI(estep, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
#else
	/* XXX no mcast filter support; we get everything */
	ifp->if_flags |= IFF_ALLMULTI;
#endif
}
1827 
1828 static int
1829 mwl_mode_init(struct mwl_softc *sc)
1830 {
1831 	struct ifnet *ifp = sc->sc_ifp;
1832 	struct ieee80211com *ic = ifp->if_l2com;
1833 	struct mwl_hal *mh = sc->sc_mh;
1834 
1835 	/*
1836 	 * NB: Ignore promisc in hostap mode; it's set by the
1837 	 * bridge.  This is wrong but we have no way to
1838 	 * identify internal requests (from the bridge)
1839 	 * versus external requests such as for tcpdump.
1840 	 */
1841 	mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
1842 	    ic->ic_opmode != IEEE80211_M_HOSTAP);
1843 	mwl_setmcastfilter(sc);
1844 
1845 	return 0;
1846 }
1847 
1848 /*
1849  * Callback from the 802.11 layer after a multicast state change.
1850  */
1851 static void
1852 mwl_update_mcast(struct ifnet *ifp)
1853 {
1854 	struct mwl_softc *sc = ifp->if_softc;
1855 
1856 	mwl_setmcastfilter(sc);
1857 }
1858 
1859 /*
1860  * Callback from the 802.11 layer after a promiscuous mode change.
1861  * Note this interface does not check the operating mode as this
1862  * is an internal callback and we are expected to honor the current
1863  * state (e.g. this is used for setting the interface in promiscuous
1864  * mode when operating in hostap mode to do ACS).
1865  */
1866 static void
1867 mwl_update_promisc(struct ifnet *ifp)
1868 {
1869 	struct mwl_softc *sc = ifp->if_softc;
1870 
1871 	mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
1872 }
1873 
1874 /*
1875  * Callback from the 802.11 layer to update the slot time
1876  * based on the current setting.  We use it to notify the
1877  * firmware of ERP changes and the f/w takes care of things
1878  * like slot time and preamble.
1879  */
static void
mwl_updateslot(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;
	int prot;

	/* NB: can be called early; suppress needless cmds */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/*
	 * Calculate the ERP flags.  The firmware will use
	 * this to carry out the appropriate measures.
	 */
	prot = 0;
	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
		if (ic->ic_flags & IEEE80211_F_USEPROT)
			prot |= IEEE80211_ERP_USE_PROTECTION;
		if (ic->ic_flags & IEEE80211_F_USEBARKER)
			prot |= IEEE80211_ERP_LONG_PREAMBLE;
	}

	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
	    ic->ic_flags);

	mwl_hal_setgprot(mh, prot);
}
1914 
1915 /*
1916  * Setup the beacon frame.
1917  */
1918 static int
1919 mwl_beacon_setup(struct ieee80211vap *vap)
1920 {
1921 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1922 	struct ieee80211_node *ni = vap->iv_bss;
1923 	struct ieee80211_beacon_offsets bo;
1924 	struct mbuf *m;
1925 
1926 	m = ieee80211_beacon_alloc(ni, &bo);
1927 	if (m == NULL)
1928 		return ENOBUFS;
1929 	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1930 	m_free(m);
1931 
1932 	return 0;
1933 }
1934 
1935 /*
1936  * Update the beacon frame in response to a change.
1937  */
1938 static void
1939 mwl_beacon_update(struct ieee80211vap *vap, int item)
1940 {
1941 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1942 	struct ieee80211com *ic = vap->iv_ic;
1943 
1944 	KASSERT(hvap != NULL, ("no beacon"));
1945 	switch (item) {
1946 	case IEEE80211_BEACON_ERP:
1947 		mwl_updateslot(ic->ic_ifp);
1948 		break;
1949 	case IEEE80211_BEACON_HTINFO:
1950 		mwl_hal_setnprotmode(hvap,
1951 		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1952 		break;
1953 	case IEEE80211_BEACON_CAPS:
1954 	case IEEE80211_BEACON_WME:
1955 	case IEEE80211_BEACON_APPIE:
1956 	case IEEE80211_BEACON_CSA:
1957 		break;
1958 	case IEEE80211_BEACON_TIM:
1959 		/* NB: firmware always forms TIM */
1960 		return;
1961 	}
1962 	/* XXX retain beacon frame and update */
1963 	mwl_beacon_setup(vap);
1964 }
1965 
1966 static void
1967 mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1968 {
1969 	bus_addr_t *paddr = (bus_addr_t*) arg;
1970 	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1971 	*paddr = segs->ds_addr;
1972 }
1973 
1974 #ifdef MWL_HOST_PS_SUPPORT
1975 /*
1976  * Handle power save station occupancy changes.
1977  */
1978 static void
1979 mwl_update_ps(struct ieee80211vap *vap, int nsta)
1980 {
1981 	struct mwl_vap *mvp = MWL_VAP(vap);
1982 
1983 	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1984 		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1985 	mvp->mv_last_ps_sta = nsta;
1986 }
1987 
1988 /*
1989  * Handle associated station power save state changes.
1990  */
1991 static int
1992 mwl_set_tim(struct ieee80211_node *ni, int set)
1993 {
1994 	struct ieee80211vap *vap = ni->ni_vap;
1995 	struct mwl_vap *mvp = MWL_VAP(vap);
1996 
1997 	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1998 		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1999 		    IEEE80211_AID(ni->ni_associd), set);
2000 		return 1;
2001 	} else
2002 		return 0;
2003 }
2004 #endif /* MWL_HOST_PS_SUPPORT */
2005 
2006 static int
2007 mwl_desc_setup(struct mwl_softc *sc, const char *name,
2008 	struct mwl_descdma *dd,
2009 	int nbuf, size_t bufsize, int ndesc, size_t descsize)
2010 {
2011 	struct ifnet *ifp = sc->sc_ifp;
2012 	uint8_t *ds;
2013 	int error;
2014 
2015 	DPRINTF(sc, MWL_DEBUG_RESET,
2016 	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
2017 	    __func__, name, nbuf, (uintmax_t) bufsize,
2018 	    ndesc, (uintmax_t) descsize);
2019 
2020 	dd->dd_name = name;
2021 	dd->dd_desc_len = nbuf * ndesc * descsize;
2022 
2023 	/*
2024 	 * Setup DMA descriptor area.
2025 	 */
2026 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
2027 		       PAGE_SIZE, 0,		/* alignment, bounds */
2028 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2029 		       BUS_SPACE_MAXADDR,	/* highaddr */
2030 		       NULL, NULL,		/* filter, filterarg */
2031 		       dd->dd_desc_len,		/* maxsize */
2032 		       1,			/* nsegments */
2033 		       dd->dd_desc_len,		/* maxsegsize */
2034 		       BUS_DMA_ALLOCNOW,	/* flags */
2035 		       NULL,			/* lockfunc */
2036 		       NULL,			/* lockarg */
2037 		       &dd->dd_dmat);
2038 	if (error != 0) {
2039 		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2040 		return error;
2041 	}
2042 
2043 	/* allocate descriptors */
2044 	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
2045 	if (error != 0) {
2046 		if_printf(ifp, "unable to create dmamap for %s descriptors, "
2047 			"error %u\n", dd->dd_name, error);
2048 		goto fail0;
2049 	}
2050 
2051 	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2052 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2053 				 &dd->dd_dmamap);
2054 	if (error != 0) {
2055 		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2056 			"error %u\n", nbuf * ndesc, dd->dd_name, error);
2057 		goto fail1;
2058 	}
2059 
2060 	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2061 				dd->dd_desc, dd->dd_desc_len,
2062 				mwl_load_cb, &dd->dd_desc_paddr,
2063 				BUS_DMA_NOWAIT);
2064 	if (error != 0) {
2065 		if_printf(ifp, "unable to map %s descriptors, error %u\n",
2066 			dd->dd_name, error);
2067 		goto fail2;
2068 	}
2069 
2070 	ds = dd->dd_desc;
2071 	memset(ds, 0, dd->dd_desc_len);
2072 	DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2073 	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2074 	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
2075 
2076 	return 0;
2077 fail2:
2078 	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2079 fail1:
2080 	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
2081 fail0:
2082 	bus_dma_tag_destroy(dd->dd_dmat);
2083 	memset(dd, 0, sizeof(*dd));
2084 	return error;
2085 #undef DS2PHYS
2086 }
2087 
/*
 * Undo mwl_desc_setup: unload the map, release the descriptor
 * memory, and destroy the map and tag.  The descdma state is
 * zeroed so dd_desc_len reads as 0 afterwards (callers use
 * that as the "was setup" test).
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2098 
2099 /*
2100  * Construct a tx q's free list.  The order of entries on
2101  * the list must reflect the physical layout of tx descriptors
2102  * because the firmware pre-fetches descriptors.
2103  *
2104  * XXX might be better to use indices into the buffer array.
2105  */
2106 static void
2107 mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2108 {
2109 	struct mwl_txbuf *bf;
2110 	int i;
2111 
2112 	bf = txq->dma.dd_bufptr;
2113 	STAILQ_INIT(&txq->free);
2114 	for (i = 0; i < mwl_txbuf; i++, bf++)
2115 		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2116 	txq->nfree = i;
2117 }
2118 
2119 #define	DS2PHYS(_dd, _ds) \
2120 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2121 
/*
 * Allocate the tx descriptor ring for a queue and the shadow
 * array of mwl_txbuf state, wiring each buffer to its group of
 * MWL_TXDESC descriptors (kva and bus address), then place all
 * buffers on the queue's free list.
 *
 * NB: on a mid-loop dmamap_create failure this returns without
 *     freeing what was already acquired; the caller (mwl_dma_setup)
 *     invokes mwl_dma_cleanup -> mwl_txdma_cleanup to reclaim it.
 */
static int
mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ifnet *ifp = sc->sc_ifp;
	int error, bsize, i;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;

	error = mwl_desc_setup(sc, "tx", &txq->dma,
			mwl_txbuf, sizeof(struct mwl_txbuf),
			MWL_TXDESC, sizeof(struct mwl_txdesc));
	if (error != 0)
		return error;

	/* allocate and setup tx buffers */
	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		if_printf(ifp, "malloc of %u tx buffers failed\n",
			mwl_txbuf);
		return ENOMEM;
	}
	txq->dma.dd_bufptr = bf;

	ds = txq->dma.dd_desc;
	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
		bf->bf_desc = ds;
		/* bus address of this buffer's first descriptor */
		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			if_printf(ifp, "unable to create dmamap for tx "
				"buffer %u, error %u\n", i, error);
			return error;
		}
	}
	mwl_txq_reset(sc, txq);
	return 0;
}
2161 
/*
 * Reclaim all tx buffer state for a queue: destroy per-buffer
 * dma maps, free the shadow buffer array, and release the
 * descriptor memory.  Safe to call on a partially-setup queue
 * (NULL maps/bufptr and zero desc_len are all checked).
 */
static void
mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i;

	bf = txq->dma.dd_bufptr;
	for (i = 0; i < mwl_txbuf; i++, bf++) {
		/* buffers must be idle (reclaimed) before teardown */
		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
		KASSERT(bf->bf_node == NULL, ("node on free list"));
		if (bf->bf_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
	}
	STAILQ_INIT(&txq->free);
	txq->nfree = 0;
	if (txq->dma.dd_bufptr != NULL) {
		free(txq->dma.dd_bufptr, M_MWLDEV);
		txq->dma.dd_bufptr = NULL;
	}
	/* NB: dd_desc_len != 0 means mwl_desc_setup completed */
	if (txq->dma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &txq->dma);
}
2184 
2185 static int
2186 mwl_rxdma_setup(struct mwl_softc *sc)
2187 {
2188 	struct ifnet *ifp = sc->sc_ifp;
2189 	int error, jumbosize, bsize, i;
2190 	struct mwl_rxbuf *bf;
2191 	struct mwl_jumbo *rbuf;
2192 	struct mwl_rxdesc *ds;
2193 	caddr_t data;
2194 
2195 	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2196 			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2197 			1, sizeof(struct mwl_rxdesc));
2198 	if (error != 0)
2199 		return error;
2200 
2201 	/*
2202 	 * Receive is done to a private pool of jumbo buffers.
2203 	 * This allows us to attach to mbuf's and avoid re-mapping
2204 	 * memory on each rx we post.  We allocate a large chunk
2205 	 * of memory and manage it in the driver.  The mbuf free
2206 	 * callback method is used to reclaim frames after sending
2207 	 * them up the stack.  By default we allocate 2x the number of
2208 	 * rx descriptors configured so we have some slop to hold
2209 	 * us while frames are processed.
2210 	 */
2211 	if (mwl_rxbuf < 2*mwl_rxdesc) {
2212 		if_printf(ifp,
2213 		    "too few rx dma buffers (%d); increasing to %d\n",
2214 		    mwl_rxbuf, 2*mwl_rxdesc);
2215 		mwl_rxbuf = 2*mwl_rxdesc;
2216 	}
2217 	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2218 	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2219 
2220 	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2221 		       PAGE_SIZE, 0,		/* alignment, bounds */
2222 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2223 		       BUS_SPACE_MAXADDR,	/* highaddr */
2224 		       NULL, NULL,		/* filter, filterarg */
2225 		       sc->sc_rxmemsize,	/* maxsize */
2226 		       1,			/* nsegments */
2227 		       sc->sc_rxmemsize,	/* maxsegsize */
2228 		       BUS_DMA_ALLOCNOW,	/* flags */
2229 		       NULL,			/* lockfunc */
2230 		       NULL,			/* lockarg */
2231 		       &sc->sc_rxdmat);
2232 	error = bus_dmamap_create(sc->sc_rxdmat, BUS_DMA_NOWAIT, &sc->sc_rxmap);
2233 	if (error != 0) {
2234 		if_printf(ifp, "could not create rx DMA map\n");
2235 		return error;
2236 	}
2237 
2238 	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2239 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2240 				 &sc->sc_rxmap);
2241 	if (error != 0) {
2242 		if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2243 		    (uintmax_t) sc->sc_rxmemsize);
2244 		return error;
2245 	}
2246 
2247 	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2248 				sc->sc_rxmem, sc->sc_rxmemsize,
2249 				mwl_load_cb, &sc->sc_rxmem_paddr,
2250 				BUS_DMA_NOWAIT);
2251 	if (error != 0) {
2252 		if_printf(ifp, "could not load rx DMA map\n");
2253 		return error;
2254 	}
2255 
2256 	/*
2257 	 * Allocate rx buffers and set them up.
2258 	 */
2259 	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2260 	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2261 	if (bf == NULL) {
2262 		if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2263 		return error;
2264 	}
2265 	sc->sc_rxdma.dd_bufptr = bf;
2266 
2267 	STAILQ_INIT(&sc->sc_rxbuf);
2268 	ds = sc->sc_rxdma.dd_desc;
2269 	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2270 		bf->bf_desc = ds;
2271 		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2272 		/* pre-assign dma buffer */
2273 		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2274 		/* NB: tail is intentional to preserve descriptor order */
2275 		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2276 	}
2277 
2278 	/*
2279 	 * Place remainder of dma memory buffers on the free list.
2280 	 */
2281 	SLIST_INIT(&sc->sc_rxfree);
2282 	for (; i < mwl_rxbuf; i++) {
2283 		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2284 		rbuf = MWL_JUMBO_DATA2BUF(data);
2285 		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2286 		sc->sc_nrxfree++;
2287 	}
2288 	return 0;
2289 }
2290 #undef DS2PHYS
2291 
/*
 * Undo mwl_rxdma_setup: unload and free the jumbo-buffer pool,
 * destroy its map, free the shadow rx buffer array, and tear
 * down the descriptor ring.  Each step is guarded so this is
 * safe after a partial setup failure.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmap != NULL)
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxmap != NULL) {
		bus_dmamap_destroy(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmap = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	/* NB: dd_desc_len != 0 means mwl_desc_setup completed */
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
2312 
2313 static int
2314 mwl_dma_setup(struct mwl_softc *sc)
2315 {
2316 	int error, i;
2317 
2318 	error = mwl_rxdma_setup(sc);
2319 	if (error != 0) {
2320 		mwl_rxdma_cleanup(sc);
2321 		return error;
2322 	}
2323 
2324 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2325 		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2326 		if (error != 0) {
2327 			mwl_dma_cleanup(sc);
2328 			return error;
2329 		}
2330 	}
2331 	return 0;
2332 }
2333 
2334 static void
2335 mwl_dma_cleanup(struct mwl_softc *sc)
2336 {
2337 	int i;
2338 
2339 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2340 		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2341 	mwl_rxdma_cleanup(sc);
2342 }
2343 
2344 static struct ieee80211_node *
2345 mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2346 {
2347 	struct ieee80211com *ic = vap->iv_ic;
2348 	struct mwl_softc *sc = ic->ic_ifp->if_softc;
2349 	const size_t space = sizeof(struct mwl_node);
2350 	struct mwl_node *mn;
2351 
2352 	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2353 	if (mn == NULL) {
2354 		/* XXX stat+msg */
2355 		return NULL;
2356 	}
2357 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2358 	return &mn->mn_node;
2359 }
2360 
/*
 * net80211 node cleanup hook: if the node was assigned a
 * firmware station id, remove the station db entry and release
 * the staid before chaining to the saved net80211 cleanup.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		/*
		 * NB: the "else if" below the comment binds to this
		 * "if" — the WDS path runs only when mn_hvap is NULL.
		 */
		if (mn->mn_hvap != NULL) {
			/* sta mode entries are keyed by our own address */
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	sc->sc_node_cleanup(ni);
}
2394 
2395 /*
2396  * Reclaim rx dma buffers from packets sitting on the ampdu
2397  * reorder queue for a station.  We replace buffers with a
2398  * system cluster (if available).
2399  */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
	/*
	 * NOTE(review): the entire body is compiled out (#if 0), so
	 * this function is currently a no-op.  The disabled code uses
	 * pool_cache_get_paddr/MEXTREMOVE/MEXTADD-style APIs that do
	 * not appear elsewhere in this file — presumably carried over
	 * from another platform and never ported; confirm before
	 * enabling.
	 */
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place.  Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}
2448 
2449 /*
2450  * Callback to reclaim resources.  We first let the
2451  * net80211 layer do it's thing, then if we are still
2452  * blocked by a lack of rx dma buffers we walk the ampdu
2453  * reorder q's to reclaim buffers by copying to a system
2454  * cluster.
2455  */
static void
mwl_node_drain(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
        struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
	    __func__, ni, ni->ni_vap, mn->mn_staid);

	/* NB: call up first to age out ampdu q's */
	sc->sc_node_drain(ni);

	/* XXX better to not check low water mark? */
	/* only bother for HT stations with a staid while rx is blocked */
	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
	    (ni->ni_flags & IEEE80211_NODE_HT)) {
		uint8_t tid;
		/*
		 * Walk the reorder q and reclaim rx dma buffers by copying
		 * the packet contents into clusters.
		 */
		for (tid = 0; tid < WME_NUM_TID; tid++) {
			struct ieee80211_rx_ampdu *rap;

			rap = &ni->ni_rx_ampdu[tid];
			/* skip TIDs with no active BA exchange */
			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
				continue;
			if (rap->rxa_qframes)
				mwl_ampdu_rxdma_reclaim(rap);
		}
	}
}
2488 
/*
 * Return signal state for a node: rssi from the net80211
 * averaged value and a fixed placeholder noise floor (the
 * per-chain NF data is not yet smoothed/used — see XXX).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2504 
2505 /*
2506  * Convert Hardware per-antenna rssi info to common format:
2507  * Let a1, a2, a3 represent the amplitudes per chain
2508  * Let amax represent max[a1, a2, a3]
2509  * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2510  * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
 * We store a table that is 4*20*log10(idx) - the extra factor of
 * 4 is used to maintain some extra precision.
2513  *
2514  * Values are stored in .5 db format capped at 127.
2515  */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
/*
 * Convert a per-chain amplitude into .5 dB units relative to
 * the strongest chain using the 4*20*log10 table above, capped
 * at 127 (see the block comment preceding this function).
 */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx) for idx in [0,31]; [0] is a placeholder */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* strongest of the three chains is the reference */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->rssi[1], mn->mn_ai.rssi_b);
	CVT(mi->rssi[2], mn->mn_ai.rssi_c);

	mi->noise[0] = mn->mn_ai.nf_a;
	mi->noise[1] = mn->mn_ai.nf_b;
	mi->noise[2] = mn->mn_ai.nf_c;
#undef CVT
}
2549 
2550 static __inline void *
2551 mwl_getrxdma(struct mwl_softc *sc)
2552 {
2553 	struct mwl_jumbo *buf;
2554 	void *data;
2555 
2556 	/*
2557 	 * Allocate from jumbo pool.
2558 	 */
2559 	MWL_RXFREE_LOCK(sc);
2560 	buf = SLIST_FIRST(&sc->sc_rxfree);
2561 	if (buf == NULL) {
2562 		DPRINTF(sc, MWL_DEBUG_ANY,
2563 		    "%s: out of rx dma buffers\n", __func__);
2564 		sc->sc_stats.mst_rx_nodmabuf++;
2565 		data = NULL;
2566 	} else {
2567 		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2568 		sc->sc_nrxfree--;
2569 		data = MWL_JUMBO_BUF2DATA(buf);
2570 	}
2571 	MWL_RXFREE_UNLOCK(sc);
2572 	return data;
2573 }
2574 
2575 static __inline void
2576 mwl_putrxdma(struct mwl_softc *sc, void *data)
2577 {
2578 	struct mwl_jumbo *buf;
2579 
2580 	/* XXX bounds check data */
2581 	MWL_RXFREE_LOCK(sc);
2582 	buf = MWL_JUMBO_DATA2BUF(data);
2583 	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2584 	sc->sc_nrxfree++;
2585 	MWL_RXFREE_UNLOCK(sc);
2586 }
2587 
/*
 * (Re)initialize an rx descriptor, assigning a dma buffer if
 * the slot has none.  On success ownership of the descriptor
 * is handed to the firmware (DRIVER_OWN); if no dma buffer is
 * available the descriptor is marked OS_OWN so the firmware
 * skips it, and ENOMEM is returned.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2626 
/*
 * mbuf external-storage free callback: return the dma buffer
 * to the jumbo pool and, if rx processing was blocked on buffer
 * exhaustion, re-enable rx interrupts once the pool refills
 * past the low-water mark.
 */
static int
mwl_ext_free(struct mbuf *m, void *data, void *arg)
{
	struct mwl_softc *sc = arg;

	/* XXX bounds check data */
	mwl_putrxdma(sc, data);
	/*
	 * If we were previously blocked by a lack of rx dma buffers
	 * check if we now have enough to restart rx interrupt handling.
	 * NB: we know we are called at splvm which is above splnet.
	 */
	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
		sc->sc_rxblocked = 0;
		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
	}
	return (EXT_FREE_OK);
}
2645 
/*
 * 802.11 BlockAckReq (BAR) control frame layout, used by
 * mwl_anyhdrsize() to size BAR headers.
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];		/* frame control */
	u_int8_t	i_dur[2];		/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2653 
2654 /*
2655  * Like ieee80211_anyhdrsize, but handles BAR frames
2656  * specially so the logic below to piece the 802.11
2657  * header together works.
2658  */
2659 static __inline int
2660 mwl_anyhdrsize(const void *data)
2661 {
2662 	const struct ieee80211_frame *wh = data;
2663 
2664 	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2665 		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2666 		case IEEE80211_FC0_SUBTYPE_CTS:
2667 		case IEEE80211_FC0_SUBTYPE_ACK:
2668 			return sizeof(struct ieee80211_frame_ack);
2669 		case IEEE80211_FC0_SUBTYPE_BAR:
2670 			return sizeof(struct mwl_frame_bar);
2671 		}
2672 		return sizeof(struct ieee80211_frame_min);
2673 	} else
2674 		return ieee80211_hdrsize(data);
2675 }
2676 
2677 static void
2678 mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2679 {
2680 	const struct ieee80211_frame *wh;
2681 	struct ieee80211_node *ni;
2682 
2683 	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2684 	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2685 	if (ni != NULL) {
2686 		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2687 		ieee80211_free_node(ni);
2688 	}
2689 }
2690 
2691 /*
2692  * Convert hardware signal strength to rssi.  The value
2693  * provided by the device has the noise floor added in;
2694  * we need to compensate for this but we don't have that
2695  * so we use a fixed value.
2696  *
2697  * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2698  * offset is already set as part of the initial gain.  This
2699  * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2700  */
2701 static __inline int
2702 cvtrssi(uint8_t ssi)
2703 {
2704 	int rssi = (int) ssi + 8;
2705 	/* XXX hack guess until we have a real noise floor */
2706 	rssi = 2*(87 - rssi);	/* NB: .5 dBm units */
2707 	return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
2708 }
2709 
/*
 * Rx deferred processing (taskqueue).  Walks the rx descriptor
 * ring starting at sc_rxnext, processing at most mwl_rxquota
 * frames: each firmware-completed descriptor's dma buffer is
 * attached to an mbuf (zero-copy), a proper 802.11 header is
 * reconstituted in front of the payload, and the frame is
 * dispatched to net80211.  The descriptor is then re-armed with
 * a replacement dma buffer.  If the jumbo pool runs dry, rx
 * interrupts are disabled until mwl_ext_free refills the pool.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* firmware still owns the descriptor; nothing more to do */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			ifp->if_ierrors++;
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mwl_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
		    data, sc, 0, EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			/* restore the QoS control field stripped by the f/w */
			if (IEEE80211_DIR_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_WEP | IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		ifp->if_ipackets++;

		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	sc->sc_rxnext = bf;

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(ifp);
	}
#undef IEEE80211_DIR_DSTODS
}
2945 
2946 static void
2947 mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
2948 {
2949 	struct mwl_txbuf *bf, *bn;
2950 	struct mwl_txdesc *ds;
2951 
2952 	MWL_TXQ_LOCK_INIT(sc, txq);
2953 	txq->qnum = qnum;
2954 	txq->txpri = 0;	/* XXX */
2955 #if 0
2956 	/* NB: q setup by mwl_txdma_setup XXX */
2957 	STAILQ_INIT(&txq->free);
2958 #endif
2959 	STAILQ_FOREACH(bf, &txq->free, bf_list) {
2960 		bf->bf_txq = txq;
2961 
2962 		ds = bf->bf_desc;
2963 		bn = STAILQ_NEXT(bf, bf_list);
2964 		if (bn == NULL)
2965 			bn = STAILQ_FIRST(&txq->free);
2966 		ds->pPhysNext = htole32(bn->bf_daddr);
2967 	}
2968 	STAILQ_INIT(&txq->active);
2969 }
2970 
2971 /*
2972  * Setup a hardware data transmit queue for the specified
2973  * access control.  We record the mapping from ac's
2974  * to h/w queues for use by mwl_tx_start.
2975  */
2976 static int
2977 mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2978 {
2979 #define	N(a)	(sizeof(a)/sizeof(a[0]))
2980 	struct mwl_txq *txq;
2981 
2982 	if (ac >= N(sc->sc_ac2q)) {
2983 		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2984 			ac, N(sc->sc_ac2q));
2985 		return 0;
2986 	}
2987 	if (mvtype >= MWL_NUM_TX_QUEUES) {
2988 		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2989 			mvtype, MWL_NUM_TX_QUEUES);
2990 		return 0;
2991 	}
2992 	txq = &sc->sc_txq[mvtype];
2993 	mwl_txq_init(sc, txq, mvtype);
2994 	sc->sc_ac2q[ac] = txq;
2995 	return 1;
2996 #undef N
2997 }
2998 
2999 /*
3000  * Update WME parameters for a transmit queue.
3001  */
3002 static int
3003 mwl_txq_update(struct mwl_softc *sc, int ac)
3004 {
3005 #define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
3006 	struct ifnet *ifp = sc->sc_ifp;
3007 	struct ieee80211com *ic = ifp->if_l2com;
3008 	struct mwl_txq *txq = sc->sc_ac2q[ac];
3009 	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
3010 	struct mwl_hal *mh = sc->sc_mh;
3011 	int aifs, cwmin, cwmax, txoplim;
3012 
3013 	aifs = wmep->wmep_aifsn;
3014 	/* XXX in sta mode need to pass log values for cwmin/max */
3015 	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3016 	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3017 	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
3018 
3019 	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
3020 		device_printf(sc->sc_dev, "unable to update hardware queue "
3021 			"parameters for %s traffic!\n",
3022 			ieee80211_wme_acnames[ac]);
3023 		return 0;
3024 	}
3025 	return 1;
3026 #undef MWL_EXPONENT_TO_VALUE
3027 }
3028 
3029 /*
3030  * Callback from the 802.11 layer to update WME parameters.
3031  */
3032 static int
3033 mwl_wme_update(struct ieee80211com *ic)
3034 {
3035 	struct mwl_softc *sc = ic->ic_ifp->if_softc;
3036 
3037 	return !mwl_txq_update(sc, WME_AC_BE) ||
3038 	    !mwl_txq_update(sc, WME_AC_BK) ||
3039 	    !mwl_txq_update(sc, WME_AC_VI) ||
3040 	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3041 }
3042 
3043 /*
3044  * Reclaim resources for a setup queue.
3045  */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/*
	 * Currently only the per-queue mutex needs teardown.
	 * XXX hal work?
	 */
	MWL_TXQ_LOCK_DESTROY(txq);
}
3052 
3053 /*
3054  * Reclaim all tx queue resources.
3055  */
3056 static void
3057 mwl_tx_cleanup(struct mwl_softc *sc)
3058 {
3059 	int i;
3060 
3061 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3062 		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
3063 }
3064 
/*
 * Map an outbound mbuf chain for DMA.  On success bf holds the
 * segment list (bf_segs/bf_nseg) and the (possibly replaced)
 * mbuf chain in bf_m; on failure the chain is freed and an
 * errno is returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			/* linearization failed; give up on the frame */
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* reload the map with the compacted chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	/* flush CPU writes before the device reads the buffer */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3126 
/*
 * Convert a legacy ieee rate code to the index the tx
 * descriptor Format field expects; unknown rates map to 0.
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int legacyrates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ix;

	for (ix = 0; ix < (int)(sizeof(legacyrates)/sizeof(legacyrates[0])); ix++) {
		if (legacyrates[ix] == rate)
			return ix;
	}
	return 0;
}
3147 
3148 /*
3149  * Calculate fixed tx rate information per client state;
3150  * this value is suitable for writing to the Format field
3151  * of a tx descriptor.
3152  */
static uint16_t
mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
{
	uint16_t fmt;

	/*
	 * Antenna select is hardwired to 3; the extension channel
	 * is set LO only for HT40D channels, HI otherwise.
	 * NOTE(review): the HT40D->LO / default->HI asymmetry is
	 * assumed intentional — verify against the f/w descriptor docs.
	 */
	fmt = SM(3, EAGLE_TXD_ANTENNA)
	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
		fmt |= EAGLE_TXD_FORMAT_HT
		    /* NB: 0x80 implicitly stripped from ucastrate */
		    | SM(rate, EAGLE_TXD_RATE);
		/* XXX short/long GI may be wrong; re-check */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			/* 40MHz channel; GI from the peer's HT40 SGI cap */
			fmt |= EAGLE_TXD_CHW_40
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		} else {
			/* 20MHz channel; GI from the peer's HT20 SGI cap */
			fmt |= EAGLE_TXD_CHW_20
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		}
	} else {			/* legacy rate */
		fmt |= EAGLE_TXD_FORMAT_LEGACY
		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
		    | EAGLE_TXD_CHW_20
		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
	}
	return fmt;
}
3185 
/*
 * Format a frame for transmit and hand it to the h/w queue bound
 * to bf: prepend the 2-byte f/w length + 4-address header record,
 * do any crypto encapsulation, load the DMA map, fill in the tx
 * descriptor (rate/priority), and queue it firmware-owned.  On any
 * error the mbuf chain is freed and an errno is returned; on
 * success the driver owns m0 (bf->bf_m) and keeps the caller's
 * node reference in bf->bf_node.
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	/* NB: copyhdrlen is computed but not used below */
	copyhdrlen = hdrlen;
	pktlen = m0->m_pkthdr.len;
	/* extract the raw (on-wire) QoS control field, if any */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		if (IEEE80211_DIR_DSTODS(wh)) {
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
			copyhdrlen -= sizeof(qos);
		} else
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe *) wh)->i_qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		/*
		 * NB: leading space was verified above so M_PREPEND
		 *     adjusts m_data/m_len in place (no allocation,
		 *     cannot fail here).
		 */
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/*
			 * Route the frame to the BA stream's queue when
			 * its tid matches an active stream; otherwise
			 * use the normal ac queue.
			 * NB: EAPOL frames will never have qos set
			 */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	/* hand the descriptor to the firmware under the queue lock */
	MWL_TXQ_LOCK(txq);
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ifp->if_opackets++;
	sc->sc_tx_timer = 5;
	MWL_TXQ_UNLOCK(txq);

	return 0;
#undef	IEEE80211_DIR_DSTODS
}
3434 
/*
 * Map a legacy rate index (as reported in the tx descriptor's
 * Format field) back to the ieee rate code; out-of-range
 * indices map to 0.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	const int nrates = (int)(sizeof(ieeerates) / sizeof(ieeerates[0]));

	if (rix < 0 || rix >= nrates)
		return 0;
	return ieeerates[rix];
}
3444 
3445 /*
3446  * Process completed xmit descriptors from the specified queue.
3447  */
/*
 * Reap completed descriptors from one h/w queue: update per-node
 * rate/antenna statistics, run any tx-complete callbacks, release
 * node references, and return buffers to the free list.  Returns
 * the number of descriptors reclaimed.
 */
static int
mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
{
/* NB: this macro is currently unused in the function body */
#define	EAGLE_TXD_STATUS_MCAST \
	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;
	struct ieee80211_node *ni;
	struct mwl_node *an;
	int nreaped;
	uint32_t status;

	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
	for (nreaped = 0;; nreaped++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		ds = bf->bf_desc;
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor the firmware still owns */
		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);

#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
			mwl_printtxbuf(bf, txq->qnum, nreaped);
#endif
		ni = bf->bf_node;
		if (ni != NULL) {
			an = MWL_NODE(ni);	/* NB: not otherwise used below */
			status = le32toh(ds->Status);
			if (status & EAGLE_TXD_STATUS_OK) {
				uint16_t Format = le16toh(ds->Format);
				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);

				sc->sc_stats.mst_ant_tx[txant]++;
				if (status & EAGLE_TXD_STATUS_OK_RETRY)
					sc->sc_stats.mst_tx_retries++;
				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
					sc->sc_stats.mst_tx_mretries++;
				if (txq->qnum >= MWL_WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				/* record the rate the f/w actually used */
				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
					ni->ni_txrate = mwl_cvtlegacyrix(
					    ni->ni_txrate);
				} else
					ni->ni_txrate |= IEEE80211_RATE_MCS;
				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
			} else {
				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
					sc->sc_stats.mst_tx_linkerror++;
				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
					sc->sc_stats.mst_tx_xretries++;
				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
					sc->sc_stats.mst_tx_aging++;
				if (bf->bf_m->m_flags & M_FF)
					sc->sc_stats.mst_ff_txerr++;
			}
			/*
			 * Do any tx complete callback.  Note this must
			 * be done before releasing the node reference.
			 * XXX no way to figure out if frame was ACK'd
			 */
			if (bf->bf_m->m_flags & M_TXCB) {
				/* XXX strip fw len in case header inspected */
				m_adj(bf->bf_m, sizeof(uint16_t));
				ieee80211_process_callback(ni, bf->bf_m,
					(status & EAGLE_TXD_STATUS_OK) == 0);
			}
			/*
			 * Reclaim reference to node.
			 *
			 * NB: the node may be reclaimed here if, for example
			 *     this is a DEAUTH message that was sent and the
			 *     node was timed out due to inactivity.
			 */
			ieee80211_free_node(ni);
		}
		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);

		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
	return nreaped;
#undef EAGLE_TXD_STATUS_MCAST
}
3548 
3549 /*
3550  * Deferred processing of transmit interrupt; special-cased
3551  * for four hardware queues, 0-3.
3552  */
3553 static void
3554 mwl_tx_proc(void *arg, int npending)
3555 {
3556 	struct mwl_softc *sc = arg;
3557 	struct ifnet *ifp = sc->sc_ifp;
3558 	int nreaped;
3559 
3560 	/*
3561 	 * Process each active queue.
3562 	 */
3563 	nreaped = 0;
3564 	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3565 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3566 	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3567 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3568 	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3569 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3570 	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3571 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3572 
3573 	if (nreaped != 0) {
3574 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3575 		sc->sc_tx_timer = 0;
3576 		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
3577 			/* NB: kick fw; the tx thread may have been preempted */
3578 			mwl_hal_txstart(sc->sc_mh, 0);
3579 			mwl_start(ifp);
3580 		}
3581 	}
3582 }
3583 
/*
 * Reclaim every frame pending on one tx queue regardless of its
 * completion status (no status check is done); used on reset/stop.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ifnet *ifp = sc->sc_ifp;
			struct ieee80211com *ic = ifp->if_l2com;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3628 
3629 /*
3630  * Drain the transmit queues and reclaim resources.
3631  */
3632 static void
3633 mwl_draintxq(struct mwl_softc *sc)
3634 {
3635 	struct ifnet *ifp = sc->sc_ifp;
3636 	int i;
3637 
3638 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3639 		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3640 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3641 	sc->sc_tx_timer = 0;
3642 }
3643 
3644 #ifdef MWL_DIAGAPI
3645 /*
3646  * Reset the transmit queues to a pristine state after a fw download.
3647  */
3648 static void
3649 mwl_resettxq(struct mwl_softc *sc)
3650 {
3651 	int i;
3652 
3653 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3654 		mwl_txq_reset(sc, &sc->sc_txq[i]);
3655 }
3656 #endif /* MWL_DIAGAPI */
3657 
3658 /*
3659  * Clear the transmit queues of any frames submitted for the
3660  * specified vap.  This is done when the vap is deleted so we
3661  * don't potentially reference the vap after it is gone.
3662  * Note we cannot remove the frames; we only reclaim the node
3663  * reference.
3664  */
3665 static void
3666 mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3667 {
3668 	struct mwl_txq *txq;
3669 	struct mwl_txbuf *bf;
3670 	int i;
3671 
3672 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3673 		txq = &sc->sc_txq[i];
3674 		MWL_TXQ_LOCK(txq);
3675 		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3676 			struct ieee80211_node *ni = bf->bf_node;
3677 			if (ni != NULL && ni->ni_vap == vap) {
3678 				bf->bf_node = NULL;
3679 				ieee80211_free_node(ni);
3680 			}
3681 		}
3682 		MWL_TXQ_UNLOCK(txq);
3683 	}
3684 }
3685 
3686 static int
3687 mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3688 	const uint8_t *frm, const uint8_t *efrm)
3689 {
3690 	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3691 	const struct ieee80211_action *ia;
3692 
3693 	ia = (const struct ieee80211_action *) frm;
3694 	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3695 	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3696 		const struct ieee80211_action_ht_mimopowersave *mps =
3697 		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3698 
3699 		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3700 		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3701 		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3702 		return 0;
3703 	} else
3704 		return sc->sc_recv_action(ni, wh, frm, efrm);
3705 }
3706 
/*
 * net80211 addba request hook: reserve a f/w BA stream slot for
 * this tid before chaining to the original handler.  Returning 0
 * without chaining declines aggregation when no slot or f/w
 * stream is available.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 * NB: slots are probed highest-index first.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3778 
/*
 * net80211 addba response hook: commit the BA stream reserved in
 * mwl_addba_request to the firmware on success, or release it on
 * failure/NAK, then chain to the original handler.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3845 
3846 static void
3847 mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3848 {
3849 	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3850 	struct mwl_bastate *bas;
3851 
3852 	bas = tap->txa_private;
3853 	if (bas != NULL) {
3854 		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3855 		    __func__, bas->bastream);
3856 		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3857 		mwl_bastream_free(bas);
3858 		tap->txa_private = NULL;
3859 	}
3860 	sc->sc_addba_stop(ni, tap);
3861 }
3862 
3863 /*
3864  * Setup the rx data structures.  This should only be
3865  * done once or we may get out of sync with the firmware.
3866  */
3867 static int
3868 mwl_startrecv(struct mwl_softc *sc)
3869 {
3870 	if (!sc->sc_recvsetup) {
3871 		struct mwl_rxbuf *bf, *prev;
3872 		struct mwl_rxdesc *ds;
3873 
3874 		prev = NULL;
3875 		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
3876 			int error = mwl_rxbuf_init(sc, bf);
3877 			if (error != 0) {
3878 				DPRINTF(sc, MWL_DEBUG_RECV,
3879 					"%s: mwl_rxbuf_init failed %d\n",
3880 					__func__, error);
3881 				return error;
3882 			}
3883 			if (prev != NULL) {
3884 				ds = prev->bf_desc;
3885 				ds->pPhysNext = htole32(bf->bf_daddr);
3886 			}
3887 			prev = bf;
3888 		}
3889 		if (prev != NULL) {
3890 			ds = prev->bf_desc;
3891 			ds->pPhysNext =
3892 			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
3893 		}
3894 		sc->sc_recvsetup = 1;
3895 	}
3896 	mwl_mode_init(sc);		/* set filters, etc. */
3897 	return 0;
3898 }
3899 
3900 static MWL_HAL_APMODE
3901 mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3902 {
3903 	MWL_HAL_APMODE mode;
3904 
3905 	if (IEEE80211_IS_CHAN_HT(chan)) {
3906 		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3907 			mode = AP_MODE_N_ONLY;
3908 		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3909 			mode = AP_MODE_AandN;
3910 		else if (vap->iv_flags & IEEE80211_F_PUREG)
3911 			mode = AP_MODE_GandN;
3912 		else
3913 			mode = AP_MODE_BandGandN;
3914 	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3915 		if (vap->iv_flags & IEEE80211_F_PUREG)
3916 			mode = AP_MODE_G_ONLY;
3917 		else
3918 			mode = AP_MODE_MIXED;
3919 	} else if (IEEE80211_IS_CHAN_B(chan))
3920 		mode = AP_MODE_B_ONLY;
3921 	else if (IEEE80211_IS_CHAN_A(chan))
3922 		mode = AP_MODE_A_ONLY;
3923 	else
3924 		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3925 	return mode;
3926 }
3927 
3928 static int
3929 mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3930 {
3931 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3932 	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3933 }
3934 
3935 /*
3936  * Set/change channels.
3937  */
3938 static int
3939 mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
3940 {
3941 	struct mwl_hal *mh = sc->sc_mh;
3942 	struct ifnet *ifp = sc->sc_ifp;
3943 	struct ieee80211com *ic = ifp->if_l2com;
3944 	MWL_HAL_CHANNEL hchan;
3945 	int maxtxpow;
3946 
3947 	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
3948 	    __func__, chan->ic_freq, chan->ic_flags);
3949 
3950 	/*
3951 	 * Convert to a HAL channel description with
3952 	 * the flags constrained to reflect the current
3953 	 * operating mode.
3954 	 */
3955 	mwl_mapchan(&hchan, chan);
3956 	mwl_hal_intrset(mh, 0);		/* disable interrupts */
3957 #if 0
3958 	mwl_draintxq(sc);		/* clear pending tx frames */
3959 #endif
3960 	mwl_hal_setchannel(mh, &hchan);
3961 	/*
3962 	 * Tx power is cap'd by the regulatory setting and
3963 	 * possibly a user-set limit.  We pass the min of
3964 	 * these to the hal to apply them to the cal data
3965 	 * for this channel.
3966 	 * XXX min bound?
3967 	 */
3968 	maxtxpow = 2*chan->ic_maxregpower;
3969 	if (maxtxpow > ic->ic_txpowlimit)
3970 		maxtxpow = ic->ic_txpowlimit;
3971 	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
3972 	/* NB: potentially change mcast/mgt rates */
3973 	mwl_setcurchanrates(sc);
3974 
3975 	/*
3976 	 * Update internal state.
3977 	 */
3978 	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
3979 	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
3980 	if (IEEE80211_IS_CHAN_A(chan)) {
3981 		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
3982 		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
3983 	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3984 		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
3985 		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
3986 	} else {
3987 		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
3988 		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
3989 	}
3990 	sc->sc_curchan = hchan;
3991 	mwl_hal_intrset(mh, sc->sc_imask);
3992 
3993 	return 0;
3994 }
3995 
3996 static void
3997 mwl_scan_start(struct ieee80211com *ic)
3998 {
3999 	struct ifnet *ifp = ic->ic_ifp;
4000 	struct mwl_softc *sc = ifp->if_softc;
4001 
4002 	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
4003 }
4004 
4005 static void
4006 mwl_scan_end(struct ieee80211com *ic)
4007 {
4008 	struct ifnet *ifp = ic->ic_ifp;
4009 	struct mwl_softc *sc = ifp->if_softc;
4010 
4011 	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
4012 }
4013 
4014 static void
4015 mwl_set_channel(struct ieee80211com *ic)
4016 {
4017 	struct ifnet *ifp = ic->ic_ifp;
4018 	struct mwl_softc *sc = ifp->if_softc;
4019 
4020 	(void) mwl_chan_set(sc, ic->ic_curchan);
4021 }
4022 
4023 /*
4024  * Handle a channel switch request.  We inform the firmware
4025  * and mark the global state to suppress various actions.
4026  * NB: we issue only one request to the fw; we may be called
4027  * multiple times if there are multiple vap's.
4028  */
4029 static void
4030 mwl_startcsa(struct ieee80211vap *vap)
4031 {
4032 	struct ieee80211com *ic = vap->iv_ic;
4033 	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4034 	MWL_HAL_CHANNEL hchan;
4035 
4036 	if (sc->sc_csapending)
4037 		return;
4038 
4039 	mwl_mapchan(&hchan, ic->ic_csa_newchan);
4040 	/* 1 =>'s quiet channel */
4041 	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
4042 	sc->sc_csapending = 1;
4043 }
4044 
4045 /*
4046  * Plumb any static WEP key for the station.  This is
4047  * necessary as we must propagate the key from the
4048  * global key table of the vap to each sta db entry.
4049  */
4050 static void
4051 mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4052 {
4053 	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
4054 		IEEE80211_F_PRIVACY &&
4055 	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
4056 	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
4057 		(void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
4058 }
4059 
/*
 * Create/update the firmware station db entry for a peer.
 * aid/staid may be zero and pi may be NULL when no peer
 * capabilities are known yet (e.g. pre-association).
 * Returns 0 on success or the hal error code.
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
4093 
4094 static void
4095 mwl_setglobalkeys(struct ieee80211vap *vap)
4096 {
4097 	struct ieee80211_key *wk;
4098 
4099 	wk = &vap->iv_nw_keys[0];
4100 	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4101 		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4102 			(void) mwl_key_set(vap, wk, vap->iv_myaddr);
4103 }
4104 
4105 /*
4106  * Convert a legacy rate set to a firmware bitmask.
4107  */
4108 static uint32_t
4109 get_rate_bitmap(const struct ieee80211_rateset *rs)
4110 {
4111 	uint32_t rates;
4112 	int i;
4113 
4114 	rates = 0;
4115 	for (i = 0; i < rs->rs_nrates; i++)
4116 		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4117 		case 2:	  rates |= 0x001; break;
4118 		case 4:	  rates |= 0x002; break;
4119 		case 11:  rates |= 0x004; break;
4120 		case 22:  rates |= 0x008; break;
4121 		case 44:  rates |= 0x010; break;
4122 		case 12:  rates |= 0x020; break;
4123 		case 18:  rates |= 0x040; break;
4124 		case 24:  rates |= 0x080; break;
4125 		case 36:  rates |= 0x100; break;
4126 		case 48:  rates |= 0x200; break;
4127 		case 72:  rates |= 0x400; break;
4128 		case 96:  rates |= 0x800; break;
4129 		case 108: rates |= 0x1000; break;
4130 		}
4131 	return rates;
4132 }
4133 
4134 /*
4135  * Construct an HT firmware bitmask from an HT rate set.
4136  */
4137 static uint32_t
4138 get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4139 {
4140 	uint32_t rates;
4141 	int i;
4142 
4143 	rates = 0;
4144 	for (i = 0; i < rs->rs_nrates; i++) {
4145 		if (rs->rs_rates[i] < 16)
4146 			rates |= 1<<rs->rs_rates[i];
4147 	}
4148 	return rates;
4149 }
4150 
4151 /*
4152  * Craft station database entry for station.
4153  * NB: use host byte order here, the hal handles byte swapping.
4154  */
4155 static MWL_HAL_PEERINFO *
4156 mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
4157 {
4158 	const struct ieee80211vap *vap = ni->ni_vap;
4159 
4160 	memset(pi, 0, sizeof(*pi));
4161 	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
4162 	pi->CapInfo = ni->ni_capinfo;
4163 	if (ni->ni_flags & IEEE80211_NODE_HT) {
4164 		/* HT capabilities, etc */
4165 		pi->HTCapabilitiesInfo = ni->ni_htcap;
4166 		/* XXX pi.HTCapabilitiesInfo */
4167 	        pi->MacHTParamInfo = ni->ni_htparam;
4168 		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
4169 		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
4170 		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
4171 		pi->AddHtInfo.OpMode = ni->ni_htopmode;
4172 		pi->AddHtInfo.stbc = ni->ni_htstbc;
4173 
4174 		/* constrain according to local configuration */
4175 		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
4176 			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
4177 		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
4178 			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
4179 		if (ni->ni_chw != 40)
4180 			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
4181 	}
4182 	return pi;
4183 }
4184 
4185 /*
4186  * Re-create the local sta db entry for a vap to ensure
4187  * up to date WME state is pushed to the firmware.  Because
4188  * this resets crypto state this must be followed by a
4189  * reload of any keys in the global key table.
4190  */
4191 static int
4192 mwl_localstadb(struct ieee80211vap *vap)
4193 {
4194 #define	WME(ie) ((const struct ieee80211_wme_info *) ie)
4195 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
4196 	struct ieee80211_node *bss;
4197 	MWL_HAL_PEERINFO pi;
4198 	int error;
4199 
4200 	switch (vap->iv_opmode) {
4201 	case IEEE80211_M_STA:
4202 		bss = vap->iv_bss;
4203 		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
4204 		    vap->iv_state == IEEE80211_S_RUN ?
4205 			mkpeerinfo(&pi, bss) : NULL,
4206 		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
4207 		    bss->ni_ies.wme_ie != NULL ?
4208 			WME(bss->ni_ies.wme_ie)->wme_info : 0);
4209 		if (error == 0)
4210 			mwl_setglobalkeys(vap);
4211 		break;
4212 	case IEEE80211_M_HOSTAP:
4213 	case IEEE80211_M_MBSS:
4214 		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
4215 		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
4216 		if (error == 0)
4217 			mwl_setglobalkeys(vap);
4218 		break;
4219 	default:
4220 		error = 0;
4221 		break;
4222 	}
4223 	return error;
4224 #undef WME
4225 }
4226 
/*
 * Driver state-machine hook.  Carries out driver/firmware work
 * around the net80211 state transition: pre-transition actions,
 * then the parent (net80211) handler, then post-transition work
 * that depends on updated state (e.g. iv_bss).
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* stop the station-aging timer; restarted below if we reach RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* NB: DWDS is enabled on the first DWDS vap to RUN */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/*
		 * NOTE(review): this decrement runs on any non-RUN/non-SLEEP
		 * transition of a DWDS vap; presumably it is balanced by the
		 * increment in the RUN path above — verify it cannot go
		 * negative for a DWDS vap that never reached RUN.
		 */
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4380 
4381 /*
4382  * Manage station id's; these are separate from AID's
4383  * as AID's may have values out of the range of possible
4384  * station id's acceptable to the firmware.
4385  */
4386 static int
4387 allocstaid(struct mwl_softc *sc, int aid)
4388 {
4389 	int staid;
4390 
4391 	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4392 		/* NB: don't use 0 */
4393 		for (staid = 1; staid < MWL_MAXSTAID; staid++)
4394 			if (isclr(sc->sc_staid, staid))
4395 				break;
4396 	} else
4397 		staid = aid;
4398 	setbit(sc->sc_staid, staid);
4399 	return staid;
4400 }
4401 
/* Return a station id to the pool for reuse. */
static void
delstaid(struct mwl_softc *sc, int staid)
{
	clrbit(sc->sc_staid, staid);
}
4407 
4408 /*
4409  * Setup driver-specific state for a newly associated node.
4410  * Note that we're called also on a re-associate, the isnew
4411  * param tells us if this is the first time or not.
4412  */
4413 static void
4414 mwl_newassoc(struct ieee80211_node *ni, int isnew)
4415 {
4416 	struct ieee80211vap *vap = ni->ni_vap;
4417         struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4418 	struct mwl_node *mn = MWL_NODE(ni);
4419 	MWL_HAL_PEERINFO pi;
4420 	uint16_t aid;
4421 	int error;
4422 
4423 	aid = IEEE80211_AID(ni->ni_associd);
4424 	if (isnew) {
4425 		mn->mn_staid = allocstaid(sc, aid);
4426 		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4427 	} else {
4428 		mn = MWL_NODE(ni);
4429 		/* XXX reset BA stream? */
4430 	}
4431 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4432 	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4433 	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4434 	if (error != 0) {
4435 		DPRINTF(sc, MWL_DEBUG_NODE,
4436 		    "%s: error %d creating sta db entry\n",
4437 		    __func__, error);
4438 		/* XXX how to deal with error? */
4439 	}
4440 }
4441 
4442 /*
4443  * Periodically poke the firmware to age out station state
4444  * (power save queues, pending tx aggregates).
4445  */
4446 static void
4447 mwl_agestations(void *arg)
4448 {
4449 	struct mwl_softc *sc = arg;
4450 
4451 	mwl_hal_setkeepalive(sc->sc_mh);
4452 	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
4453 		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
4454 }
4455 
4456 static const struct mwl_hal_channel *
4457 findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4458 {
4459 	int i;
4460 
4461 	for (i = 0; i < ci->nchannels; i++) {
4462 		const struct mwl_hal_channel *hc = &ci->channels[i];
4463 		if (hc->ieee == ieee)
4464 			return hc;
4465 	}
4466 	return NULL;
4467 }
4468 
4469 static int
4470 mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4471 	int nchan, struct ieee80211_channel chans[])
4472 {
4473 	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4474 	struct mwl_hal *mh = sc->sc_mh;
4475 	const MWL_HAL_CHANNELINFO *ci;
4476 	int i;
4477 
4478 	for (i = 0; i < nchan; i++) {
4479 		struct ieee80211_channel *c = &chans[i];
4480 		const struct mwl_hal_channel *hc;
4481 
4482 		if (IEEE80211_IS_CHAN_2GHZ(c)) {
4483 			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4484 			    IEEE80211_IS_CHAN_HT40(c) ?
4485 				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4486 		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4487 			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4488 			    IEEE80211_IS_CHAN_HT40(c) ?
4489 				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4490 		} else {
4491 			if_printf(ic->ic_ifp,
4492 			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4493 			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4494 			return EINVAL;
4495 		}
4496 		/*
4497 		 * Verify channel has cal data and cap tx power.
4498 		 */
4499 		hc = findhalchannel(ci, c->ic_ieee);
4500 		if (hc != NULL) {
4501 			if (c->ic_maxpower > 2*hc->maxTxPow)
4502 				c->ic_maxpower = 2*hc->maxTxPow;
4503 			goto next;
4504 		}
4505 		if (IEEE80211_IS_CHAN_HT40(c)) {
4506 			/*
4507 			 * Look for the extension channel since the
4508 			 * hal table only has the primary channel.
4509 			 */
4510 			hc = findhalchannel(ci, c->ic_extieee);
4511 			if (hc != NULL) {
4512 				if (c->ic_maxpower > 2*hc->maxTxPow)
4513 					c->ic_maxpower = 2*hc->maxTxPow;
4514 				goto next;
4515 			}
4516 		}
4517 		if_printf(ic->ic_ifp,
4518 		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4519 		    __func__, c->ic_ieee, c->ic_extieee,
4520 		    c->ic_freq, c->ic_flags);
4521 		return EINVAL;
4522 	next:
4523 		;
4524 	}
4525 	return 0;
4526 }
4527 
4528 #define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4529 #define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4530 
4531 static void
4532 addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
4533 {
4534 	c->ic_freq = freq;
4535 	c->ic_flags = flags;
4536 	c->ic_ieee = ieee;
4537 	c->ic_minpower = 0;
4538 	c->ic_maxpower = 2*txpow;
4539 	c->ic_maxregpower = txpow;
4540 }
4541 
4542 static const struct ieee80211_channel *
4543 findchannel(const struct ieee80211_channel chans[], int nchans,
4544 	int freq, int flags)
4545 {
4546 	const struct ieee80211_channel *c;
4547 	int i;
4548 
4549 	for (i = 0; i < nchans; i++) {
4550 		c = &chans[i];
4551 		if (c->ic_freq == freq && c->ic_flags == flags)
4552 			return c;
4553 	}
4554 	return NULL;
4555 }
4556 
/*
 * Append HT40 channel pairs derived from the hal's 40MHz
 * channel table.  A pair is added only when the HT20 entry
 * for the extension channel (20MHz above the primary) was
 * already installed by a prior addchannels() pass.
 */
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	const struct ieee80211_channel *extc;
	const struct mwl_hal_channel *hc;
	int i;

	/* c tracks the next free slot; *nchans is advanced in lockstep */
	c = &chans[*nchans];

	flags &= ~IEEE80211_CHAN_HT;
	for (i = 0; i < ci->nchannels; i++) {
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * extension channel above and the insert the pair.
		 */
		hc = &ci->channels[i];
		extc = findchannel(chans, *nchans, hc->freq+20,
		    flags | IEEE80211_CHAN_HT20);
		if (extc != NULL) {
			if (*nchans >= maxchans)
				break;
			/* primary channel, extension above (HT40U) */
			addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
			    hc->ieee, hc->maxTxPow);
			c->ic_extieee = extc->ic_ieee;
			c++, (*nchans)++;
			if (*nchans >= maxchans)
				break;
			/* extension channel, primary below (HT40D) */
			addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
			    extc->ic_ieee, hc->maxTxPow);
			c->ic_extieee = hc->ieee;
			c++, (*nchans)++;
		}
	}
}
4593 
/*
 * Append 20MHz channels from the hal table.  For 2.4GHz (G/HTG)
 * and HT channels extra companion entries are synthesized so
 * net80211 sees the b-only / g-only / a-only variants too.
 * NB: c[-1]/c[0] manipulation duplicates the just-added entry
 * and rewrites flags; statement order is significant.
 */
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	/* c tracks the next free slot; *nchans is advanced in lockstep */
	c = &chans[*nchans];

	for (i = 0; i < ci->nchannels; i++) {
		const struct mwl_hal_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;
		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channel have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channel have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channel have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}
4641 
static void
getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
	struct ieee80211_channel chans[])
{
	const MWL_HAL_CHANNELINFO *ci;

	/*
	 * Use the channel info from the hal to craft the
	 * channel list.  Note that we pass back an unsorted
	 * list; the caller is required to sort it for us
	 * (if desired).
	 *
	 * NB: the 20MHz passes must run before the 40MHz passes;
	 * addht40channels() locates extension channels among the
	 * HT20 entries installed here.
	 */
	*nchans = 0;
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
}
4668 
4669 static void
4670 mwl_getradiocaps(struct ieee80211com *ic,
4671 	int maxchans, int *nchans, struct ieee80211_channel chans[])
4672 {
4673 	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4674 
4675 	getchannels(sc, maxchans, nchans, chans);
4676 }
4677 
4678 static int
4679 mwl_getchannels(struct mwl_softc *sc)
4680 {
4681 	struct ifnet *ifp = sc->sc_ifp;
4682 	struct ieee80211com *ic = ifp->if_l2com;
4683 
4684 	/*
4685 	 * Use the channel info from the hal to craft the
4686 	 * channel list for net80211.  Note that we pass up
4687 	 * an unsorted list; net80211 will sort it for us.
4688 	 */
4689 	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4690 	ic->ic_nchans = 0;
4691 	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4692 
4693 	ic->ic_regdomain.regdomain = SKU_DEBUG;
4694 	ic->ic_regdomain.country = CTRY_DEFAULT;
4695 	ic->ic_regdomain.location = 'I';
4696 	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4697 	ic->ic_regdomain.isocc[1] = ' ';
4698 	return (ic->ic_nchans == 0 ? EIO : 0);
4699 }
4700 #undef IEEE80211_CHAN_HTA
4701 #undef IEEE80211_CHAN_HTG
4702 
4703 #ifdef MWL_DEBUG
/* Dump an rx descriptor (debug aid); ix is the ring index. */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	/* "*" = firmware-owned + OK status, "!" = firmware-owned + error */
	printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (const struct mwl_desc *)bf->bf_daddr,
	    le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData),
	    ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2))
;
}
4720 
/* Dump a tx descriptor (debug aid); qnum/ix identify queue slot. */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:%p)\n",
	    ds, (const struct mwl_txdesc *)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !");
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	/* multi-segment descriptors carry per-segment length/data arrays */
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
/* raw hex dump of the descriptor, disabled */
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4763 #endif /* MWL_DEBUG */
4764 
4765 #if 0
/* Dump all active tx buffers on a queue (debug aid, compiled out). */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* sync so we read the device's view of the descriptor */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4784 #endif
4785 
4786 static void
4787 mwl_watchdog(void *arg)
4788 {
4789 	struct mwl_softc *sc;
4790 	struct ifnet *ifp;
4791 
4792 	sc = arg;
4793 	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
4794 	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
4795 		return;
4796 
4797 	ifp = sc->sc_ifp;
4798 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
4799 		if (mwl_hal_setkeepalive(sc->sc_mh))
4800 			if_printf(ifp, "transmit timeout (firmware hung?)\n");
4801 		else
4802 			if_printf(ifp, "transmit timeout\n");
4803 #if 0
4804 		mwl_reset(ifp);
4805 mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
4806 #endif
4807 		ifp->if_oerrors++;
4808 		sc->sc_stats.mst_watchdog++;
4809 	}
4810 }
4811 
4812 #ifdef MWL_DIAGAPI
4813 /*
4814  * Diagnostic interface to the HAL.  This is used by various
4815  * tools to do things like retrieve register contents for
4816  * debugging.  The mechanism is intentionally opaque so that
4817  * it can change frequently w/o concern for compatiblity.
4818  */
4819 static int
4820 mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
4821 {
4822 	struct mwl_hal *mh = sc->sc_mh;
4823 	u_int id = md->md_id & MWL_DIAG_ID;
4824 	void *indata = NULL;
4825 	void *outdata = NULL;
4826 	u_int32_t insize = md->md_in_size;
4827 	u_int32_t outsize = md->md_out_size;
4828 	int error = 0;
4829 
4830 	if (md->md_id & MWL_DIAG_IN) {
4831 		/*
4832 		 * Copy in data.
4833 		 */
4834 		indata = malloc(insize, M_TEMP, M_NOWAIT);
4835 		if (indata == NULL) {
4836 			error = ENOMEM;
4837 			goto bad;
4838 		}
4839 		error = copyin(md->md_in_data, indata, insize);
4840 		if (error)
4841 			goto bad;
4842 	}
4843 	if (md->md_id & MWL_DIAG_DYN) {
4844 		/*
4845 		 * Allocate a buffer for the results (otherwise the HAL
4846 		 * returns a pointer to a buffer where we can read the
4847 		 * results).  Note that we depend on the HAL leaving this
4848 		 * pointer for us to use below in reclaiming the buffer;
4849 		 * may want to be more defensive.
4850 		 */
4851 		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
4852 		if (outdata == NULL) {
4853 			error = ENOMEM;
4854 			goto bad;
4855 		}
4856 	}
4857 	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
4858 		if (outsize < md->md_out_size)
4859 			md->md_out_size = outsize;
4860 		if (outdata != NULL)
4861 			error = copyout(outdata, md->md_out_data,
4862 					md->md_out_size);
4863 	} else {
4864 		error = EINVAL;
4865 	}
4866 bad:
4867 	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
4868 		free(indata, M_TEMP);
4869 	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
4870 		free(outdata, M_TEMP);
4871 	return error;
4872 }
4873 
4874 static int
4875 mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
4876 {
4877 	struct mwl_hal *mh = sc->sc_mh;
4878 	int error;
4879 
4880 	MWL_LOCK_ASSERT(sc);
4881 
4882 	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
4883 		device_printf(sc->sc_dev, "unable to load firmware\n");
4884 		return EIO;
4885 	}
4886 	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
4887 		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
4888 		return EIO;
4889 	}
4890 	error = mwl_setupdma(sc);
4891 	if (error != 0) {
4892 		/* NB: mwl_setupdma prints a msg */
4893 		return error;
4894 	}
4895 	/*
4896 	 * Reset tx/rx data structures; after reload we must
4897 	 * re-start the driver's notion of the next xmit/recv.
4898 	 */
4899 	mwl_draintxq(sc);		/* clear pending frames */
4900 	mwl_resettxq(sc);		/* rebuild tx q lists */
4901 	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
4902 	return 0;
4903 }
4904 #endif /* MWL_DIAGAPI */
4905 
4906 static int
4907 mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
4908 {
4909 #define	IS_RUNNING(ifp) \
4910 	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
4911 	struct mwl_softc *sc = ifp->if_softc;
4912 	struct ieee80211com *ic = ifp->if_l2com;
4913 	struct ifreq *ifr = (struct ifreq *)data;
4914 	int error = 0, startall;
4915 
4916 	switch (cmd) {
4917 	case SIOCSIFFLAGS:
4918 		MWL_LOCK(sc);
4919 		startall = 0;
4920 		if (IS_RUNNING(ifp)) {
4921 			/*
4922 			 * To avoid rescanning another access point,
4923 			 * do not call mwl_init() here.  Instead,
4924 			 * only reflect promisc mode settings.
4925 			 */
4926 			mwl_mode_init(sc);
4927 		} else if (ifp->if_flags & IFF_UP) {
4928 			/*
4929 			 * Beware of being called during attach/detach
4930 			 * to reset promiscuous mode.  In that case we
4931 			 * will still be marked UP but not RUNNING.
4932 			 * However trying to re-init the interface
4933 			 * is the wrong thing to do as we've already
4934 			 * torn down much of our state.  There's
4935 			 * probably a better way to deal with this.
4936 			 */
4937 			if (!sc->sc_invalid) {
4938 				mwl_init_locked(sc);	/* XXX lose error */
4939 				startall = 1;
4940 			}
4941 		} else
4942 			mwl_stop_locked(ifp, 1);
4943 		MWL_UNLOCK(sc);
4944 		if (startall)
4945 			ieee80211_start_all(ic);
4946 		break;
4947 	case SIOCGMVSTATS:
4948 		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
4949 		/* NB: embed these numbers to get a consistent view */
4950 		sc->sc_stats.mst_tx_packets = ifp->if_opackets;
4951 		sc->sc_stats.mst_rx_packets = ifp->if_ipackets;
4952 		/*
4953 		 * NB: Drop the softc lock in case of a page fault;
4954 		 * we'll accept any potential inconsisentcy in the
4955 		 * statistics.  The alternative is to copy the data
4956 		 * to a local structure.
4957 		 */
4958 		return copyout(&sc->sc_stats,
4959 				ifr->ifr_data, sizeof (sc->sc_stats));
4960 #ifdef MWL_DIAGAPI
4961 	case SIOCGMVDIAG:
4962 		/* XXX check privs */
4963 		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
4964 	case SIOCGMVRESET:
4965 		/* XXX check privs */
4966 		MWL_LOCK(sc);
4967 		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
4968 		MWL_UNLOCK(sc);
4969 		break;
4970 #endif /* MWL_DIAGAPI */
4971 	case SIOCGIFMEDIA:
4972 		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
4973 		break;
4974 	case SIOCGIFADDR:
4975 		error = ether_ioctl(ifp, cmd, data);
4976 		break;
4977 	default:
4978 		error = EINVAL;
4979 		break;
4980 	}
4981 	return error;
4982 #undef IS_RUNNING
4983 }
4984 
4985 #ifdef	MWL_DEBUG
4986 static int
4987 mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4988 {
4989 	struct mwl_softc *sc = arg1;
4990 	int debug, error;
4991 
4992 	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4993 	error = sysctl_handle_int(oidp, &debug, 0, req);
4994 	if (error || !req->newptr)
4995 		return error;
4996 	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4997 	sc->sc_debug = debug & 0x00ffffff;
4998 	return 0;
4999 }
5000 #endif /* MWL_DEBUG */
5001 
/*
 * Attach dynamic sysctl nodes for the device; currently only the
 * debug mask (and only when compiled with MWL_DEBUG).
 */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *oid = device_get_sysctl_tree(sc->sc_dev);

	/* Seed the per-device mask from the global tunable. */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "debug",
		CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
5015 
5016 /*
5017  * Announce various information on device/driver attach.
5018  */
5019 static void
5020 mwl_announce(struct mwl_softc *sc)
5021 {
5022 	struct ifnet *ifp = sc->sc_ifp;
5023 
5024 	if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
5025 		sc->sc_hwspecs.hwVersion,
5026 		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
5027 		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
5028 		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
5029 		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
5030 		sc->sc_hwspecs.regionCode);
5031 	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;
5032 
5033 	if (bootverbose) {
5034 		int i;
5035 		for (i = 0; i <= WME_AC_VO; i++) {
5036 			struct mwl_txq *txq = sc->sc_ac2q[i];
5037 			if_printf(ifp, "Use hw queue %u for %s traffic\n",
5038 				txq->qnum, ieee80211_wme_acnames[i]);
5039 		}
5040 	}
5041 	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
5042 		if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
5043 	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
5044 		if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
5045 	if (bootverbose || mwl_txbuf != MWL_TXBUF)
5046 		if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
5047 	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
5048 		if_printf(ifp, "multi-bss support\n");
5049 #ifdef MWL_TX_NODROP
5050 	if (bootverbose)
5051 		if_printf(ifp, "no tx drop\n");
5052 #endif
5053 }
5054