xref: /freebsd/sys/dev/mwl/if_mwl.c (revision 59abbffacd1e61792097e0d467fa40e1749d27e8)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
5  * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer,
13  *    without modification.
14  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
16  *    redistribution must be conditioned upon including a substantially
17  *    similar Disclaimer requirement for further binary redistribution.
18  *
19  * NO WARRANTY
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
23  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
24  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
25  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
28  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGES.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * Driver for the Marvell 88W8363 Wireless LAN controller.
38  */
39 
40 #include "opt_inet.h"
41 #include "opt_mwl.h"
42 #include "opt_wlan.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/sysctl.h>
47 #include <sys/mbuf.h>
48 #include <sys/malloc.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/kernel.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/errno.h>
55 #include <sys/callout.h>
56 #include <sys/bus.h>
57 #include <sys/endian.h>
58 #include <sys/kthread.h>
59 #include <sys/taskqueue.h>
60 
61 #include <machine/bus.h>
62 
63 #include <net/if.h>
64 #include <net/if_var.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
67 #include <net/if_types.h>
68 #include <net/if_arp.h>
69 #include <net/ethernet.h>
70 #include <net/if_llc.h>
71 
72 #include <net/bpf.h>
73 
74 #include <net80211/ieee80211_var.h>
75 #include <net80211/ieee80211_input.h>
76 #include <net80211/ieee80211_regdomain.h>
77 
78 #ifdef INET
79 #include <netinet/in.h>
80 #include <netinet/if_ether.h>
81 #endif /* INET */
82 
83 #include <dev/mwl/if_mwlvar.h>
84 #include <dev/mwl/mwldiag.h>
85 
86 /* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
87 #define	MS(v,x)	(((v) & x) >> x##_S)
88 #define	SM(v,x)	(((v) << x##_S) & x)
89 
90 static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
91 		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
92 		    const uint8_t [IEEE80211_ADDR_LEN],
93 		    const uint8_t [IEEE80211_ADDR_LEN]);
94 static void	mwl_vap_delete(struct ieee80211vap *);
95 static int	mwl_setupdma(struct mwl_softc *);
96 static int	mwl_hal_reset(struct mwl_softc *sc);
97 static int	mwl_init(struct mwl_softc *);
98 static void	mwl_parent(struct ieee80211com *);
99 static int	mwl_reset(struct ieee80211vap *, u_long);
100 static void	mwl_stop(struct mwl_softc *);
101 static void	mwl_start(struct mwl_softc *);
102 static int	mwl_transmit(struct ieee80211com *, struct mbuf *);
103 static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
104 			const struct ieee80211_bpf_params *);
105 static int	mwl_media_change(struct ifnet *);
106 static void	mwl_watchdog(void *);
107 static int	mwl_ioctl(struct ieee80211com *, u_long, void *);
108 static void	mwl_radar_proc(void *, int);
109 static void	mwl_chanswitch_proc(void *, int);
110 static void	mwl_bawatchdog_proc(void *, int);
111 static int	mwl_key_alloc(struct ieee80211vap *,
112 			struct ieee80211_key *,
113 			ieee80211_keyix *, ieee80211_keyix *);
114 static int	mwl_key_delete(struct ieee80211vap *,
115 			const struct ieee80211_key *);
116 static int	mwl_key_set(struct ieee80211vap *,
117 			const struct ieee80211_key *);
118 static int	_mwl_key_set(struct ieee80211vap *,
119 			const struct ieee80211_key *,
120 			const uint8_t mac[IEEE80211_ADDR_LEN]);
121 static int	mwl_mode_init(struct mwl_softc *);
122 static void	mwl_update_mcast(struct ieee80211com *);
123 static void	mwl_update_promisc(struct ieee80211com *);
124 static void	mwl_updateslot(struct ieee80211com *);
125 static int	mwl_beacon_setup(struct ieee80211vap *);
126 static void	mwl_beacon_update(struct ieee80211vap *, int);
127 #ifdef MWL_HOST_PS_SUPPORT
128 static void	mwl_update_ps(struct ieee80211vap *, int);
129 static int	mwl_set_tim(struct ieee80211_node *, int);
130 #endif
131 static int	mwl_dma_setup(struct mwl_softc *);
132 static void	mwl_dma_cleanup(struct mwl_softc *);
133 static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
134 		    const uint8_t [IEEE80211_ADDR_LEN]);
135 static void	mwl_node_cleanup(struct ieee80211_node *);
136 static void	mwl_node_drain(struct ieee80211_node *);
137 static void	mwl_node_getsignal(const struct ieee80211_node *,
138 			int8_t *, int8_t *);
139 static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
140 			struct ieee80211_mimo_info *);
141 static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
142 static void	mwl_rx_proc(void *, int);
143 static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
144 static int	mwl_tx_setup(struct mwl_softc *, int, int);
145 static int	mwl_wme_update(struct ieee80211com *);
146 static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
147 static void	mwl_tx_cleanup(struct mwl_softc *);
148 static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
149 static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
150 			     struct mwl_txbuf *, struct mbuf *);
151 static void	mwl_tx_proc(void *, int);
152 static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
153 static void	mwl_draintxq(struct mwl_softc *);
154 static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
155 static int	mwl_recv_action(struct ieee80211_node *,
156 			const struct ieee80211_frame *,
157 			const uint8_t *, const uint8_t *);
158 static int	mwl_addba_request(struct ieee80211_node *,
159 			struct ieee80211_tx_ampdu *, int dialogtoken,
160 			int baparamset, int batimeout);
161 static int	mwl_addba_response(struct ieee80211_node *,
162 			struct ieee80211_tx_ampdu *, int status,
163 			int baparamset, int batimeout);
164 static void	mwl_addba_stop(struct ieee80211_node *,
165 			struct ieee80211_tx_ampdu *);
166 static int	mwl_startrecv(struct mwl_softc *);
167 static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
168 			struct ieee80211_channel *);
169 static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
170 static void	mwl_scan_start(struct ieee80211com *);
171 static void	mwl_scan_end(struct ieee80211com *);
172 static void	mwl_set_channel(struct ieee80211com *);
173 static int	mwl_peerstadb(struct ieee80211_node *,
174 			int aid, int staid, MWL_HAL_PEERINFO *pi);
175 static int	mwl_localstadb(struct ieee80211vap *);
176 static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
177 static int	allocstaid(struct mwl_softc *sc, int aid);
178 static void	delstaid(struct mwl_softc *sc, int staid);
179 static void	mwl_newassoc(struct ieee80211_node *, int);
180 static void	mwl_agestations(void *);
181 static int	mwl_setregdomain(struct ieee80211com *,
182 			struct ieee80211_regdomain *, int,
183 			struct ieee80211_channel []);
184 static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
185 			struct ieee80211_channel []);
186 static int	mwl_getchannels(struct mwl_softc *);
187 
188 static void	mwl_sysctlattach(struct mwl_softc *);
189 static void	mwl_announce(struct mwl_softc *);
190 
191 SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");
192 
193 static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
194 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
195 	    0, "rx descriptors allocated");
196 static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
197 SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
198 	    0, "rx buffers allocated");
199 static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
200 SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
201 	    0, "tx buffers allocated");
202 static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
203 SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
204 	    0, "tx buffers to send at once");
205 static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
206 SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
207 	    0, "max rx buffers to process per interrupt");
208 static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
209 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
210 	    0, "min free rx buffers before restarting traffic");
211 
212 #ifdef MWL_DEBUG
213 static	int mwl_debug = 0;
214 SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
215 	    0, "control debugging printfs");
216 enum {
217 	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
218 	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
219 	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
220 	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
221 	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
222 	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
223 	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
224 	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
225 	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
226 	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
227 	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
228 	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
229 	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
230 	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
231 	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
232 	MWL_DEBUG_ANY		= 0xffffffff
233 };
234 #define	IS_BEACON(wh) \
235     ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
236 	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
237 #define	IFF_DUMPPKTS_RECV(sc, wh) \
238     ((sc->sc_debug & MWL_DEBUG_RECV) && \
239       ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh)))
240 #define	IFF_DUMPPKTS_XMIT(sc) \
241 	(sc->sc_debug & MWL_DEBUG_XMIT)
242 
243 #define	DPRINTF(sc, m, fmt, ...) do {				\
244 	if (sc->sc_debug & (m))					\
245 		printf(fmt, __VA_ARGS__);			\
246 } while (0)
247 #define	KEYPRINTF(sc, hk, mac) do {				\
248 	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
249 		mwl_keyprint(sc, __func__, hk, mac);		\
250 } while (0)
251 static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
252 static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
253 #else
254 #define	IFF_DUMPPKTS_RECV(sc, wh)	0
255 #define	IFF_DUMPPKTS_XMIT(sc)		0
256 #define	DPRINTF(sc, m, fmt, ...)	do { (void )sc; } while (0)
257 #define	KEYPRINTF(sc, k, mac)		do { (void )sc; } while (0)
258 #endif
259 
260 static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
261 
/*
 * Each packet has fixed front matter: a 2-byte length
 * of the payload, followed by a 4-address 802.11 header
 * (regardless of the actual header and always w/o any
 * QoS header).  The payload then follows.
 */
struct mwltxrec {
	uint16_t fwlen;				/* payload length handed to the firmware */
	struct ieee80211_frame_addr4 wh;	/* always 4-address form, no QoS field */
} __packed;
272 
273 /*
274  * Read/Write shorthands for accesses to BAR 0.  Note
275  * that all BAR 1 operations are done in the "hal" and
276  * there should be no reference to them here.
277  */
#ifdef MWL_DEBUG
/* Read a 32-bit register in BAR 0; debug-only (see comment above). */
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
#endif
285 
/* Write a 32-bit register in BAR 0. */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
291 
/*
 * Device attach: attach the HAL, load (station) firmware, fetch
 * h/w specs and channels, set up DMA descriptors and tx queues,
 * then fill in the net80211 com structure and register driver
 * methods.  Returns 0 on success or an errno; on failure the
 * softc is marked invalid so the interrupt handler stays quiet.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	/*
	 * Setup the RX free list lock early, so it can be consistently
	 * removed.
	 */
	MWL_RXFREE_INIT(sc);

	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		device_printf(sc->sc_dev, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to setup descriptors: %d\n",
		    error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, 1);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
	mbufq_init(&sc->sc_snd, ifqmaxlen);

	/*
	 * Private taskqueue used to defer rx/tx/radar/BA-watchdog
	 * processing out of the interrupt handler.
	 * NOTE(review): taskqueue_create can return NULL with M_NOWAIT;
	 * the result is used unchecked below — TODO confirm.
	 */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", device_get_nameunit(sc->sc_dev));

	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		device_printf(sc->sc_dev,
		    "unable to setup xmit queue for %s traffic!\n",
		     ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_hwspecs.macAddr);

	/* call MI attach routine. */
	ieee80211_ifattach(ic);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;
	ic->ic_transmit = mwl_transmit;
	ic->ic_ioctl = mwl_ioctl;
	ic->ic_parent = mwl_parent;

	/* node methods; save the net80211 defaults so ours can chain */
	ic->ic_node_alloc = mwl_node_alloc;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	/* A-MPDU hooks; original methods are saved for chaining */
	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	MWL_RXFREE_DESTROY(sc);
	sc->sc_invalid = 1;
	return error;
}
528 
/*
 * Device detach: stop the hardware, then tear down state in the
 * order documented below (the ordering is load-bearing).
 * Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	mbufq_drain(&sc->sc_snd);

	return 0;
}
559 
560 /*
561  * MAC address handling for multiple BSS on the same radio.
562  * The first vap uses the MAC address from the EEPROM.  For
563  * subsequent vap's we set the U/L bit (bit 1) in the MAC
564  * address and use the next six bits as an index.
565  */
566 static void
567 assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
568 {
569 	int i;
570 
571 	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
572 		/* NB: we only do this if h/w supports multiple bssid */
573 		for (i = 0; i < 32; i++)
574 			if ((sc->sc_bssidmask & (1<<i)) == 0)
575 				break;
576 		if (i != 0)
577 			mac[0] |= (i << 2)|0x2;
578 	} else
579 		i = 0;
580 	sc->sc_bssidmask |= 1<<i;
581 	if (i == 0)
582 		sc->sc_nbssid0++;
583 }
584 
585 static void
586 reclaim_address(struct mwl_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
587 {
588 	int i = mac[0] >> 2;
589 	if (i != 0 || --sc->sc_nbssid0 == 0)
590 		sc->sc_bssidmask &= ~(1<<i);
591 }
592 
/*
 * Create a new vap.  AP/MBSS/STA modes get a dedicated hal vap and
 * (unless IEEE80211_CLONE_MACADDR) a locally assigned MAC address;
 * WDS piggybacks on an existing AP hal vap; monitor mode needs no
 * hal vap.  IBSS/AHDEMO are not supported.  Returns NULL on failure.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			/* undo the address assignment on failure */
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;		/* monitor mode needs no hal vap */
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;
	}

	mvp = malloc(sizeof(struct mwl_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;	/* saved for chaining */
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status,
	    mac);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
720 
/*
 * Destroy a vap: quiesce interrupts while the hal vap and station
 * db entry are removed, release the MAC address slot, purge any
 * pending tx frames for the vap, then re-enable interrupts.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (sc->sc_running) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;	/* WDS has no hal vap of its own */
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	if (sc->sc_running)
		mwl_hal_intrset(mh, sc->sc_imask);	/* restore interrupts */
}
761 
/*
 * Power-management suspend hook: halt the device under the
 * softc lock.  State is rebuilt by mwl_resume().
 */
void
mwl_suspend(struct mwl_softc *sc)
{
	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}
770 
771 void
772 mwl_resume(struct mwl_softc *sc)
773 {
774 	int error = EDOOFUS;
775 
776 	MWL_LOCK(sc);
777 	if (sc->sc_ic.ic_nrunning > 0)
778 		error = mwl_init(sc);
779 	MWL_UNLOCK(sc);
780 
781 	if (error == 0)
782 		ieee80211_start_all(&sc->sc_ic);	/* start all vap's */
783 }
784 
/*
 * System shutdown hook: quiesce the hardware so it stops
 * DMA'ing before the machine goes down.
 */
void
mwl_shutdown(void *arg)
{
	struct mwl_softc *sc = (struct mwl_softc *)arg;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}
794 
795 /*
796  * Interrupt handler.  Most of the actual processing is deferred.
797  */
/*
 * Interrupt handler.  Most of the actual processing is deferred
 * to the driver taskqueue; here we only read+clear the ISR and
 * dispatch per-cause work.
 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	/* defer rx/tx/BA-watchdog processing to the taskqueue */
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;	/* NB: deliberately ignored */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;	/* NB: not handled */
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;	/* NB: deliberately ignored */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
853 
854 static void
855 mwl_radar_proc(void *arg, int pending)
856 {
857 	struct mwl_softc *sc = arg;
858 	struct ieee80211com *ic = &sc->sc_ic;
859 
860 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
861 	    __func__, pending);
862 
863 	sc->sc_stats.mst_radardetect++;
864 	/* XXX stop h/w BA streams? */
865 
866 	IEEE80211_LOCK(ic);
867 	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
868 	IEEE80211_UNLOCK(ic);
869 }
870 
871 static void
872 mwl_chanswitch_proc(void *arg, int pending)
873 {
874 	struct mwl_softc *sc = arg;
875 	struct ieee80211com *ic = &sc->sc_ic;
876 
877 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
878 	    __func__, pending);
879 
880 	IEEE80211_LOCK(ic);
881 	sc->sc_csapending = 0;
882 	ieee80211_csa_completeswitch(ic);
883 	IEEE80211_UNLOCK(ic);
884 }
885 
886 static void
887 mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
888 {
889 	struct ieee80211_node *ni = sp->data[0];
890 
891 	/* send DELBA and drop the stream */
892 	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
893 }
894 
895 static void
896 mwl_bawatchdog_proc(void *arg, int pending)
897 {
898 	struct mwl_softc *sc = arg;
899 	struct mwl_hal *mh = sc->sc_mh;
900 	const MWL_HAL_BASTREAM *sp;
901 	uint8_t bitmap, n;
902 
903 	sc->sc_stats.mst_bawatchdog++;
904 
905 	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
906 		DPRINTF(sc, MWL_DEBUG_AMPDU,
907 		    "%s: could not get bitmap\n", __func__);
908 		sc->sc_stats.mst_bawatchdog_failed++;
909 		return;
910 	}
911 	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
912 	if (bitmap == 0xff) {
913 		n = 0;
914 		/* disable all ba streams */
915 		for (bitmap = 0; bitmap < 8; bitmap++) {
916 			sp = mwl_hal_bastream_lookup(mh, bitmap);
917 			if (sp != NULL) {
918 				mwl_bawatchdog(sp);
919 				n++;
920 			}
921 		}
922 		if (n == 0) {
923 			DPRINTF(sc, MWL_DEBUG_AMPDU,
924 			    "%s: no BA streams found\n", __func__);
925 			sc->sc_stats.mst_bawatchdog_empty++;
926 		}
927 	} else if (bitmap != 0xaa) {
928 		/* disable a single ba stream */
929 		sp = mwl_hal_bastream_lookup(mh, bitmap);
930 		if (sp != NULL) {
931 			mwl_bawatchdog(sp);
932 		} else {
933 			DPRINTF(sc, MWL_DEBUG_AMPDU,
934 			    "%s: no BA stream %d\n", __func__, bitmap);
935 			sc->sc_stats.mst_bawatchdog_notfound++;
936 		}
937 	}
938 }
939 
940 /*
941  * Convert net80211 channel to a HAL channel.
942  */
static void
mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
{
	hc->channel = chan->ic_ieee;

	/*
	 * NOTE(review): clearing the flags via a uint32_t type pun
	 * assumes channelFlags is exactly 32 bits and is a strict-
	 * aliasing violation in principle; a memset over the member
	 * would be the portable form — confirm layout before changing.
	 */
	*(uint32_t *)&hc->channelFlags = 0;
	if (IEEE80211_IS_CHAN_2GHZ(chan))
		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
	else if (IEEE80211_IS_CHAN_5GHZ(chan))
		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
	if (IEEE80211_IS_CHAN_HT40(chan)) {
		/* 40MHz channel: record which side the extension channel is on */
		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
		if (IEEE80211_IS_CHAN_HT40U(chan))
			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
		else
			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
	} else
		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
	/* XXX 10MHz channels */
}
963 
964 /*
965  * Inform firmware of our tx/rx dma setup.  The BAR 0
966  * writes below are for compatibility with older firmware.
967  * For current firmware we send this information with a
968  * cmd block via mwl_hal_sethwdma.
969  */
970 static int
971 mwl_setupdma(struct mwl_softc *sc)
972 {
973 	int error, i;
974 
975 	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
976 	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
977 	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
978 
979 	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
980 		struct mwl_txq *txq = &sc->sc_txq[i];
981 		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
982 		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
983 	}
984 	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
985 	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;
986 
987 	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
988 	if (error != 0) {
989 		device_printf(sc->sc_dev,
990 		    "unable to setup tx/rx dma; hal status %u\n", error);
991 		/* XXX */
992 	}
993 	return error;
994 }
995 
996 /*
997  * Inform firmware of tx rate parameters.
998  * Called after a channel change.
999  */
1000 static int
1001 mwl_setcurchanrates(struct mwl_softc *sc)
1002 {
1003 	struct ieee80211com *ic = &sc->sc_ic;
1004 	const struct ieee80211_rateset *rs;
1005 	MWL_HAL_TXRATE rates;
1006 
1007 	memset(&rates, 0, sizeof(rates));
1008 	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1009 	/* rate used to send management frames */
1010 	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1011 	/* rate used to send multicast frames */
1012 	rates.McastRate = rates.MgtRate;
1013 
1014 	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1015 }
1016 
1017 /*
1018  * Inform firmware of tx rate parameters.  Called whenever
1019  * user-settable params change and after a channel change.
1020  */
1021 static int
1022 mwl_setrates(struct ieee80211vap *vap)
1023 {
1024 	struct mwl_vap *mvp = MWL_VAP(vap);
1025 	struct ieee80211_node *ni = vap->iv_bss;
1026 	const struct ieee80211_txparam *tp = ni->ni_txparms;
1027 	MWL_HAL_TXRATE rates;
1028 
1029 	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1030 
1031 	/*
1032 	 * Update the h/w rate map.
1033 	 * NB: 0x80 for MCS is passed through unchanged
1034 	 */
1035 	memset(&rates, 0, sizeof(rates));
1036 	/* rate used to send management frames */
1037 	rates.MgtRate = tp->mgmtrate;
1038 	/* rate used to send multicast frames */
1039 	rates.McastRate = tp->mcastrate;
1040 
1041 	/* while here calculate EAPOL fixed rate cookie */
1042 	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1043 
1044 	return mwl_hal_settxrate(mvp->mv_hvap,
1045 	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1046 		RATE_FIXED : RATE_AUTO, &rates);
1047 }
1048 
1049 /*
1050  * Setup a fixed xmit rate cookie for EAPOL frames.
1051  */
1052 static void
1053 mwl_seteapolformat(struct ieee80211vap *vap)
1054 {
1055 	struct mwl_vap *mvp = MWL_VAP(vap);
1056 	struct ieee80211_node *ni = vap->iv_bss;
1057 	enum ieee80211_phymode mode;
1058 	uint8_t rate;
1059 
1060 	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1061 
1062 	mode = ieee80211_chan2mode(ni->ni_chan);
1063 	/*
1064 	 * Use legacy rates when operating a mixed HT+non-HT bss.
1065 	 * NB: this may violate POLA for sta and wds vap's.
1066 	 */
1067 	if (mode == IEEE80211_MODE_11NA &&
1068 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1069 		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1070 	else if (mode == IEEE80211_MODE_11NG &&
1071 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1072 		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1073 	else
1074 		rate = vap->iv_txparms[mode].mgmtrate;
1075 
1076 	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1077 }
1078 
1079 /*
1080  * Map SKU+country code to region code for radar bin'ing.
1081  */
1082 static int
1083 mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1084 {
1085 	switch (rd->regdomain) {
1086 	case SKU_FCC:
1087 	case SKU_FCC3:
1088 		return DOMAIN_CODE_FCC;
1089 	case SKU_CA:
1090 		return DOMAIN_CODE_IC;
1091 	case SKU_ETSI:
1092 	case SKU_ETSI2:
1093 	case SKU_ETSI3:
1094 		if (rd->country == CTRY_SPAIN)
1095 			return DOMAIN_CODE_SPAIN;
1096 		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1097 			return DOMAIN_CODE_FRANCE;
1098 		/* XXX force 1.3.1 radar type */
1099 		return DOMAIN_CODE_ETSI_131;
1100 	case SKU_JAPAN:
1101 		return DOMAIN_CODE_MKK;
1102 	case SKU_ROW:
1103 		return DOMAIN_CODE_DGT;	/* Taiwan */
1104 	case SKU_APAC:
1105 	case SKU_APAC2:
1106 	case SKU_APAC3:
1107 		return DOMAIN_CODE_AUS;	/* Australia */
1108 	}
1109 	/* XXX KOREA? */
1110 	return DOMAIN_CODE_FCC;			/* XXX? */
1111 }
1112 
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh = sc->sc_mh;

	/*
	 * Push vap-independent state to the firmware: antennas, radio,
	 * WMM, current channel, rate adaptation, burst mode, regulatory
	 * region, and ampdu rate handling.
	 * NB(review): call order follows the original bringup sequence;
	 * HAL return values are ignored and 1 ("ok") is always returned.
	 */
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1136 
/*
 * Bring the device up: reset hardware state, start the receive
 * engine, enable interrupts, and arm the watchdog.  Returns 0 on
 * success or an errno.  Caller must hold the softc lock.
 */
static int
mwl_init(struct mwl_softc *sc)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop(sc);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		device_printf(sc->sc_dev, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.
	 * NB: MAC_EVENT and QUEUE_EMPTY are deliberately compiled out.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	/* mark running before unmasking interrupts and arming the watchdog */
	sc->sc_running = 1;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1193 
1194 static void
1195 mwl_stop(struct mwl_softc *sc)
1196 {
1197 
1198 	MWL_LOCK_ASSERT(sc);
1199 	if (sc->sc_running) {
1200 		/*
1201 		 * Shutdown the hardware and driver.
1202 		 */
1203 		sc->sc_running = 0;
1204 		callout_stop(&sc->sc_watchdog);
1205 		sc->sc_tx_timer = 0;
1206 		mwl_draintxq(sc);
1207 	}
1208 }
1209 
1210 static int
1211 mwl_reset_vap(struct ieee80211vap *vap, int state)
1212 {
1213 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1214 	struct ieee80211com *ic = vap->iv_ic;
1215 
1216 	if (state == IEEE80211_S_RUN)
1217 		mwl_setrates(vap);
1218 	/* XXX off by 1? */
1219 	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
1220 	/* XXX auto? 20/40 split? */
1221 	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
1222 	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
1223 	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
1224 	    HTPROTECT_NONE : HTPROTECT_AUTO);
1225 	/* XXX txpower cap */
1226 
1227 	/* re-setup beacons */
1228 	if (state == IEEE80211_S_RUN &&
1229 	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
1230 	     vap->iv_opmode == IEEE80211_M_MBSS ||
1231 	     vap->iv_opmode == IEEE80211_M_IBSS)) {
1232 		mwl_setapmode(vap, vap->iv_bss->ni_chan);
1233 		mwl_hal_setnprotmode(hvap,
1234 		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1235 		return mwl_beacon_setup(vap);
1236 	}
1237 	return 0;
1238 }
1239 
1240 /*
1241  * Reset the hardware w/o losing operational state.
1242  * Used to reset or reload hardware state for a vap.
1243  */
1244 static int
1245 mwl_reset(struct ieee80211vap *vap, u_long cmd)
1246 {
1247 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1248 	int error = 0;
1249 
1250 	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
1251 		struct ieee80211com *ic = vap->iv_ic;
1252 		struct mwl_softc *sc = ic->ic_softc;
1253 		struct mwl_hal *mh = sc->sc_mh;
1254 
1255 		/* XXX handle DWDS sta vap change */
1256 		/* XXX do we need to disable interrupts? */
1257 		mwl_hal_intrset(mh, 0);		/* disable interrupts */
1258 		error = mwl_reset_vap(vap, vap->iv_state);
1259 		mwl_hal_intrset(mh, sc->sc_imask);
1260 	}
1261 	return error;
1262 }
1263 
1264 /*
1265  * Allocate a tx buffer for sending a frame.  The
1266  * packet is assumed to have the WME AC stored so
1267  * we can use it to select the appropriate h/w queue.
1268  */
static struct mwl_txbuf *
mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;

	/*
	 * Grab a TX buffer and associated resources.
	 * Returns NULL when the queue's free list is exhausted;
	 * the caller decides whether to stall or drop.
	 */
	MWL_TXQ_LOCK(txq);
	bf = STAILQ_FIRST(&txq->free);
	if (bf != NULL) {
		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
		txq->nfree--;
	}
	MWL_TXQ_UNLOCK(txq);
	if (bf == NULL)
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
	return bf;
}
1289 
1290 /*
1291  * Return a tx buffer to the queue it came from.  Note there
1292  * are two cases because we must preserve the order of buffers
1293  * as it reflects the fixed order of descriptors in memory
1294  * (the firmware pre-fetches descriptors so we cannot reorder).
1295  */
static void
mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
	/*
	 * Return an unused buffer to the FRONT of the free list,
	 * preserving the descriptor order the firmware pre-fetches.
	 */
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	MWL_TXQ_LOCK(txq);
	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
	txq->nfree++;
	MWL_TXQ_UNLOCK(txq);
}
1306 
static void
mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
	/*
	 * Return a completed buffer to the TAIL of the free list,
	 * preserving the descriptor order the firmware pre-fetches.
	 */
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	MWL_TXQ_LOCK(txq);
	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
	txq->nfree++;
	MWL_TXQ_UNLOCK(txq);
}
1317 
1318 static int
1319 mwl_transmit(struct ieee80211com *ic, struct mbuf *m)
1320 {
1321 	struct mwl_softc *sc = ic->ic_softc;
1322 	int error;
1323 
1324 	MWL_LOCK(sc);
1325 	if (!sc->sc_running) {
1326 		MWL_UNLOCK(sc);
1327 		return (ENXIO);
1328 	}
1329 	error = mbufq_enqueue(&sc->sc_snd, m);
1330 	if (error) {
1331 		MWL_UNLOCK(sc);
1332 		return (error);
1333 	}
1334 	mwl_start(sc);
1335 	MWL_UNLOCK(sc);
1336 	return (0);
1337 }
1338 
/*
 * Drain the software send queue: map each frame to the h/w queue
 * chosen by its WME access category, hand it to the firmware, and
 * kick the firmware every mwl_txcoalesce frames (and once more at
 * the end) to limit doorbell writes.  Caller holds the softc lock.
 */
static void
mwl_start(struct mwl_softc *sc)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	MWL_LOCK_ASSERT(sc);
	if (!sc->sc_running || sc->sc_invalid)
		return;
	nqueued = 0;
	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			/* no tx buffers: free the frame and node ref */
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 * NB(review): on failure only the node ref and txbuf are
		 * reclaimed here — presumably mwl_tx_start disposed of
		 * the mbuf itself; confirm against its definition.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1416 
/*
 * Raw (bpf-injected) frame transmit: send a single frame through
 * the h/w queue selected by its WME classification.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	/* refuse frames while the device is down or detaching */
	if (!sc->sc_running || sc->sc_invalid) {
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 * NB(review): on failure only the txbuf is reclaimed; the
	 * node reference appears to be owned by the caller — confirm
	 * against the net80211 raw output contract.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		mwl_puttxbuf_head(txq, bf);

		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1469 
1470 static int
1471 mwl_media_change(struct ifnet *ifp)
1472 {
1473 	struct ieee80211vap *vap = ifp->if_softc;
1474 	int error;
1475 
1476 	error = ieee80211_media_change(ifp);
1477 	/* NB: only the fixed rate can change and that doesn't need a reset */
1478 	if (error == ENETRESET) {
1479 		mwl_setrates(vap);
1480 		error = 0;
1481 	}
1482 	return error;
1483 }
1484 
1485 #ifdef MWL_DEBUG
1486 static void
1487 mwl_keyprint(struct mwl_softc *sc, const char *tag,
1488 	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
1489 {
1490 	static const char *ciphers[] = {
1491 		"WEP",
1492 		"TKIP",
1493 		"AES-CCM",
1494 	};
1495 	int i, n;
1496 
1497 	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
1498 	for (i = 0, n = hk->keyLen; i < n; i++)
1499 		printf(" %02x", hk->key.aes[i]);
1500 	printf(" mac %s", ether_sprintf(mac));
1501 	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
1502 		printf(" %s", "rxmic");
1503 		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
1504 			printf(" %02x", hk->key.tkip.rxMic[i]);
1505 		printf(" txmic");
1506 		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
1507 			printf(" %02x", hk->key.tkip.txMic[i]);
1508 	}
1509 	printf(" flags 0x%x\n", hk->keyFlags);
1510 }
1511 #endif
1512 
1513 /*
1514  * Allocate a key cache slot for a unicast key.  The
1515  * firmware handles key allocation and every station is
1516  * guaranteed key space so we are always successful.
1517  */
1518 static int
1519 mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1520 	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1521 {
1522 	struct mwl_softc *sc = vap->iv_ic->ic_softc;
1523 
1524 	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1525 	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
1526 		if (!(&vap->iv_nw_keys[0] <= k &&
1527 		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1528 			/* should not happen */
1529 			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1530 				"%s: bogus group key\n", __func__);
1531 			return 0;
1532 		}
1533 		/* give the caller what they requested */
1534 		*keyix = *rxkeyix = ieee80211_crypto_get_key_wepidx(vap, k);
1535 	} else {
1536 		/*
1537 		 * Firmware handles key allocation.
1538 		 */
1539 		*keyix = *rxkeyix = 0;
1540 	}
1541 	return 1;
1542 }
1543 
1544 /*
1545  * Delete a key entry allocated by mwl_key_alloc.
1546  */
1547 static int
1548 mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1549 {
1550 	struct mwl_softc *sc = vap->iv_ic->ic_softc;
1551 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1552 	MWL_HAL_KEYVAL hk;
1553 	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1554 	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1555 
1556 	if (hvap == NULL) {
1557 		if (vap->iv_opmode != IEEE80211_M_WDS) {
1558 			/* XXX monitor mode? */
1559 			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1560 			    "%s: no hvap for opmode %d\n", __func__,
1561 			    vap->iv_opmode);
1562 			return 0;
1563 		}
1564 		hvap = MWL_VAP(vap)->mv_ap_hvap;
1565 	}
1566 
1567 	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1568 	    __func__, k->wk_keyix);
1569 
1570 	memset(&hk, 0, sizeof(hk));
1571 	hk.keyIndex = k->wk_keyix;
1572 	switch (k->wk_cipher->ic_cipher) {
1573 	case IEEE80211_CIPHER_WEP:
1574 		hk.keyTypeId = KEY_TYPE_ID_WEP;
1575 		break;
1576 	case IEEE80211_CIPHER_TKIP:
1577 		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1578 		break;
1579 	case IEEE80211_CIPHER_AES_CCM:
1580 		hk.keyTypeId = KEY_TYPE_ID_AES;
1581 		break;
1582 	default:
1583 		/* XXX should not happen */
1584 		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1585 		    __func__, k->wk_cipher->ic_cipher);
1586 		return 0;
1587 	}
1588 	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
1589 }
1590 
1591 static __inline int
1592 addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1593 {
1594 	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1595 		if (k->wk_flags & IEEE80211_KEY_XMIT)
1596 			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1597 		if (k->wk_flags & IEEE80211_KEY_RECV)
1598 			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1599 		return 1;
1600 	} else
1601 		return 0;
1602 }
1603 
1604 /*
1605  * Set the key cache contents for the specified key.  Key cache
1606  * slot(s) must already have been allocated by mwl_key_alloc.
1607  */
static int
mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	/* plumb the key using the mac address recorded with the key */
	return (_mwl_key_set(vap, k, k->wk_macaddr));
}
1613 
/*
 * Plumb a key into the firmware key cache: translate the net80211
 * key into a MWL_HAL_KEYVAL and pick the sta db address it should
 * be written against.  Returns non-zero on success.
 */
static int
_mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps share the parent ap's hal vap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 *     just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
			/* XXX plumb to local sta db too for static key wep */
			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
		}
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS
		 * node setup so we will plumb the key to the wrong mac
		 * address (it'll be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1718 
1719 /*
1720  * Set the multicast filter contents into the hardware.
1721  * XXX f/w has no support; just defer to the os.
1722  */
static void
mwl_setmcastfilter(struct mwl_softc *sc)
{
	/*
	 * Intentionally a no-op: the firmware has no multicast filter
	 * support, so filtering is deferred to the OS.  The disabled
	 * code below is a never-ported reference implementation
	 * (NB(review): it references an undefined `ifp`).
	 */
#if 0
	struct ether_multi *enm;
	struct ether_multistep estep;
	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
	uint8_t *mp;
	int nmc;

	mp = macs;
	nmc = 0;
	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* XXX Punt on ranges. */
		if (nmc == MWL_HAL_MCAST_MAX ||
		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
			ifp->if_flags |= IFF_ALLMULTI;
			return;
		}
		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
		mp += IEEE80211_ADDR_LEN, nmc++;
		ETHER_NEXT_MULTI(estep, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
#endif
}
1751 
1752 static int
1753 mwl_mode_init(struct mwl_softc *sc)
1754 {
1755 	struct ieee80211com *ic = &sc->sc_ic;
1756 	struct mwl_hal *mh = sc->sc_mh;
1757 
1758 	mwl_hal_setpromisc(mh, ic->ic_promisc > 0);
1759 	mwl_setmcastfilter(sc);
1760 
1761 	return 0;
1762 }
1763 
1764 /*
1765  * Callback from the 802.11 layer after a multicast state change.
1766  */
static void
mwl_update_mcast(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* NB: currently a no-op; see mwl_setmcastfilter */
	mwl_setmcastfilter(sc);
}
1774 
1775 /*
1776  * Callback from the 802.11 layer after a promiscuous mode change.
1777  * Note this interface does not check the operating mode as this
1778  * is an internal callback and we are expected to honor the current
1779  * state (e.g. this is used for setting the interface in promiscuous
1780  * mode when operating in hostap mode to do ACS).
1781  */
static void
mwl_update_promisc(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* any non-zero promiscuous refcount enables promiscuous mode */
	mwl_hal_setpromisc(sc->sc_mh, ic->ic_promisc > 0);
}
1789 
1790 /*
1791  * Callback from the 802.11 layer to update the slot time
1792  * based on the current setting.  We use it to notify the
1793  * firmware of ERP changes and the f/w takes care of things
1794  * like slot time and preamble.
1795  */
1796 static void
1797 mwl_updateslot(struct ieee80211com *ic)
1798 {
1799 	struct mwl_softc *sc = ic->ic_softc;
1800 	struct mwl_hal *mh = sc->sc_mh;
1801 	int prot;
1802 
1803 	/* NB: can be called early; suppress needless cmds */
1804 	if (!sc->sc_running)
1805 		return;
1806 
1807 	/*
1808 	 * Calculate the ERP flags.  The firwmare will use
1809 	 * this to carry out the appropriate measures.
1810 	 */
1811 	prot = 0;
1812 	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1813 		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1814 			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1815 		if (ic->ic_flags & IEEE80211_F_USEPROT)
1816 			prot |= IEEE80211_ERP_USE_PROTECTION;
1817 		if (ic->ic_flags & IEEE80211_F_USEBARKER)
1818 			prot |= IEEE80211_ERP_LONG_PREAMBLE;
1819 	}
1820 
1821 	DPRINTF(sc, MWL_DEBUG_RESET,
1822 	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1823 	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1824 	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1825 	    ic->ic_flags);
1826 
1827 	mwl_hal_setgprot(mh, prot);
1828 }
1829 
1830 /*
1831  * Setup the beacon frame.
1832  */
1833 static int
1834 mwl_beacon_setup(struct ieee80211vap *vap)
1835 {
1836 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1837 	struct ieee80211_node *ni = vap->iv_bss;
1838 	struct mbuf *m;
1839 
1840 	m = ieee80211_beacon_alloc(ni);
1841 	if (m == NULL)
1842 		return ENOBUFS;
1843 	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1844 	m_free(m);
1845 
1846 	return 0;
1847 }
1848 
1849 /*
1850  * Update the beacon frame in response to a change.
1851  */
static void
mwl_beacon_update(struct ieee80211vap *vap, int item)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	KASSERT(hvap != NULL, ("no beacon"));
	switch (item) {
	case IEEE80211_BEACON_ERP:
		/* slot/preamble change: firmware handles the details */
		mwl_updateslot(ic);
		break;
	case IEEE80211_BEACON_HTINFO:
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		break;
	case IEEE80211_BEACON_CAPS:
	case IEEE80211_BEACON_WME:
	case IEEE80211_BEACON_APPIE:
	case IEEE80211_BEACON_CSA:
		/* handled by rebuilding the beacon template below */
		break;
	case IEEE80211_BEACON_TIM:
		/* NB: firmware always forms TIM */
		return;
	}
	/* XXX retain beacon frame and update */
	mwl_beacon_setup(vap);
}
1879 
1880 static void
1881 mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1882 {
1883 	bus_addr_t *paddr = (bus_addr_t*) arg;
1884 	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1885 	*paddr = segs->ds_addr;
1886 }
1887 
1888 #ifdef MWL_HOST_PS_SUPPORT
1889 /*
1890  * Handle power save station occupancy changes.
1891  */
1892 static void
1893 mwl_update_ps(struct ieee80211vap *vap, int nsta)
1894 {
1895 	struct mwl_vap *mvp = MWL_VAP(vap);
1896 
1897 	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1898 		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1899 	mvp->mv_last_ps_sta = nsta;
1900 }
1901 
1902 /*
1903  * Handle associated station power save state changes.
1904  */
1905 static int
1906 mwl_set_tim(struct ieee80211_node *ni, int set)
1907 {
1908 	struct ieee80211vap *vap = ni->ni_vap;
1909 	struct mwl_vap *mvp = MWL_VAP(vap);
1910 
1911 	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1912 		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1913 		    IEEE80211_AID(ni->ni_associd), set);
1914 		return 1;
1915 	} else
1916 		return 0;
1917 }
1918 #endif /* MWL_HOST_PS_SUPPORT */
1919 
1920 static int
1921 mwl_desc_setup(struct mwl_softc *sc, const char *name,
1922 	struct mwl_descdma *dd,
1923 	int nbuf, size_t bufsize, int ndesc, size_t descsize)
1924 {
1925 	uint8_t *ds;
1926 	int error;
1927 
1928 	DPRINTF(sc, MWL_DEBUG_RESET,
1929 	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
1930 	    __func__, name, nbuf, (uintmax_t) bufsize,
1931 	    ndesc, (uintmax_t) descsize);
1932 
1933 	dd->dd_name = name;
1934 	dd->dd_desc_len = nbuf * ndesc * descsize;
1935 
1936 	/*
1937 	 * Setup DMA descriptor area.
1938 	 */
1939 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
1940 		       PAGE_SIZE, 0,		/* alignment, bounds */
1941 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1942 		       BUS_SPACE_MAXADDR,	/* highaddr */
1943 		       NULL, NULL,		/* filter, filterarg */
1944 		       dd->dd_desc_len,		/* maxsize */
1945 		       1,			/* nsegments */
1946 		       dd->dd_desc_len,		/* maxsegsize */
1947 		       BUS_DMA_ALLOCNOW,	/* flags */
1948 		       NULL,			/* lockfunc */
1949 		       NULL,			/* lockarg */
1950 		       &dd->dd_dmat);
1951 	if (error != 0) {
1952 		device_printf(sc->sc_dev, "cannot allocate %s DMA tag\n", dd->dd_name);
1953 		return error;
1954 	}
1955 
1956 	/* allocate descriptors */
1957 	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
1958 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
1959 				 &dd->dd_dmamap);
1960 	if (error != 0) {
1961 		device_printf(sc->sc_dev, "unable to alloc memory for %u %s descriptors, "
1962 			"error %u\n", nbuf * ndesc, dd->dd_name, error);
1963 		goto fail1;
1964 	}
1965 
1966 	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
1967 				dd->dd_desc, dd->dd_desc_len,
1968 				mwl_load_cb, &dd->dd_desc_paddr,
1969 				BUS_DMA_NOWAIT);
1970 	if (error != 0) {
1971 		device_printf(sc->sc_dev, "unable to map %s descriptors, error %u\n",
1972 			dd->dd_name, error);
1973 		goto fail2;
1974 	}
1975 
1976 	ds = dd->dd_desc;
1977 	memset(ds, 0, dd->dd_desc_len);
1978 	DPRINTF(sc, MWL_DEBUG_RESET,
1979 	    "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
1980 	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
1981 	    (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
1982 
1983 	return 0;
1984 fail2:
1985 	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
1986 fail1:
1987 	bus_dma_tag_destroy(dd->dd_dmat);
1988 	memset(dd, 0, sizeof(*dd));
1989 	return error;
1990 #undef DS2PHYS
1991 }
1992 
/*
 * Release descriptor DMA state created by mwl_desc_setup.
 * Teardown is the reverse of setup: unload the map, free the
 * memory, destroy the tag, then clear dd so a double cleanup
 * (or a partial-setup retry) is harmless.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2002 
2003 /*
2004  * Construct a tx q's free list.  The order of entries on
2005  * the list must reflect the physical layout of tx descriptors
2006  * because the firmware pre-fetches descriptors.
2007  *
2008  * XXX might be better to use indices into the buffer array.
2009  */
2010 static void
2011 mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2012 {
2013 	struct mwl_txbuf *bf;
2014 	int i;
2015 
2016 	bf = txq->dma.dd_bufptr;
2017 	STAILQ_INIT(&txq->free);
2018 	for (i = 0; i < mwl_txbuf; i++, bf++)
2019 		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2020 	txq->nfree = i;
2021 }
2022 
2023 #define	DS2PHYS(_dd, _ds) \
2024 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2025 
2026 static int
2027 mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
2028 {
2029 	int error, bsize, i;
2030 	struct mwl_txbuf *bf;
2031 	struct mwl_txdesc *ds;
2032 
2033 	error = mwl_desc_setup(sc, "tx", &txq->dma,
2034 			mwl_txbuf, sizeof(struct mwl_txbuf),
2035 			MWL_TXDESC, sizeof(struct mwl_txdesc));
2036 	if (error != 0)
2037 		return error;
2038 
2039 	/* allocate and setup tx buffers */
2040 	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
2041 	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2042 	if (bf == NULL) {
2043 		device_printf(sc->sc_dev, "malloc of %u tx buffers failed\n",
2044 			mwl_txbuf);
2045 		return ENOMEM;
2046 	}
2047 	txq->dma.dd_bufptr = bf;
2048 
2049 	ds = txq->dma.dd_desc;
2050 	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
2051 		bf->bf_desc = ds;
2052 		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
2053 		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2054 				&bf->bf_dmamap);
2055 		if (error != 0) {
2056 			device_printf(sc->sc_dev, "unable to create dmamap for tx "
2057 				"buffer %u, error %u\n", i, error);
2058 			return error;
2059 		}
2060 	}
2061 	mwl_txq_reset(sc, txq);
2062 	return 0;
2063 }
2064 
2065 static void
2066 mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2067 {
2068 	struct mwl_txbuf *bf;
2069 	int i;
2070 
2071 	bf = txq->dma.dd_bufptr;
2072 	for (i = 0; i < mwl_txbuf; i++, bf++) {
2073 		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2074 		KASSERT(bf->bf_node == NULL, ("node on free list"));
2075 		if (bf->bf_dmamap != NULL)
2076 			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2077 	}
2078 	STAILQ_INIT(&txq->free);
2079 	txq->nfree = 0;
2080 	if (txq->dma.dd_bufptr != NULL) {
2081 		free(txq->dma.dd_bufptr, M_MWLDEV);
2082 		txq->dma.dd_bufptr = NULL;
2083 	}
2084 	if (txq->dma.dd_desc_len != 0)
2085 		mwl_desc_cleanup(sc, &txq->dma);
2086 }
2087 
2088 static int
2089 mwl_rxdma_setup(struct mwl_softc *sc)
2090 {
2091 	int error, jumbosize, bsize, i;
2092 	struct mwl_rxbuf *bf;
2093 	struct mwl_jumbo *rbuf;
2094 	struct mwl_rxdesc *ds;
2095 	caddr_t data;
2096 
2097 	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2098 			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2099 			1, sizeof(struct mwl_rxdesc));
2100 	if (error != 0)
2101 		return error;
2102 
2103 	/*
2104 	 * Receive is done to a private pool of jumbo buffers.
2105 	 * This allows us to attach to mbuf's and avoid re-mapping
2106 	 * memory on each rx we post.  We allocate a large chunk
2107 	 * of memory and manage it in the driver.  The mbuf free
2108 	 * callback method is used to reclaim frames after sending
2109 	 * them up the stack.  By default we allocate 2x the number of
2110 	 * rx descriptors configured so we have some slop to hold
2111 	 * us while frames are processed.
2112 	 */
2113 	if (mwl_rxbuf < 2*mwl_rxdesc) {
2114 		device_printf(sc->sc_dev,
2115 		    "too few rx dma buffers (%d); increasing to %d\n",
2116 		    mwl_rxbuf, 2*mwl_rxdesc);
2117 		mwl_rxbuf = 2*mwl_rxdesc;
2118 	}
2119 	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2120 	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2121 
2122 	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2123 		       PAGE_SIZE, 0,		/* alignment, bounds */
2124 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2125 		       BUS_SPACE_MAXADDR,	/* highaddr */
2126 		       NULL, NULL,		/* filter, filterarg */
2127 		       sc->sc_rxmemsize,	/* maxsize */
2128 		       1,			/* nsegments */
2129 		       sc->sc_rxmemsize,	/* maxsegsize */
2130 		       BUS_DMA_ALLOCNOW,	/* flags */
2131 		       NULL,			/* lockfunc */
2132 		       NULL,			/* lockarg */
2133 		       &sc->sc_rxdmat);
2134 	if (error != 0) {
2135 		device_printf(sc->sc_dev, "could not create rx DMA tag\n");
2136 		return error;
2137 	}
2138 
2139 	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2140 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2141 				 &sc->sc_rxmap);
2142 	if (error != 0) {
2143 		device_printf(sc->sc_dev, "could not alloc %ju bytes of rx DMA memory\n",
2144 		    (uintmax_t) sc->sc_rxmemsize);
2145 		return error;
2146 	}
2147 
2148 	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2149 				sc->sc_rxmem, sc->sc_rxmemsize,
2150 				mwl_load_cb, &sc->sc_rxmem_paddr,
2151 				BUS_DMA_NOWAIT);
2152 	if (error != 0) {
2153 		device_printf(sc->sc_dev, "could not load rx DMA map\n");
2154 		return error;
2155 	}
2156 
2157 	/*
2158 	 * Allocate rx buffers and set them up.
2159 	 */
2160 	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2161 	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2162 	if (bf == NULL) {
2163 		device_printf(sc->sc_dev, "malloc of %u rx buffers failed\n", bsize);
2164 		return error;
2165 	}
2166 	sc->sc_rxdma.dd_bufptr = bf;
2167 
2168 	STAILQ_INIT(&sc->sc_rxbuf);
2169 	ds = sc->sc_rxdma.dd_desc;
2170 	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2171 		bf->bf_desc = ds;
2172 		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2173 		/* pre-assign dma buffer */
2174 		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2175 		/* NB: tail is intentional to preserve descriptor order */
2176 		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2177 	}
2178 
2179 	/*
2180 	 * Place remainder of dma memory buffers on the free list.
2181 	 */
2182 	SLIST_INIT(&sc->sc_rxfree);
2183 	for (; i < mwl_rxbuf; i++) {
2184 		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2185 		rbuf = MWL_JUMBO_DATA2BUF(data);
2186 		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2187 		sc->sc_nrxfree++;
2188 	}
2189 	return 0;
2190 }
2191 #undef DS2PHYS
2192 
/*
 * Release rx DMA state.  Each step is individually guarded so a
 * partially-completed mwl_rxdma_setup can be unwound safely; the
 * guards also make repeated invocation harmless.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmem_paddr != 0) {
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmem_paddr = 0;
	}
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	/* NB: nonzero length implies the descriptor area was set up */
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
2211 
2212 static int
2213 mwl_dma_setup(struct mwl_softc *sc)
2214 {
2215 	int error, i;
2216 
2217 	error = mwl_rxdma_setup(sc);
2218 	if (error != 0) {
2219 		mwl_rxdma_cleanup(sc);
2220 		return error;
2221 	}
2222 
2223 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2224 		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2225 		if (error != 0) {
2226 			mwl_dma_cleanup(sc);
2227 			return error;
2228 		}
2229 	}
2230 	return 0;
2231 }
2232 
2233 static void
2234 mwl_dma_cleanup(struct mwl_softc *sc)
2235 {
2236 	int i;
2237 
2238 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2239 		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2240 	mwl_rxdma_cleanup(sc);
2241 }
2242 
2243 static struct ieee80211_node *
2244 mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2245 {
2246 	struct ieee80211com *ic = vap->iv_ic;
2247 	struct mwl_softc *sc = ic->ic_softc;
2248 	const size_t space = sizeof(struct mwl_node);
2249 	struct mwl_node *mn;
2250 
2251 	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2252 	if (mn == NULL) {
2253 		/* XXX stat+msg */
2254 		return NULL;
2255 	}
2256 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2257 	return &mn->mn_node;
2258 }
2259 
2260 static void
2261 mwl_node_cleanup(struct ieee80211_node *ni)
2262 {
2263 	struct ieee80211com *ic = ni->ni_ic;
2264         struct mwl_softc *sc = ic->ic_softc;
2265 	struct mwl_node *mn = MWL_NODE(ni);
2266 
2267 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
2268 	    __func__, ni, ni->ni_ic, mn->mn_staid);
2269 
2270 	if (mn->mn_staid != 0) {
2271 		struct ieee80211vap *vap = ni->ni_vap;
2272 
2273 		if (mn->mn_hvap != NULL) {
2274 			if (vap->iv_opmode == IEEE80211_M_STA)
2275 				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
2276 			else
2277 				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
2278 		}
2279 		/*
2280 		 * NB: legacy WDS peer sta db entry is installed using
2281 		 * the associate ap's hvap; use it again to delete it.
2282 		 * XXX can vap be NULL?
2283 		 */
2284 		else if (vap->iv_opmode == IEEE80211_M_WDS &&
2285 		    MWL_VAP(vap)->mv_ap_hvap != NULL)
2286 			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
2287 			    ni->ni_macaddr);
2288 		delstaid(sc, mn->mn_staid);
2289 		mn->mn_staid = 0;
2290 	}
2291 	sc->sc_node_cleanup(ni);
2292 }
2293 
2294 /*
2295  * Reclaim rx dma buffers from packets sitting on the ampdu
2296  * reorder queue for a station.  We replace buffers with a
2297  * system cluster (if available).
2298  */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
	/*
	 * NB: intentionally a no-op.  The implementation below is
	 * disabled; it uses mbuf external-storage/cluster interfaces
	 * (MEXTREMOVE, MEXTADD, pool_cache_get_paddr, M_CLUSTER) that
	 * are not part of the current FreeBSD mbuf(9) API — presumably
	 * kept for reference from a prior port; TODO confirm before
	 * attempting to re-enable.
	 */
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place.  Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}
2347 
2348 /*
2349  * Callback to reclaim resources.  We first let the
2350  * net80211 layer do it's thing, then if we are still
2351  * blocked by a lack of rx dma buffers we walk the ampdu
2352  * reorder q's to reclaim buffers by copying to a system
2353  * cluster.
2354  */
2355 static void
2356 mwl_node_drain(struct ieee80211_node *ni)
2357 {
2358 	struct ieee80211com *ic = ni->ni_ic;
2359         struct mwl_softc *sc = ic->ic_softc;
2360 	struct mwl_node *mn = MWL_NODE(ni);
2361 
2362 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2363 	    __func__, ni, ni->ni_vap, mn->mn_staid);
2364 
2365 	/* NB: call up first to age out ampdu q's */
2366 	sc->sc_node_drain(ni);
2367 
2368 	/* XXX better to not check low water mark? */
2369 	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2370 	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2371 		uint8_t tid;
2372 		/*
2373 		 * Walk the reorder q and reclaim rx dma buffers by copying
2374 		 * the packet contents into clusters.
2375 		 */
2376 		for (tid = 0; tid < WME_NUM_TID; tid++) {
2377 			struct ieee80211_rx_ampdu *rap;
2378 
2379 			rap = &ni->ni_rx_ampdu[tid];
2380 			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2381 				continue;
2382 			if (rap->rxa_qframes)
2383 				mwl_ampdu_rxdma_reclaim(rap);
2384 		}
2385 	}
2386 }
2387 
/*
 * Return signal info for a station: rssi via the saved net80211
 * ic_node_getrssi method, noise as a fixed guess since the
 * hardware noise-floor value is not yet smoothed enough to use.
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2403 
2404 /*
2405  * Convert Hardware per-antenna rssi info to common format:
2406  * Let a1, a2, a3 represent the amplitudes per chain
2407  * Let amax represent max[a1, a2, a3]
2408  * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2409  * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2410  * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2411  * maintain some extra precision.
2412  *
2413  * Values are stored in .5 db format capped at 127.
2414  */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
/*
 * Convert one chain's amplitude (_src, table index 0..31) to a
 * .5 dB rssi relative to the strongest chain, clamped at 127.
 * NB: _dst is evaluated more than once — pass only simple lvalues.
 */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx) table (see block comment above) */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* find the strongest chain; it becomes the 0 dB reference */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->ch[0].rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->ch[1].rssi[0], mn->mn_ai.rssi_b);
	CVT(mi->ch[2].rssi[0], mn->mn_ai.rssi_c);

	mi->ch[0].noise[0] = mn->mn_ai.nf_a;
	mi->ch[1].noise[0] = mn->mn_ai.nf_b;
	mi->ch[2].noise[0] = mn->mn_ai.nf_c;
#undef CVT
}
2448 
2449 static __inline void *
2450 mwl_getrxdma(struct mwl_softc *sc)
2451 {
2452 	struct mwl_jumbo *buf;
2453 	void *data;
2454 
2455 	/*
2456 	 * Allocate from jumbo pool.
2457 	 */
2458 	MWL_RXFREE_LOCK(sc);
2459 	buf = SLIST_FIRST(&sc->sc_rxfree);
2460 	if (buf == NULL) {
2461 		DPRINTF(sc, MWL_DEBUG_ANY,
2462 		    "%s: out of rx dma buffers\n", __func__);
2463 		sc->sc_stats.mst_rx_nodmabuf++;
2464 		data = NULL;
2465 	} else {
2466 		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2467 		sc->sc_nrxfree--;
2468 		data = MWL_JUMBO_BUF2DATA(buf);
2469 	}
2470 	MWL_RXFREE_UNLOCK(sc);
2471 	return data;
2472 }
2473 
2474 static __inline void
2475 mwl_putrxdma(struct mwl_softc *sc, void *data)
2476 {
2477 	struct mwl_jumbo *buf;
2478 
2479 	/* XXX bounds check data */
2480 	MWL_RXFREE_LOCK(sc);
2481 	buf = MWL_JUMBO_DATA2BUF(data);
2482 	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2483 	sc->sc_nrxfree++;
2484 	MWL_RXFREE_UNLOCK(sc);
2485 }
2486 
/*
 * (Re)initialize an rx descriptor for posting to the firmware.
 * If the buffer slot is empty a fresh jumbo buffer is acquired;
 * failing that the descriptor is marked OS-owned so the firmware
 * skips it, and ENOMEM is returned.  Returns 0 on success.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.  RxControl is written last: it hands
	 * ownership to the firmware once the other fields are set.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2525 
/*
 * mbuf external-storage free callback: return the jumbo dma
 * buffer to the pool and, if rx was stalled for lack of buffers,
 * re-enable rx interrupts once the pool refills past the low
 * water mark.
 */
static void
mwl_ext_free(struct mbuf *m)
{
	struct mwl_softc *sc = m->m_ext.ext_arg1;

	/* XXX bounds check data */
	mwl_putrxdma(sc, m->m_ext.ext_buf);
	/*
	 * If we were previously blocked by a lack of rx dma buffers
	 * check if we now have enough to restart rx interrupt handling.
	 * NB: we know we are called at splvm which is above splnet.
	 */
	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
		sc->sc_rxblocked = 0;
		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
	}
}
2543 
/*
 * 802.11 Block Ack Request (BAR) control frame, up through the
 * transmitter address; used only for header-size accounting in
 * mwl_anyhdrsize.
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];		/* frame control */
	u_int8_t	i_dur[2];		/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2551 
2552 /*
2553  * Like ieee80211_anyhdrsize, but handles BAR frames
2554  * specially so the logic below to piece the 802.11
2555  * header together works.
2556  */
2557 static __inline int
2558 mwl_anyhdrsize(const void *data)
2559 {
2560 	const struct ieee80211_frame *wh = data;
2561 
2562 	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2563 		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2564 		case IEEE80211_FC0_SUBTYPE_CTS:
2565 		case IEEE80211_FC0_SUBTYPE_ACK:
2566 			return sizeof(struct ieee80211_frame_ack);
2567 		case IEEE80211_FC0_SUBTYPE_BAR:
2568 			return sizeof(struct mwl_frame_bar);
2569 		}
2570 		return sizeof(struct ieee80211_frame_min);
2571 	} else
2572 		return ieee80211_hdrsize(data);
2573 }
2574 
2575 static void
2576 mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2577 {
2578 	const struct ieee80211_frame *wh;
2579 	struct ieee80211_node *ni;
2580 
2581 	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2582 	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2583 	if (ni != NULL) {
2584 		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2585 		ieee80211_free_node(ni);
2586 	}
2587 }
2588 
2589 /*
2590  * Convert hardware signal strength to rssi.  The value
2591  * provided by the device has the noise floor added in;
2592  * we need to compensate for this but we don't have that
2593  * so we use a fixed value.
2594  *
2595  * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2596  * offset is already set as part of the initial gain.  This
2597  * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2598  */
static __inline int
cvtrssi(uint8_t ssi)
{
	int rssi;

	/* XXX hack guess until we have a real noise floor */
	rssi = 2 * (87 - ((int) ssi + 8));	/* NB: .5 dBm units */
	if (rssi < 0)
		return 0;
	if (rssi > 127)
		return 127;
	return rssi;
}
2607 
/*
 * Deferred rx processing (taskqueue handler).  Walk the rx
 * descriptor ring reclaiming completed frames: each frame's jumbo
 * dma buffer is attached to an mbuf and handed up the stack, and
 * the descriptor is immediately re-armed with a replacement buffer
 * from the free pool.  If the pool runs dry, rx interrupts are
 * masked until mwl_ext_free replenishes it.  At most mwl_rxquota
 * frames are processed per invocation.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
	struct epoch_tracker et;
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* NB: descriptor still firmware-owned means we're caught up */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			counter_u64_add(ic->ic_ierrors, 1);
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		m_extadd(m, data, MWL_AGGR_SIZE, mwl_ext_free, sc, NULL, 0,
		    EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh))
			*(uint16_t *)ieee80211_getqos(wh) = ds->QosCtrl;
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
		    IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);

		NET_EPOCH_ENTER(et);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
		NET_EPOCH_EXIT(et);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	/* remember where to resume on the next invocation */
	sc->sc_rxnext = bf;

	if (mbufq_first(&sc->sc_snd) != NULL) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(sc);
	}
}
2832 
2833 static void
2834 mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
2835 {
2836 	struct mwl_txbuf *bf, *bn;
2837 	struct mwl_txdesc *ds;
2838 
2839 	MWL_TXQ_LOCK_INIT(sc, txq);
2840 	txq->qnum = qnum;
2841 	txq->txpri = 0;	/* XXX */
2842 #if 0
2843 	/* NB: q setup by mwl_txdma_setup XXX */
2844 	STAILQ_INIT(&txq->free);
2845 #endif
2846 	STAILQ_FOREACH(bf, &txq->free, bf_list) {
2847 		bf->bf_txq = txq;
2848 
2849 		ds = bf->bf_desc;
2850 		bn = STAILQ_NEXT(bf, bf_list);
2851 		if (bn == NULL)
2852 			bn = STAILQ_FIRST(&txq->free);
2853 		ds->pPhysNext = htole32(bn->bf_daddr);
2854 	}
2855 	STAILQ_INIT(&txq->active);
2856 }
2857 
2858 /*
2859  * Setup a hardware data transmit queue for the specified
2860  * access control.  We record the mapping from ac's
2861  * to h/w queues for use by mwl_tx_start.
2862  */
2863 static int
2864 mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2865 {
2866 	struct mwl_txq *txq;
2867 
2868 	if (ac >= nitems(sc->sc_ac2q)) {
2869 		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2870 			ac, nitems(sc->sc_ac2q));
2871 		return 0;
2872 	}
2873 	if (mvtype >= MWL_NUM_TX_QUEUES) {
2874 		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2875 			mvtype, MWL_NUM_TX_QUEUES);
2876 		return 0;
2877 	}
2878 	txq = &sc->sc_txq[mvtype];
2879 	mwl_txq_init(sc, txq, mvtype);
2880 	sc->sc_ac2q[ac] = txq;
2881 	return 1;
2882 }
2883 
2884 /*
2885  * Update WME parameters for a transmit queue.
2886  */
2887 static int
2888 mwl_txq_update(struct mwl_softc *sc, int ac)
2889 {
2890 #define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2891 	struct ieee80211com *ic = &sc->sc_ic;
2892 	struct chanAccParams chp;
2893 	struct mwl_txq *txq = sc->sc_ac2q[ac];
2894 	struct wmeParams *wmep;
2895 	struct mwl_hal *mh = sc->sc_mh;
2896 	int aifs, cwmin, cwmax, txoplim;
2897 
2898 	ieee80211_wme_ic_getparams(ic, &chp);
2899 	wmep = &chp.cap_wmeParams[ac];
2900 
2901 	aifs = wmep->wmep_aifsn;
2902 	/* XXX in sta mode need to pass log values for cwmin/max */
2903 	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2904 	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2905 	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
2906 
2907 	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
2908 		device_printf(sc->sc_dev, "unable to update hardware queue "
2909 			"parameters for %s traffic!\n",
2910 			ieee80211_wme_acnames[ac]);
2911 		return 0;
2912 	}
2913 	return 1;
2914 #undef MWL_EXPONENT_TO_VALUE
2915 }
2916 
2917 /*
2918  * Callback from the 802.11 layer to update WME parameters.
2919  */
2920 static int
2921 mwl_wme_update(struct ieee80211com *ic)
2922 {
2923 	struct mwl_softc *sc = ic->ic_softc;
2924 
2925 	return !mwl_txq_update(sc, WME_AC_BE) ||
2926 	    !mwl_txq_update(sc, WME_AC_BK) ||
2927 	    !mwl_txq_update(sc, WME_AC_VI) ||
2928 	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
2929 }
2930 
2931 /*
2932  * Reclaim resources for a setup queue.
2933  */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* Tear down the per-queue lock; buffers are reclaimed elsewhere. */
	/* XXX hal work? */
	MWL_TXQ_LOCK_DESTROY(txq);
}
2940 
2941 /*
2942  * Reclaim all tx queue resources.
2943  */
2944 static void
2945 mwl_tx_cleanup(struct mwl_softc *sc)
2946 {
2947 	int i;
2948 
2949 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2950 		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
2951 }
2952 
/*
 * Map an mbuf chain for transmit DMA.  If the chain needs more
 * than MWL_TXDESC segments it is linearized and re-loaded.  On
 * success the (possibly replaced) mbuf is recorded in bf->bf_m
 * and the map is sync'd for a device read; on failure the chain
 * is freed and an errno-style code is returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		/* NB: sentinel > MWL_TXDESC triggers the linearize path below */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* re-load the now-contiguous chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3014 
/*
 * Map a legacy IEEE rate (in 500Kb/s units) to the firmware
 * rate index; unknown rates map to index 0 (1Mb/s).
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int legacyrates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ix;

	for (ix = 0; ix < (int)(sizeof(legacyrates)/sizeof(legacyrates[0]));
	     ix++) {
		if (legacyrates[ix] == rate)
			return ix;
	}
	return 0;
}
3035 
3036 /*
3037  * Calculate fixed tx rate information per client state;
3038  * this value is suitable for writing to the Format field
3039  * of a tx descriptor.
3040  */
static uint16_t
mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
{
	uint16_t fmt;

	/* antenna selection + extension-channel placement from the chan */
	fmt = SM(3, EAGLE_TXD_ANTENNA)
	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
		fmt |= EAGLE_TXD_FORMAT_HT
		    /* NB: 0x80 implicitly stripped from ucastrate */
		    | SM(rate, EAGLE_TXD_RATE);
		/* XXX short/long GI may be wrong; re-check */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			/* 40MHz: guard interval from the peer's HT40 SGI cap */
			fmt |= EAGLE_TXD_CHW_40
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		} else {
			/* 20MHz: guard interval from the peer's HT20 SGI cap */
			fmt |= EAGLE_TXD_CHW_20
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		}
	} else {			/* legacy rate */
		fmt |= EAGLE_TXD_FORMAT_LEGACY
		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
		    | EAGLE_TXD_CHW_20
		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
	}
	return fmt;
}
3073 
/*
 * Prepare and hand one frame to the firmware: encrypt if needed,
 * prepend the firmware tx record, map the mbuf for DMA, fill in
 * the tx descriptor (rate/priority selection), and append the
 * buffer to the queue's active list marked firmware-owned.
 * Returns 0 on success; on error the mbuf is freed and an
 * errno-style code is returned (ni reference is NOT released here).
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	copyhdrlen = hdrlen;
	pktlen = m0->m_pkthdr.len;
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		/* NB: raw (little-endian) QoS control field, used as-is below */
		qos = *(uint16_t *)ieee80211_getqos(wh);
		if (IEEE80211_IS_DSTODS(wh))
			copyhdrlen -= sizeof(qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/* NB: EAPOL frames will never have qos set */
			/* route to a BA-stream queue when the tid matches */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	/* hand the descriptor to the firmware and arm the watchdog */
	MWL_TXQ_LOCK(txq);
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_tx_timer = 5;
	MWL_TXQ_UNLOCK(txq);

	return 0;
}
3313 
/*
 * Map a firmware legacy rate index back to an IEEE rate
 * (500Kb/s units); out-of-range indices map to 0.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	const int nrates = (int)(sizeof(ieeerates) / sizeof(ieeerates[0]));

	if (rix < 0 || rix >= nrates)
		return 0;
	return ieeerates[rix];
}
3321 
3322 /*
3323  * Process completed xmit descriptors from the specified queue.
3324  */
static int
mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
{
#define	EAGLE_TXD_STATUS_MCAST \
	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;
	struct ieee80211_node *ni;
	struct mwl_node *an;
	int nreaped;
	uint32_t status;

	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
	/*
	 * Reap buffers from the head of the active list until we hit
	 * one still owned by the firmware.  The lock is held only while
	 * inspecting/unlinking each descriptor so completion work
	 * (stats, rate bookkeeping, mbuf release) runs unlocked.
	 */
	for (nreaped = 0;; nreaped++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		ds = bf->bf_desc;
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
			/* f/w hasn't completed this one yet; stop reaping */
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);

#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
			mwl_printtxbuf(bf, txq->qnum, nreaped);
#endif
		ni = bf->bf_node;
		if (ni != NULL) {
			an = MWL_NODE(ni);
			status = le32toh(ds->Status);
			if (status & EAGLE_TXD_STATUS_OK) {
				uint16_t Format = le16toh(ds->Format);
				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);

				sc->sc_stats.mst_ant_tx[txant]++;
				if (status & EAGLE_TXD_STATUS_OK_RETRY)
					sc->sc_stats.mst_tx_retries++;
				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
					sc->sc_stats.mst_tx_mretries++;
				if (txq->qnum >= MWL_WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				/* record the f/w-reported tx rate on the node */
				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
					ni->ni_txrate = mwl_cvtlegacyrix(
					    ni->ni_txrate);
				} else
					ni->ni_txrate |= IEEE80211_RATE_MCS;
				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
			} else {
				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
					sc->sc_stats.mst_tx_linkerror++;
				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
					sc->sc_stats.mst_tx_xretries++;
				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
					sc->sc_stats.mst_tx_aging++;
				if (bf->bf_m->m_flags & M_FF)
					sc->sc_stats.mst_ff_txerr++;
			}
			if (bf->bf_m->m_flags & M_TXCB)
				/* XXX strip fw len in case header inspected */
				m_adj(bf->bf_m, sizeof(uint16_t));
			/* releases the node reference and frees the mbuf */
			ieee80211_tx_complete(ni, bf->bf_m,
			    (status & EAGLE_TXD_STATUS_OK) == 0);
		} else
			m_freem(bf->bf_m);
		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);

		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		mwl_puttxbuf_tail(txq, bf);
	}
	return nreaped;
#undef EAGLE_TXD_STATUS_MCAST
}
3410 
3411 /*
3412  * Deferred processing of transmit interrupt; special-cased
3413  * for four hardware queues, 0-3.
3414  */
3415 static void
3416 mwl_tx_proc(void *arg, int npending)
3417 {
3418 	struct mwl_softc *sc = arg;
3419 	int nreaped;
3420 
3421 	/*
3422 	 * Process each active queue.
3423 	 */
3424 	nreaped = 0;
3425 	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3426 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3427 	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3428 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3429 	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3430 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3431 	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3432 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3433 
3434 	if (nreaped != 0) {
3435 		sc->sc_tx_timer = 0;
3436 		if (mbufq_first(&sc->sc_snd) != NULL) {
3437 			/* NB: kick fw; the tx thread may have been preempted */
3438 			mwl_hal_txstart(sc->sc_mh, 0);
3439 			mwl_start(sc);
3440 		}
3441 	}
3442 }
3443 
/*
 * Drain all frames queued on a single transmit queue, reclaiming
 * DMA maps, node references, and mbufs, and returning the buffers
 * to the queue's free list.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ieee80211com *ic = &sc->sc_ic;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3487 
3488 /*
3489  * Drain the transmit queues and reclaim resources.
3490  */
3491 static void
3492 mwl_draintxq(struct mwl_softc *sc)
3493 {
3494 	int i;
3495 
3496 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3497 		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3498 	sc->sc_tx_timer = 0;
3499 }
3500 
3501 #ifdef MWL_DIAGAPI
3502 /*
3503  * Reset the transmit queues to a pristine state after a fw download.
3504  */
3505 static void
3506 mwl_resettxq(struct mwl_softc *sc)
3507 {
3508 	int i;
3509 
3510 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3511 		mwl_txq_reset(sc, &sc->sc_txq[i]);
3512 }
3513 #endif /* MWL_DIAGAPI */
3514 
3515 /*
3516  * Clear the transmit queues of any frames submitted for the
3517  * specified vap.  This is done when the vap is deleted so we
3518  * don't potentially reference the vap after it is gone.
3519  * Note we cannot remove the frames; we only reclaim the node
3520  * reference.
3521  */
3522 static void
3523 mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3524 {
3525 	struct mwl_txq *txq;
3526 	struct mwl_txbuf *bf;
3527 	int i;
3528 
3529 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3530 		txq = &sc->sc_txq[i];
3531 		MWL_TXQ_LOCK(txq);
3532 		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3533 			struct ieee80211_node *ni = bf->bf_node;
3534 			if (ni != NULL && ni->ni_vap == vap) {
3535 				bf->bf_node = NULL;
3536 				ieee80211_free_node(ni);
3537 			}
3538 		}
3539 		MWL_TXQ_UNLOCK(txq);
3540 	}
3541 }
3542 
3543 static int
3544 mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3545 	const uint8_t *frm, const uint8_t *efrm)
3546 {
3547 	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3548 	const struct ieee80211_action *ia;
3549 
3550 	ia = (const struct ieee80211_action *) frm;
3551 	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3552 	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3553 		const struct ieee80211_action_ht_mimopowersave *mps =
3554 		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3555 
3556 		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3557 		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3558 		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3559 		return 0;
3560 	} else
3561 		return sc->sc_recv_action(ni, wh, frm, efrm);
3562 }
3563 
/*
 * Intercept an outbound ADDBA request: reserve a firmware BA
 * stream slot for the node/tid before chaining to the net80211
 * handler.  Returns 0 (no aggregation) if no slot or stream is
 * available.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3635 
/*
 * Intercept the ADDBA response: on success instruct the firmware
 * to create the pre-allocated BA stream; on failure (or f/w setup
 * error) destroy the stream and release our state.  Always chains
 * to the saved net80211 handler.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3702 
3703 static void
3704 mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3705 {
3706 	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3707 	struct mwl_bastate *bas;
3708 
3709 	bas = tap->txa_private;
3710 	if (bas != NULL) {
3711 		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3712 		    __func__, bas->bastream);
3713 		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3714 		mwl_bastream_free(bas);
3715 		tap->txa_private = NULL;
3716 	}
3717 	sc->sc_addba_stop(ni, tap);
3718 }
3719 
3720 /*
3721  * Setup the rx data structures.  This should only be
3722  * done once or we may get out of sync with the firmware.
3723  */
static int
mwl_startrecv(struct mwl_softc *sc)
{
	/* One-shot ring construction; redone only after a reset. */
	if (!sc->sc_recvsetup) {
		struct mwl_rxbuf *bf, *prev;
		struct mwl_rxdesc *ds;

		prev = NULL;
		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			int error = mwl_rxbuf_init(sc, bf);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_RECV,
					"%s: mwl_rxbuf_init failed %d\n",
					__func__, error);
				return error;
			}
			/* chain each descriptor to the next via its busaddr */
			if (prev != NULL) {
				ds = prev->bf_desc;
				ds->pPhysNext = htole32(bf->bf_daddr);
			}
			prev = bf;
		}
		/* close the ring: last descriptor points back to the first */
		if (prev != NULL) {
			ds = prev->bf_desc;
			ds->pPhysNext =
			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
		}
		sc->sc_recvsetup = 1;
	}
	mwl_mode_init(sc);		/* set filters, etc. */
	return 0;
}
3756 
3757 static MWL_HAL_APMODE
3758 mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3759 {
3760 	MWL_HAL_APMODE mode;
3761 
3762 	if (IEEE80211_IS_CHAN_HT(chan)) {
3763 		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3764 			mode = AP_MODE_N_ONLY;
3765 		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3766 			mode = AP_MODE_AandN;
3767 		else if (vap->iv_flags & IEEE80211_F_PUREG)
3768 			mode = AP_MODE_GandN;
3769 		else
3770 			mode = AP_MODE_BandGandN;
3771 	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3772 		if (vap->iv_flags & IEEE80211_F_PUREG)
3773 			mode = AP_MODE_G_ONLY;
3774 		else
3775 			mode = AP_MODE_MIXED;
3776 	} else if (IEEE80211_IS_CHAN_B(chan))
3777 		mode = AP_MODE_B_ONLY;
3778 	else if (IEEE80211_IS_CHAN_A(chan))
3779 		mode = AP_MODE_A_ONLY;
3780 	else
3781 		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3782 	return mode;
3783 }
3784 
/* Push the AP operating mode derived from the channel to the f/w vap. */
static int
mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
}
3791 
3792 /*
3793  * Set/change channels.
3794  */
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211com *ic = &sc->sc_ic;
	MWL_HAL_CHANNEL hchan;
	int maxtxpow;

	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	/*
	 * Convert to a HAL channel description with
	 * the flags constrained to reflect the current
	 * operating mode.
	 */
	mwl_mapchan(&hchan, chan);
	mwl_hal_intrset(mh, 0);		/* disable interrupts */
#if 0
	mwl_draintxq(sc);		/* clear pending tx frames */
#endif
	mwl_hal_setchannel(mh, &hchan);
	/*
	 * Tx power is cap'd by the regulatory setting and
	 * possibly a user-set limit.  We pass the min of
	 * these to the hal to apply them to the cal data
	 * for this channel.
	 * XXX min bound?
	 */
	/* NB: powers are carried in half-dBm units, hence the 2x/÷2 */
	maxtxpow = 2*chan->ic_maxregpower;
	if (maxtxpow > ic->ic_txpowlimit)
		maxtxpow = ic->ic_txpowlimit;
	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
	/* NB: potentially change mcast/mgt rates */
	mwl_setcurchanrates(sc);

	/*
	 * Update internal state.
	 */
	/* refresh the radiotap headers to reflect the new channel */
	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
	if (IEEE80211_IS_CHAN_A(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
	} else {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
	}
	sc->sc_curchan = hchan;
	mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable interrupts */

	return 0;
}
3851 
static void
mwl_scan_start(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* NB: nothing to do; the firmware handles scanning itself */
	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3859 
static void
mwl_scan_end(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* NB: nothing to do; scan completion needs no device work */
	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3867 
static void
mwl_set_channel(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* net80211 channel-change hook; errors are not propagated */
	(void) mwl_chan_set(sc, ic->ic_curchan);
}
3875 
3876 /*
3877  * Handle a channel switch request.  We inform the firmware
3878  * and mark the global state to suppress various actions.
3879  * NB: we issue only one request to the fw; we may be called
3880  * multiple times if there are multiple vap's.
3881  */
static void
mwl_startcsa(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct mwl_softc *sc = ic->ic_softc;
	MWL_HAL_CHANNEL hchan;

	/* only one CSA request to the firmware, even with multiple vaps */
	if (sc->sc_csapending)
		return;

	mwl_mapchan(&hchan, ic->ic_csa_newchan);
	/* 1 =>'s quiet channel */
	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
	sc->sc_csapending = 1;
}
3897 
3898 /*
3899  * Plumb any static WEP key for the station.  This is
3900  * necessary as we must propagate the key from the
3901  * global key table of the vap to each sta db entry.
3902  */
static void
mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	/*
	 * Only for static-WEP vaps (privacy on, WPA off) with a valid
	 * default tx key; propagate that key to the station's db entry.
	 */
	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
		IEEE80211_F_PRIVACY &&
	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
		(void) _mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey],
				    mac);
}
3913 
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	/* Create the f/w station entry, passing QoS/HT capability flags
	 * and the WME info byte (0 when the node carries no WME ie). */
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though we do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
3947 
3948 static void
3949 mwl_setglobalkeys(struct ieee80211vap *vap)
3950 {
3951 	struct ieee80211_key *wk;
3952 
3953 	wk = &vap->iv_nw_keys[0];
3954 	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
3955 		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
3956 			(void) _mwl_key_set(vap, wk, vap->iv_myaddr);
3957 }
3958 
3959 /*
3960  * Convert a legacy rate set to a firmware bitmask.
3961  */
3962 static uint32_t
3963 get_rate_bitmap(const struct ieee80211_rateset *rs)
3964 {
3965 	uint32_t rates;
3966 	int i;
3967 
3968 	rates = 0;
3969 	for (i = 0; i < rs->rs_nrates; i++)
3970 		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
3971 		case 2:	  rates |= 0x001; break;
3972 		case 4:	  rates |= 0x002; break;
3973 		case 11:  rates |= 0x004; break;
3974 		case 22:  rates |= 0x008; break;
3975 		case 44:  rates |= 0x010; break;
3976 		case 12:  rates |= 0x020; break;
3977 		case 18:  rates |= 0x040; break;
3978 		case 24:  rates |= 0x080; break;
3979 		case 36:  rates |= 0x100; break;
3980 		case 48:  rates |= 0x200; break;
3981 		case 72:  rates |= 0x400; break;
3982 		case 96:  rates |= 0x800; break;
3983 		case 108: rates |= 0x1000; break;
3984 		}
3985 	return rates;
3986 }
3987 
3988 /*
3989  * Construct an HT firmware bitmask from an HT rate set.
3990  */
3991 static uint32_t
3992 get_htrate_bitmap(const struct ieee80211_htrateset *rs)
3993 {
3994 	uint32_t rates;
3995 	int i;
3996 
3997 	rates = 0;
3998 	for (i = 0; i < rs->rs_nrates; i++) {
3999 		if (rs->rs_rates[i] < 16)
4000 			rates |= 1<<rs->rs_rates[i];
4001 	}
4002 	return rates;
4003 }
4004 
4005 /*
4006  * Craft station database entry for station.
4007  * NB: use host byte order here, the hal handles byte swapping.
4008  */
static MWL_HAL_PEERINFO *
mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
{
	const struct ieee80211vap *vap = ni->ni_vap;

	memset(pi, 0, sizeof(*pi));
	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
	pi->CapInfo = ni->ni_capinfo;
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* HT capabilities, etc */
		pi->HTCapabilitiesInfo = ni->ni_htcap;
		/* XXX pi.HTCapabilitiesInfo */
	        pi->MacHTParamInfo = ni->ni_htparam;
		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
		pi->AddHtInfo.OpMode = ni->ni_htopmode;
		pi->AddHtInfo.stbc = ni->ni_htstbc;

		/* constrain according to local configuration */
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
		/* NB: advertise 40MHz only when the node is actually
		 * operating at 40MHz channel width */
		if (ni->ni_chw != 40)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
	}
	return pi;
}
4038 
4039 /*
4040  * Re-create the local sta db entry for a vap to ensure
4041  * up to date WME state is pushed to the firmware.  Because
4042  * this resets crypto state this must be followed by a
4043  * reload of any keys in the global key table.
4044  */
static int
mwl_localstadb(struct ieee80211vap *vap)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *bss;
	MWL_HAL_PEERINFO pi;
	int error;

	switch (vap->iv_opmode) {
	case IEEE80211_M_STA:
		/* NB: peer info is only meaningful once associated (RUN) */
		bss = vap->iv_bss;
		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
		    vap->iv_state == IEEE80211_S_RUN ?
			mkpeerinfo(&pi, bss) : NULL,
		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
		    bss->ni_ies.wme_ie != NULL ?
			WME(bss->ni_ies.wme_ie)->wme_info : 0);
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	default:
		/* other operating modes need no local sta db entry */
		error = 0;
		break;
	}
	return error;
#undef WME
}
4080 
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* stop the aging timer; re-armed below on entry to RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* first DWDS vap to reach RUN enables DWDS globally */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/* NOTE(review): this decrement fires on any transition of a
		 * DWDS vap other than (successful) RUN or SLEEP — verify it
		 * stays paired with the increment in the RUN/STA path. */
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4233 
4234 /*
4235  * Manage station id's; these are separate from AID's
4236  * as AID's may have values out of the range of possible
4237  * station id's acceptable to the firmware.
4238  */
4239 static int
4240 allocstaid(struct mwl_softc *sc, int aid)
4241 {
4242 	int staid;
4243 
4244 	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4245 		/* NB: don't use 0 */
4246 		for (staid = 1; staid < MWL_MAXSTAID; staid++)
4247 			if (isclr(sc->sc_staid, staid))
4248 				break;
4249 	} else
4250 		staid = aid;
4251 	setbit(sc->sc_staid, staid);
4252 	return staid;
4253 }
4254 
static void
delstaid(struct mwl_softc *sc, int staid)
{
	/* Return a station id to the free pool. */
	clrbit(sc->sc_staid, staid);
}
4260 
4261 /*
4262  * Setup driver-specific state for a newly associated node.
4263  * Note that we're called also on a re-associate, the isnew
4264  * param tells us if this is the first time or not.
4265  */
4266 static void
4267 mwl_newassoc(struct ieee80211_node *ni, int isnew)
4268 {
4269 	struct ieee80211vap *vap = ni->ni_vap;
4270         struct mwl_softc *sc = vap->iv_ic->ic_softc;
4271 	struct mwl_node *mn = MWL_NODE(ni);
4272 	MWL_HAL_PEERINFO pi;
4273 	uint16_t aid;
4274 	int error;
4275 
4276 	aid = IEEE80211_AID(ni->ni_associd);
4277 	if (isnew) {
4278 		mn->mn_staid = allocstaid(sc, aid);
4279 		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4280 	} else {
4281 		mn = MWL_NODE(ni);
4282 		/* XXX reset BA stream? */
4283 	}
4284 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4285 	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4286 	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4287 	if (error != 0) {
4288 		DPRINTF(sc, MWL_DEBUG_NODE,
4289 		    "%s: error %d creating sta db entry\n",
4290 		    __func__, error);
4291 		/* XXX how to deal with error? */
4292 	}
4293 }
4294 
4295 /*
4296  * Periodically poke the firmware to age out station state
4297  * (power save queues, pending tx aggregates).
4298  */
4299 static void
4300 mwl_agestations(void *arg)
4301 {
4302 	struct mwl_softc *sc = arg;
4303 
4304 	mwl_hal_setkeepalive(sc->sc_mh);
4305 	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
4306 		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
4307 }
4308 
4309 static const struct mwl_hal_channel *
4310 findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4311 {
4312 	int i;
4313 
4314 	for (i = 0; i < ci->nchannels; i++) {
4315 		const struct mwl_hal_channel *hc = &ci->channels[i];
4316 		if (hc->ieee == ieee)
4317 			return hc;
4318 	}
4319 	return NULL;
4320 }
4321 
4322 static int
4323 mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4324 	int nchan, struct ieee80211_channel chans[])
4325 {
4326 	struct mwl_softc *sc = ic->ic_softc;
4327 	struct mwl_hal *mh = sc->sc_mh;
4328 	const MWL_HAL_CHANNELINFO *ci;
4329 	int i;
4330 
4331 	for (i = 0; i < nchan; i++) {
4332 		struct ieee80211_channel *c = &chans[i];
4333 		const struct mwl_hal_channel *hc;
4334 
4335 		if (IEEE80211_IS_CHAN_2GHZ(c)) {
4336 			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4337 			    IEEE80211_IS_CHAN_HT40(c) ?
4338 				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4339 		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4340 			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4341 			    IEEE80211_IS_CHAN_HT40(c) ?
4342 				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4343 		} else {
4344 			device_printf(sc->sc_dev,
4345 			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4346 			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4347 			return EINVAL;
4348 		}
4349 		/*
4350 		 * Verify channel has cal data and cap tx power.
4351 		 */
4352 		hc = findhalchannel(ci, c->ic_ieee);
4353 		if (hc != NULL) {
4354 			if (c->ic_maxpower > 2*hc->maxTxPow)
4355 				c->ic_maxpower = 2*hc->maxTxPow;
4356 			goto next;
4357 		}
4358 		if (IEEE80211_IS_CHAN_HT40(c)) {
4359 			/*
4360 			 * Look for the extension channel since the
4361 			 * hal table only has the primary channel.
4362 			 */
4363 			hc = findhalchannel(ci, c->ic_extieee);
4364 			if (hc != NULL) {
4365 				if (c->ic_maxpower > 2*hc->maxTxPow)
4366 					c->ic_maxpower = 2*hc->maxTxPow;
4367 				goto next;
4368 			}
4369 		}
4370 		device_printf(sc->sc_dev,
4371 		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4372 		    __func__, c->ic_ieee, c->ic_extieee,
4373 		    c->ic_freq, c->ic_flags);
4374 		return EINVAL;
4375 	next:
4376 		;
4377 	}
4378 	return 0;
4379 }
4380 
4381 #define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4382 #define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4383 
4384 static void
4385 addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4386 	const MWL_HAL_CHANNELINFO *ci, int flags)
4387 {
4388 	int i, error;
4389 
4390 	for (i = 0; i < ci->nchannels; i++) {
4391 		const struct mwl_hal_channel *hc = &ci->channels[i];
4392 
4393 		error = ieee80211_add_channel_ht40(chans, maxchans, nchans,
4394 		    hc->ieee, hc->maxTxPow, flags);
4395 		if (error != 0 && error != ENOENT)
4396 			break;
4397 	}
4398 }
4399 
4400 static void
4401 addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4402 	const MWL_HAL_CHANNELINFO *ci, const uint8_t bands[])
4403 {
4404 	int i, error;
4405 
4406 	error = 0;
4407 	for (i = 0; i < ci->nchannels && error == 0; i++) {
4408 		const struct mwl_hal_channel *hc = &ci->channels[i];
4409 
4410 		error = ieee80211_add_channel(chans, maxchans, nchans,
4411 		    hc->ieee, hc->freq, hc->maxTxPow, 0, bands);
4412 	}
4413 }
4414 
static void
getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
	struct ieee80211_channel chans[])
{
	const MWL_HAL_CHANNELINFO *ci;
	uint8_t bands[IEEE80211_MODE_BYTES];

	/*
	 * Use the channel info from the hal to craft the
	 * channel list.  Note that we pass back an unsorted
	 * list; the caller is required to sort it for us
	 * (if desired).
	 */
	*nchans = 0;
	/* 2.4GHz 20MHz channels: 11b/11g/11ng */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11B);
		setbit(bands, IEEE80211_MODE_11G);
		setbit(bands, IEEE80211_MODE_11NG);
		addchannels(chans, maxchans, nchans, ci, bands);
	}
	/* 5GHz 20MHz channels: 11a/11na */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		setbit(bands, IEEE80211_MODE_11NA);
		addchannels(chans, maxchans, nchans, ci, bands);
	}
	/* HT40 channels in both bands */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
}
4451 
4452 static void
4453 mwl_getradiocaps(struct ieee80211com *ic,
4454 	int maxchans, int *nchans, struct ieee80211_channel chans[])
4455 {
4456 	struct mwl_softc *sc = ic->ic_softc;
4457 
4458 	getchannels(sc, maxchans, nchans, chans);
4459 }
4460 
4461 static int
4462 mwl_getchannels(struct mwl_softc *sc)
4463 {
4464 	struct ieee80211com *ic = &sc->sc_ic;
4465 
4466 	/*
4467 	 * Use the channel info from the hal to craft the
4468 	 * channel list for net80211.  Note that we pass up
4469 	 * an unsorted list; net80211 will sort it for us.
4470 	 */
4471 	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4472 	ic->ic_nchans = 0;
4473 	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4474 
4475 	ic->ic_regdomain.regdomain = SKU_DEBUG;
4476 	ic->ic_regdomain.country = CTRY_DEFAULT;
4477 	ic->ic_regdomain.location = 'I';
4478 	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4479 	ic->ic_regdomain.isocc[1] = ' ';
4480 	return (ic->ic_nchans == 0 ? EIO : 0);
4481 }
4482 #undef IEEE80211_CHAN_HTA
4483 #undef IEEE80211_CHAN_HTG
4484 
4485 #ifdef MWL_DEBUG
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	/* Dump one rx descriptor; for completed (non-driver-owned)
	 * descriptors a trailing '*' marks ok status, '!' an error. */
	printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
	    le32toh(ds->pPhysBuffData), ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
4501 
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	/* Dump one tx descriptor (debug only); a trailing '*' or '!'
	 * annotates the status word of firmware-owned descriptors. */
	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !");
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	/* multi-descriptor (scatter/gather) layout */
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4543 #endif /* MWL_DEBUG */
4544 
4545 #if 0
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	/* Walk the queue's active list dumping each descriptor
	 * (currently compiled out along with its only caller). */
	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4564 #endif
4565 
static void
mwl_watchdog(void *arg)
{
	struct mwl_softc *sc = arg;

	/* self-rearming once-a-second watchdog */
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
	/* NB: sc_tx_timer == 0 means no tx in flight; otherwise count it
	 * down and only act once the countdown expires */
	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
		return;

	if (sc->sc_running && !sc->sc_invalid) {
		/* a failing keepalive suggests the firmware is wedged */
		if (mwl_hal_setkeepalive(sc->sc_mh))
			device_printf(sc->sc_dev,
			    "transmit timeout (firmware hung?)\n");
		else
			device_printf(sc->sc_dev,
			    "transmit timeout\n");
#if 0
		mwl_reset(sc);
mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
#endif
		counter_u64_add(sc->sc_ic.ic_oerrors, 1);
		sc->sc_stats.mst_watchdog++;
	}
}
4590 
4591 #ifdef MWL_DIAGAPI
4592 /*
4593  * Diagnostic interface to the HAL.  This is used by various
4594  * tools to do things like retrieve register contents for
4595  * debugging.  The mechanism is intentionally opaque so that
4596  * it can change frequently w/o concern for compatibility.
4597  */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	/* NOTE(review): insize/outsize come from userland and are passed
	 * unvalidated to malloc; consider bounding them — verify callers
	 * enforce a limit. */
	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		/* never copy out more than the caller asked for */
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
					md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4652 
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	/* md_id == 0 requests a full firmware reload */
	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4683 #endif /* MWL_DIAGAPI */
4684 
static void
mwl_parent(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;
	int startall = 0;

	/* Bring the device up/down to match the number of running vaps. */
	MWL_LOCK(sc);
	if (ic->ic_nrunning > 0) {
		if (sc->sc_running) {
			/*
			 * To avoid rescanning another access point,
			 * do not call mwl_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			mwl_mode_init(sc);
		} else {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid) {
				mwl_init(sc);	/* XXX lose error */
				startall = 1;
			}
		}
	} else
		mwl_stop(sc);
	MWL_UNLOCK(sc);
	/* NB: start vaps only after dropping the softc lock */
	if (startall)
		ieee80211_start_all(ic);
}
4721 
static int
mwl_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
{
	struct mwl_softc *sc = ic->ic_softc;
	struct ifreq *ifr = data;
	int error = 0;

	switch (cmd) {
	case SIOCGMVSTATS:
		/* refresh the hardware statistics before copying out */
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
#if 0
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS);
		sc->sc_stats.mst_rx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS);
#endif
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsisentcy in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return (copyout(&sc->sc_stats, ifr_data_get_ptr(ifr),
		    sizeof (sc->sc_stats)));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
4764 
4765 #ifdef	MWL_DEBUG
4766 static int
4767 mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4768 {
4769 	struct mwl_softc *sc = arg1;
4770 	int debug, error;
4771 
4772 	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4773 	error = sysctl_handle_int(oidp, &debug, 0, req);
4774 	if (error || !req->newptr)
4775 		return error;
4776 	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4777 	sc->sc_debug = debug & 0x00ffffff;
4778 	return 0;
4779 }
4780 #endif /* MWL_DEBUG */
4781 
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	sc->sc_debug = mwl_debug;
	/* combined debug knob: hal bits in the top byte, driver bits
	 * in the low 24 (see mwl_sysctl_debug) */
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
4795 
4796 /*
4797  * Announce various information on device/driver attach.
4798  */
static void
mwl_announce(struct mwl_softc *sc)
{

	/* hardware rev, firmware version (packed a.b.c.d) and region code */
	device_printf(sc->sc_dev, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
		sc->sc_hwspecs.hwVersion,
		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
		sc->sc_hwspecs.regionCode);
	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;

	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct mwl_txq *txq = sc->sc_ac2q[i];
			device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n",
				txq->qnum, ieee80211_wme_acnames[i]);
		}
	}
	/* report non-default tunables even without bootverbose */
	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
		device_printf(sc->sc_dev, "using %u rx descriptors\n", mwl_rxdesc);
	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
		device_printf(sc->sc_dev, "using %u rx buffers\n", mwl_rxbuf);
	if (bootverbose || mwl_txbuf != MWL_TXBUF)
		device_printf(sc->sc_dev, "using %u tx buffers\n", mwl_txbuf);
	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
		device_printf(sc->sc_dev, "multi-bss support\n");
#ifdef MWL_TX_NODROP
	if (bootverbose)
		device_printf(sc->sc_dev, "no tx drop\n");
#endif
}
4833