xref: /freebsd/sys/dev/mwl/if_mwl.c (revision 830940567b49bb0c08dfaed40418999e76616909)
1 /*-
2  * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3  * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer,
11  *    without modification.
12  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14  *    redistribution must be conditioned upon including a substantially
15  *    similar Disclaimer requirement for further binary redistribution.
16  *
17  * NO WARRANTY
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGES.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * Driver for the Marvell 88W8363 Wireless LAN controller.
36  */
37 
38 #include "opt_inet.h"
39 #include "opt_mwl.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/kernel.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/errno.h>
52 #include <sys/callout.h>
53 #include <sys/bus.h>
54 #include <sys/endian.h>
55 #include <sys/kthread.h>
56 #include <sys/taskqueue.h>
57 
58 #include <machine/bus.h>
59 
60 #include <net/if.h>
61 #include <net/if_dl.h>
62 #include <net/if_media.h>
63 #include <net/if_types.h>
64 #include <net/if_arp.h>
65 #include <net/ethernet.h>
66 #include <net/if_llc.h>
67 
68 #include <net/bpf.h>
69 
70 #include <net80211/ieee80211_var.h>
71 #include <net80211/ieee80211_regdomain.h>
72 
73 #ifdef INET
74 #include <netinet/in.h>
75 #include <netinet/if_ether.h>
76 #endif /* INET */
77 
78 #include <dev/mwl/if_mwlvar.h>
79 #include <dev/mwl/mwldiag.h>
80 
81 /* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
82 #define	MS(v,x)	(((v) & x) >> x##_S)
83 #define	SM(v,x)	(((v) << x##_S) & x)
84 
85 static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
86 		    const char name[IFNAMSIZ], int unit, int opmode,
87 		    int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
88 		    const uint8_t mac[IEEE80211_ADDR_LEN]);
89 static void	mwl_vap_delete(struct ieee80211vap *);
90 static int	mwl_setupdma(struct mwl_softc *);
91 static int	mwl_hal_reset(struct mwl_softc *sc);
92 static int	mwl_init_locked(struct mwl_softc *);
93 static void	mwl_init(void *);
94 static void	mwl_stop_locked(struct ifnet *, int);
95 static int	mwl_reset(struct ieee80211vap *, u_long);
96 static void	mwl_stop(struct ifnet *, int);
97 static void	mwl_start(struct ifnet *);
98 static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
99 			const struct ieee80211_bpf_params *);
100 static int	mwl_media_change(struct ifnet *);
101 static void	mwl_watchdog(struct ifnet *);
102 static int	mwl_ioctl(struct ifnet *, u_long, caddr_t);
103 static void	mwl_radar_proc(void *, int);
104 static void	mwl_chanswitch_proc(void *, int);
105 static void	mwl_bawatchdog_proc(void *, int);
106 static int	mwl_key_alloc(struct ieee80211vap *,
107 			struct ieee80211_key *,
108 			ieee80211_keyix *, ieee80211_keyix *);
109 static int	mwl_key_delete(struct ieee80211vap *,
110 			const struct ieee80211_key *);
111 static int	mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
112 			const uint8_t mac[IEEE80211_ADDR_LEN]);
113 static int	mwl_mode_init(struct mwl_softc *);
114 static void	mwl_update_mcast(struct ifnet *);
115 static void	mwl_update_promisc(struct ifnet *);
116 static void	mwl_updateslot(struct ifnet *);
117 static int	mwl_beacon_setup(struct ieee80211vap *);
118 static void	mwl_beacon_update(struct ieee80211vap *, int);
119 #ifdef MWL_HOST_PS_SUPPORT
120 static void	mwl_update_ps(struct ieee80211vap *, int);
121 static int	mwl_set_tim(struct ieee80211_node *, int);
122 #endif
123 static int	mwl_dma_setup(struct mwl_softc *);
124 static void	mwl_dma_cleanup(struct mwl_softc *);
125 static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
126 		    const uint8_t [IEEE80211_ADDR_LEN]);
127 static void	mwl_node_cleanup(struct ieee80211_node *);
128 static void	mwl_node_drain(struct ieee80211_node *);
129 static void	mwl_node_getsignal(const struct ieee80211_node *,
130 			int8_t *, int8_t *);
131 static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
132 			struct ieee80211_mimo_info *);
133 static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
134 static void	mwl_rx_proc(void *, int);
135 static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
136 static int	mwl_tx_setup(struct mwl_softc *, int, int);
137 static int	mwl_wme_update(struct ieee80211com *);
138 static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
139 static void	mwl_tx_cleanup(struct mwl_softc *);
140 static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
141 static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
142 			     struct mwl_txbuf *, struct mbuf *);
143 static void	mwl_tx_proc(void *, int);
144 static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
145 static void	mwl_draintxq(struct mwl_softc *);
146 static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
147 static int	mwl_recv_action(struct ieee80211_node *,
148 			const struct ieee80211_frame *,
149 			const uint8_t *, const uint8_t *);
150 static int	mwl_addba_request(struct ieee80211_node *,
151 			struct ieee80211_tx_ampdu *, int dialogtoken,
152 			int baparamset, int batimeout);
153 static int	mwl_addba_response(struct ieee80211_node *,
154 			struct ieee80211_tx_ampdu *, int status,
155 			int baparamset, int batimeout);
156 static void	mwl_addba_stop(struct ieee80211_node *,
157 			struct ieee80211_tx_ampdu *);
158 static int	mwl_startrecv(struct mwl_softc *);
159 static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
160 			struct ieee80211_channel *);
161 static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
162 static void	mwl_scan_start(struct ieee80211com *);
163 static void	mwl_scan_end(struct ieee80211com *);
164 static void	mwl_set_channel(struct ieee80211com *);
165 static int	mwl_peerstadb(struct ieee80211_node *,
166 			int aid, int staid, MWL_HAL_PEERINFO *pi);
167 static int	mwl_localstadb(struct ieee80211vap *);
168 static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
169 static int	allocstaid(struct mwl_softc *sc, int aid);
170 static void	delstaid(struct mwl_softc *sc, int staid);
171 static void	mwl_newassoc(struct ieee80211_node *, int);
172 static void	mwl_agestations(void *);
173 static int	mwl_setregdomain(struct ieee80211com *,
174 			struct ieee80211_regdomain *, int,
175 			struct ieee80211_channel []);
176 static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
177 			struct ieee80211_channel []);
178 static int	mwl_getchannels(struct mwl_softc *);
179 
180 static void	mwl_sysctlattach(struct mwl_softc *);
181 static void	mwl_announce(struct mwl_softc *);
182 
183 SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");
184 
185 static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
186 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
187 	    0, "rx descriptors allocated");
188 static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
189 SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf,
190 	    0, "rx buffers allocated");
191 TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf);
192 static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
193 SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf,
194 	    0, "tx buffers allocated");
195 TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf);
196 static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
197 SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce,
198 	    0, "tx buffers to send at once");
199 TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce);
200 static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
201 SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota,
202 	    0, "max rx buffers to process per interrupt");
203 TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota);
204 static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
205 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow,
206 	    0, "min free rx buffers before restarting traffic");
207 TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow);
208 
209 #ifdef MWL_DEBUG
210 static	int mwl_debug = 0;
211 SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug,
212 	    0, "control debugging printfs");
213 TUNABLE_INT("hw.mwl.debug", &mwl_debug);
214 enum {
215 	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
216 	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
217 	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
218 	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
219 	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
220 	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
221 	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
222 	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
223 	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
224 	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
225 	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
226 	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
227 	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
228 	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
229 	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
230 	MWL_DEBUG_ANY		= 0xffffffff
231 };
232 #define	IS_BEACON(wh) \
233     ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
234 	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
235 #define	IFF_DUMPPKTS_RECV(sc, wh) \
236     (((sc->sc_debug & MWL_DEBUG_RECV) && \
237       ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
238      (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
239 #define	IFF_DUMPPKTS_XMIT(sc) \
240 	((sc->sc_debug & MWL_DEBUG_XMIT) || \
241 	 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
242 #define	DPRINTF(sc, m, fmt, ...) do {				\
243 	if (sc->sc_debug & (m))					\
244 		printf(fmt, __VA_ARGS__);			\
245 } while (0)
246 #define	KEYPRINTF(sc, hk, mac) do {				\
247 	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
248 		mwl_keyprint(sc, __func__, hk, mac);		\
249 } while (0)
250 static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
251 static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
252 #else
253 #define	IFF_DUMPPKTS_RECV(sc, wh) \
254 	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
255 #define	IFF_DUMPPKTS_XMIT(sc) \
256 	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
257 #define	DPRINTF(sc, m, fmt, ...) do {				\
258 	(void) sc;						\
259 } while (0)
260 #define	KEYPRINTF(sc, k, mac) do {				\
261 	(void) sc;						\
262 } while (0)
263 #endif
264 
265 MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
266 
267 /*
268  * Each packet has fixed front matter: a 2-byte length
269  * of the payload, followed by a 4-address 802.11 header
270  * (regardless of the actual header and always w/o any
271  * QoS header).  The payload then follows.
272  */
struct mwltxrec {
	uint16_t fwlen;		/* length of the payload, consumed by the f/w */
	struct ieee80211_frame_addr4 wh;	/* always a 4-address header, no QoS field */
} __packed;
277 
278 /*
279  * Read/Write shorthands for accesses to BAR 0.  Note
280  * that all BAR 1 operations are done in the "hal" and
281  * there should be no reference to them here.
282  */
283 static __inline uint32_t
284 RD4(struct mwl_softc *sc, bus_size_t off)
285 {
286 	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
287 }
288 
/* 32-bit register write to BAR 0 at byte offset 'off'. */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
294 
295 int
296 mwl_attach(uint16_t devid, struct mwl_softc *sc)
297 {
298 	struct ifnet *ifp;
299 	struct ieee80211com *ic;
300 	struct mwl_hal *mh;
301 	int error = 0;
302 
303 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
304 
305 	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
306 	if (ifp == NULL) {
307 		device_printf(sc->sc_dev, "can not if_alloc()\n");
308 		return ENOSPC;
309 	}
310 	ic = ifp->if_l2com;
311 
312 	/* set these up early for if_printf use */
313 	if_initname(ifp, device_get_name(sc->sc_dev),
314 		device_get_unit(sc->sc_dev));
315 
316 	mh = mwl_hal_attach(sc->sc_dev, devid,
317 	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
318 	if (mh == NULL) {
319 		if_printf(ifp, "unable to attach HAL\n");
320 		error = EIO;
321 		goto bad;
322 	}
323 	sc->sc_mh = mh;
324 	/*
325 	 * Load firmware so we can get setup.  We arbitrarily
326 	 * pick station firmware; we'll re-load firmware as
327 	 * needed so setting up the wrong mode isn't a big deal.
328 	 */
329 	if (mwl_hal_fwload(mh, NULL) != 0) {
330 		if_printf(ifp, "unable to setup builtin firmware\n");
331 		error = EIO;
332 		goto bad1;
333 	}
334 	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
335 		if_printf(ifp, "unable to fetch h/w specs\n");
336 		error = EIO;
337 		goto bad1;
338 	}
339 	error = mwl_getchannels(sc);
340 	if (error != 0)
341 		goto bad1;
342 
343 	sc->sc_txantenna = 0;		/* h/w default */
344 	sc->sc_rxantenna = 0;		/* h/w default */
345 	sc->sc_invalid = 0;		/* ready to go, enable int handling */
346 	sc->sc_ageinterval = MWL_AGEINTERVAL;
347 
348 	/*
349 	 * Allocate tx+rx descriptors and populate the lists.
350 	 * We immediately push the information to the firmware
351 	 * as otherwise it gets upset.
352 	 */
353 	error = mwl_dma_setup(sc);
354 	if (error != 0) {
355 		if_printf(ifp, "failed to setup descriptors: %d\n", error);
356 		goto bad1;
357 	}
358 	error = mwl_setupdma(sc);	/* push to firmware */
359 	if (error != 0)			/* NB: mwl_setupdma prints msg */
360 		goto bad1;
361 
362 	callout_init(&sc->sc_timer, CALLOUT_MPSAFE);
363 
364 	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
365 		taskqueue_thread_enqueue, &sc->sc_tq);
366 	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
367 		"%s taskq", ifp->if_xname);
368 
369 	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
370 	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
371 	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
372 	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);
373 
374 	/* NB: insure BK queue is the lowest priority h/w queue */
375 	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
376 		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
377 			ieee80211_wme_acnames[WME_AC_BK]);
378 		error = EIO;
379 		goto bad2;
380 	}
381 	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
382 	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
383 	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
384 		/*
385 		 * Not enough hardware tx queues to properly do WME;
386 		 * just punt and assign them all to the same h/w queue.
387 		 * We could do a better job of this if, for example,
388 		 * we allocate queues when we switch from station to
389 		 * AP mode.
390 		 */
391 		if (sc->sc_ac2q[WME_AC_VI] != NULL)
392 			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
393 		if (sc->sc_ac2q[WME_AC_BE] != NULL)
394 			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
395 		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
396 		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
397 		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
398 	}
399 	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);
400 
401 	ifp->if_softc = sc;
402 	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
403 	ifp->if_start = mwl_start;
404 	ifp->if_watchdog = mwl_watchdog;
405 	ifp->if_ioctl = mwl_ioctl;
406 	ifp->if_init = mwl_init;
407 	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
408 	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
409 	IFQ_SET_READY(&ifp->if_snd);
410 
411 	ic->ic_ifp = ifp;
412 	/* XXX not right but it's not used anywhere important */
413 	ic->ic_phytype = IEEE80211_T_OFDM;
414 	ic->ic_opmode = IEEE80211_M_STA;
415 	ic->ic_caps =
416 		  IEEE80211_C_STA		/* station mode supported */
417 		| IEEE80211_C_HOSTAP		/* hostap mode */
418 		| IEEE80211_C_MONITOR		/* monitor mode */
419 #if 0
420 		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
421 		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
422 #endif
423 		| IEEE80211_C_MBSS		/* mesh point link mode */
424 		| IEEE80211_C_WDS		/* WDS supported */
425 		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
426 		| IEEE80211_C_SHSLOT		/* short slot time supported */
427 		| IEEE80211_C_WME		/* WME/WMM supported */
428 		| IEEE80211_C_BURST		/* xmit bursting supported */
429 		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
430 		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
431 		| IEEE80211_C_TXFRAG		/* handle tx frags */
432 		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
433 		| IEEE80211_C_DFS		/* DFS supported */
434 		;
435 
436 	ic->ic_htcaps =
437 		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
438 		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
439 		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
440 		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
441 		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
442 #if MWL_AGGR_SIZE == 7935
443 		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
444 #else
445 		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
446 #endif
447 #if 0
448 		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
449 		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
450 #endif
451 		/* s/w capabilities */
452 		| IEEE80211_HTC_HT		/* HT operation */
453 		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
454 		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
455 		| IEEE80211_HTC_SMPS		/* SMPS available */
456 		;
457 
458 	/*
459 	 * Mark h/w crypto support.
460 	 * XXX no way to query h/w support.
461 	 */
462 	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
463 			  |  IEEE80211_CRYPTO_AES_CCM
464 			  |  IEEE80211_CRYPTO_TKIP
465 			  |  IEEE80211_CRYPTO_TKIPMIC
466 			  ;
467 	/*
468 	 * Transmit requires space in the packet for a special
469 	 * format transmit record and optional padding between
470 	 * this record and the payload.  Ask the net80211 layer
471 	 * to arrange this when encapsulating packets so we can
472 	 * add it efficiently.
473 	 */
474 	ic->ic_headroom = sizeof(struct mwltxrec) -
475 		sizeof(struct ieee80211_frame);
476 
477 	/* call MI attach routine. */
478 	ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
479 	ic->ic_setregdomain = mwl_setregdomain;
480 	ic->ic_getradiocaps = mwl_getradiocaps;
481 	/* override default methods */
482 	ic->ic_raw_xmit = mwl_raw_xmit;
483 	ic->ic_newassoc = mwl_newassoc;
484 	ic->ic_updateslot = mwl_updateslot;
485 	ic->ic_update_mcast = mwl_update_mcast;
486 	ic->ic_update_promisc = mwl_update_promisc;
487 	ic->ic_wme.wme_update = mwl_wme_update;
488 
489 	ic->ic_node_alloc = mwl_node_alloc;
490 	sc->sc_node_cleanup = ic->ic_node_cleanup;
491 	ic->ic_node_cleanup = mwl_node_cleanup;
492 	sc->sc_node_drain = ic->ic_node_drain;
493 	ic->ic_node_drain = mwl_node_drain;
494 	ic->ic_node_getsignal = mwl_node_getsignal;
495 	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;
496 
497 	ic->ic_scan_start = mwl_scan_start;
498 	ic->ic_scan_end = mwl_scan_end;
499 	ic->ic_set_channel = mwl_set_channel;
500 
501 	sc->sc_recv_action = ic->ic_recv_action;
502 	ic->ic_recv_action = mwl_recv_action;
503 	sc->sc_addba_request = ic->ic_addba_request;
504 	ic->ic_addba_request = mwl_addba_request;
505 	sc->sc_addba_response = ic->ic_addba_response;
506 	ic->ic_addba_response = mwl_addba_response;
507 	sc->sc_addba_stop = ic->ic_addba_stop;
508 	ic->ic_addba_stop = mwl_addba_stop;
509 
510 	ic->ic_vap_create = mwl_vap_create;
511 	ic->ic_vap_delete = mwl_vap_delete;
512 
513 	ieee80211_radiotap_attach(ic,
514 	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
515 		MWL_TX_RADIOTAP_PRESENT,
516 	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
517 		MWL_RX_RADIOTAP_PRESENT);
518 	/*
519 	 * Setup dynamic sysctl's now that country code and
520 	 * regdomain are available from the hal.
521 	 */
522 	mwl_sysctlattach(sc);
523 
524 	if (bootverbose)
525 		ieee80211_announce(ic);
526 	mwl_announce(sc);
527 	return 0;
528 bad2:
529 	mwl_dma_cleanup(sc);
530 bad1:
531 	mwl_hal_detach(mh);
532 bad:
533 	if_free(ifp);
534 	sc->sc_invalid = 1;
535 	return error;
536 }
537 
/*
 * Reverse the work of mwl_attach: stop the device and tear down
 * net80211, DMA, tx queue, and HAL state in dependency order.
 * Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	mwl_stop(ifp, 1);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	mwl_dma_cleanup(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	if_free(ifp);

	return 0;
}
568 
569 /*
570  * MAC address handling for multiple BSS on the same radio.
571  * The first vap uses the MAC address from the EEPROM.  For
572  * subsequent vap's we set the U/L bit (bit 1) in the MAC
573  * address and use the next six bits as an index.
574  */
575 static void
576 assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
577 {
578 	int i;
579 
580 	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
581 		/* NB: we only do this if h/w supports multiple bssid */
582 		for (i = 0; i < 32; i++)
583 			if ((sc->sc_bssidmask & (1<<i)) == 0)
584 				break;
585 		if (i != 0)
586 			mac[0] |= (i << 2)|0x2;
587 	} else
588 		i = 0;
589 	sc->sc_bssidmask |= 1<<i;
590 	if (i == 0)
591 		sc->sc_nbssid0++;
592 }
593 
594 static void
595 reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
596 {
597 	int i = mac[0] >> 2;
598 	if (i != 0 || --sc->sc_nbssid0 == 0)
599 		sc->sc_bssidmask &= ~(1<<i);
600 }
601 
/*
 * net80211 callback to create a vap.  Allocates a HAL vap handle
 * (except for WDS/monitor which piggyback on an AP vap or none),
 * assigns a MAC address, installs driver method overrides, and
 * updates the softc vap counters and overall operating mode.
 * Returns NULL on failure with any partially-acquired state
 * (HAL vap, address slot) released.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic,
	const char name[IFNAMSIZ], int unit, int opmode, int flags,
	const uint8_t bssid[IEEE80211_ADDR_LEN],
	const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			/* undo the address slot taken above */
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		/* unsupported operating modes */
		return NULL;
	}

	mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (mvp == NULL) {
		/* release the HAL vap and address slot acquired above */
		if (hvap != NULL) {
			mwl_hal_delvap(hvap);
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
		}
		/* XXX msg */
		return NULL;
	}
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
	if (hvap != NULL)
		IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
	/* override with driver methods; save originals for chaining */
	mvp->mv_newstate = vap->iv_newstate;
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	/* host-side power-save handling only makes sense for AP/MBSS */
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
741 
/*
 * net80211 callback to destroy a vap.  Quiesces interrupts while
 * the vap is torn down, releases the HAL vap and address slot,
 * updates the softc vap counters, and flushes pending tx frames
 * belonging to the vap.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ifnet *parent = vap->iv_ic->ic_ifp;
	struct mwl_softc *sc = parent->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (parent->if_drv_flags & IFF_DRV_RUNNING) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		/* NB: WDS vaps have no hal vap handle to release */
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	/* re-enable interrupts if the device is still running */
	if (parent->if_drv_flags & IFF_DRV_RUNNING)
		mwl_hal_intrset(mh, sc->sc_imask);
}
783 
/* Power suspend: stop the device; state is rebuilt in mwl_resume. */
void
mwl_suspend(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	mwl_stop(ifp, 1);
}
794 
/*
 * Power resume: re-initialize the device, but only if the
 * interface was administratively up when we suspended.
 */
void
mwl_resume(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	if (ifp->if_flags & IFF_UP)
		mwl_init(sc);
}
806 
/* System shutdown hook: quiesce the hardware before power-off/reboot. */
void
mwl_shutdown(void *arg)
{
	struct mwl_softc *sc = arg;

	mwl_stop(sc->sc_ifp, 1);
}
814 
815 /*
816  * Interrupt handler.  Most of the actual processing is deferred.
817  */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	/* NB: rx/tx completion work is deferred to taskqueue tasks */
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;	/* NB: MAC events currently ignored */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;	/* NB: queue-full events currently ignored */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
873 
/*
 * Deferred task for a radar-detect interrupt: bump the statistic
 * and hand the event to net80211's DFS machinery (under the com
 * lock), which handles vacating the channel.
 */
static void
mwl_radar_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
	    __func__, pending);

	sc->sc_stats.mst_radardetect++;
	/* XXX stop h/w BA streams? */

	IEEE80211_LOCK(ic);
	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
	IEEE80211_UNLOCK(ic);
}
891 
892 static void
893 mwl_chanswitch_proc(void *arg, int pending)
894 {
895 	struct mwl_softc *sc = arg;
896 	struct ifnet *ifp = sc->sc_ifp;
897 	struct ieee80211com *ic = ifp->if_l2com;
898 
899 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
900 	    __func__, pending);
901 
902 	IEEE80211_LOCK(ic);
903 	sc->sc_csapending = 0;
904 	ieee80211_csa_completeswitch(ic);
905 	IEEE80211_UNLOCK(ic);
906 }
907 
908 static void
909 mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
910 {
911 	struct ieee80211_node *ni = sp->data[0];
912 
913 	/* send DELBA and drop the stream */
914 	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
915 }
916 
/*
 * Block-ack watchdog task: the firmware flagged one or more
 * stalled BA streams.  Fetch the watchdog bitmap and tear down
 * the offending stream(s) via mwl_bawatchdog.
 */
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	if (bitmap == 0xff) {
		/*
		 * 0xff appears to be a firmware sentinel meaning "all
		 * streams" — TODO confirm against the HAL/firmware docs.
		 */
		n = 0;
		/* disable all ba streams */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/*
		 * NB: 0xaa is ignored; presumably another firmware
		 * sentinel ("nothing to do") — TODO confirm.
		 */
		/* disable a single ba stream */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}
961 
962 /*
963  * Convert net80211 channel to a HAL channel.
964  */
965 static void
966 mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
967 {
968 	hc->channel = chan->ic_ieee;
969 
970 	*(uint32_t *)&hc->channelFlags = 0;
971 	if (IEEE80211_IS_CHAN_2GHZ(chan))
972 		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
973 	else if (IEEE80211_IS_CHAN_5GHZ(chan))
974 		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
975 	if (IEEE80211_IS_CHAN_HT40(chan)) {
976 		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
977 		if (IEEE80211_IS_CHAN_HT40U(chan))
978 			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
979 		else
980 			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
981 	} else
982 		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
983 	/* XXX 10MHz channels */
984 }
985 
986 /*
987  * Inform firmware of our tx/rx dma setup.  The BAR 0
988  * writes below are for compatibility with older firmware.
989  * For current firmware we send this information with a
990  * cmd block via mwl_hal_sethwdma.
991  */
static int
mwl_setupdma(struct mwl_softc *sc)
{
	int error, i;

	/* Rx descriptor ring; program both read and write pointers. */
	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);

	/*
	 * Tx descriptor (WCB) rings; the trailing ACK queues are not
	 * handed to the firmware here.
	 */
	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
		struct mwl_txq *txq = &sc->sc_txq[i];
		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
	}
	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;

	/* Current firmware receives the same layout via a command block. */
	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to setup tx/rx dma; hal status %u\n", error);
		/* XXX */
	}
	return error;
}
1017 
1018 /*
1019  * Inform firmware of tx rate parameters.
1020  * Called after a channel change.
1021  */
1022 static int
1023 mwl_setcurchanrates(struct mwl_softc *sc)
1024 {
1025 	struct ifnet *ifp = sc->sc_ifp;
1026 	struct ieee80211com *ic = ifp->if_l2com;
1027 	const struct ieee80211_rateset *rs;
1028 	MWL_HAL_TXRATE rates;
1029 
1030 	memset(&rates, 0, sizeof(rates));
1031 	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1032 	/* rate used to send management frames */
1033 	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1034 	/* rate used to send multicast frames */
1035 	rates.McastRate = rates.MgtRate;
1036 
1037 	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1038 }
1039 
1040 /*
1041  * Inform firmware of tx rate parameters.  Called whenever
1042  * user-settable params change and after a channel change.
1043  */
1044 static int
1045 mwl_setrates(struct ieee80211vap *vap)
1046 {
1047 	struct mwl_vap *mvp = MWL_VAP(vap);
1048 	struct ieee80211_node *ni = vap->iv_bss;
1049 	const struct ieee80211_txparam *tp = ni->ni_txparms;
1050 	MWL_HAL_TXRATE rates;
1051 
1052 	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1053 
1054 	/*
1055 	 * Update the h/w rate map.
1056 	 * NB: 0x80 for MCS is passed through unchanged
1057 	 */
1058 	memset(&rates, 0, sizeof(rates));
1059 	/* rate used to send management frames */
1060 	rates.MgtRate = tp->mgmtrate;
1061 	/* rate used to send multicast frames */
1062 	rates.McastRate = tp->mcastrate;
1063 
1064 	/* while here calculate EAPOL fixed rate cookie */
1065 	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1066 
1067 	return mwl_hal_settxrate(mvp->mv_hvap,
1068 	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1069 		RATE_FIXED : RATE_AUTO, &rates);
1070 }
1071 
1072 /*
1073  * Setup a fixed xmit rate cookie for EAPOL frames.
1074  */
1075 static void
1076 mwl_seteapolformat(struct ieee80211vap *vap)
1077 {
1078 	struct mwl_vap *mvp = MWL_VAP(vap);
1079 	struct ieee80211_node *ni = vap->iv_bss;
1080 	enum ieee80211_phymode mode;
1081 	uint8_t rate;
1082 
1083 	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1084 
1085 	mode = ieee80211_chan2mode(ni->ni_chan);
1086 	/*
1087 	 * Use legacy rates when operating a mixed HT+non-HT bss.
1088 	 * NB: this may violate POLA for sta and wds vap's.
1089 	 */
1090 	if (mode == IEEE80211_MODE_11NA &&
1091 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1092 		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1093 	else if (mode == IEEE80211_MODE_11NG &&
1094 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1095 		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1096 	else
1097 		rate = vap->iv_txparms[mode].mgmtrate;
1098 
1099 	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1100 }
1101 
1102 /*
1103  * Map SKU+country code to region code for radar bin'ing.
1104  */
1105 static int
1106 mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1107 {
1108 	switch (rd->regdomain) {
1109 	case SKU_FCC:
1110 	case SKU_FCC3:
1111 		return DOMAIN_CODE_FCC;
1112 	case SKU_CA:
1113 		return DOMAIN_CODE_IC;
1114 	case SKU_ETSI:
1115 	case SKU_ETSI2:
1116 	case SKU_ETSI3:
1117 		if (rd->country == CTRY_SPAIN)
1118 			return DOMAIN_CODE_SPAIN;
1119 		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1120 			return DOMAIN_CODE_FRANCE;
1121 		/* XXX force 1.3.1 radar type */
1122 		return DOMAIN_CODE_ETSI_131;
1123 	case SKU_JAPAN:
1124 		return DOMAIN_CODE_MKK;
1125 	case SKU_ROW:
1126 		return DOMAIN_CODE_DGT;	/* Taiwan */
1127 	case SKU_APAC:
1128 	case SKU_APAC2:
1129 	case SKU_APAC3:
1130 		return DOMAIN_CODE_AUS;	/* Australia */
1131 	}
1132 	/* XXX KOREA? */
1133 	return DOMAIN_CODE_FCC;			/* XXX? */
1134 }
1135 
/*
 * Push vap-independent state to the firmware: antenna config,
 * radio, WMM, current channel, rate adaptation, region code, and
 * aggregation settings.  Always returns 1 (callers treat the
 * result as a boolean success flag).
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1160 
/*
 * Bring the hardware up: quiesce any previous state, push
 * vap-independent settings, start the receive machinery, and
 * enable interrupts.  Returns 0 on success or an errno.
 * Caller must hold the softc lock.
 */
static int
mwl_init_locked(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop_locked(ifp, 0);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		if_printf(ifp, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.  MAC_EVENT and QUEUE_EMPTY are
	 * deliberately left disabled (see the interrupt handler).
	 * NB: MACREQ_... is the spelling used by the HAL header.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	mwl_hal_intrset(mh, sc->sc_imask);

	return 0;
}
1220 
1221 static void
1222 mwl_init(void *arg)
1223 {
1224 	struct mwl_softc *sc = arg;
1225 	struct ifnet *ifp = sc->sc_ifp;
1226 	struct ieee80211com *ic = ifp->if_l2com;
1227 	int error = 0;
1228 
1229 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1230 		__func__, ifp->if_flags);
1231 
1232 	MWL_LOCK(sc);
1233 	error = mwl_init_locked(sc);
1234 	MWL_UNLOCK(sc);
1235 
1236 	if (error == 0)
1237 		ieee80211_start_all(ic);	/* start all vap's */
1238 }
1239 
1240 static void
1241 mwl_stop_locked(struct ifnet *ifp, int disable)
1242 {
1243 	struct mwl_softc *sc = ifp->if_softc;
1244 
1245 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1246 		__func__, sc->sc_invalid, ifp->if_flags);
1247 
1248 	MWL_LOCK_ASSERT(sc);
1249 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1250 		/*
1251 		 * Shutdown the hardware and driver.
1252 		 */
1253 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1254 		ifp->if_timer = 0;
1255 		mwl_draintxq(sc);
1256 	}
1257 }
1258 
1259 static void
1260 mwl_stop(struct ifnet *ifp, int disable)
1261 {
1262 	struct mwl_softc *sc = ifp->if_softc;
1263 
1264 	MWL_LOCK(sc);
1265 	mwl_stop_locked(ifp, disable);
1266 	MWL_UNLOCK(sc);
1267 }
1268 
/*
 * Push vap-specific state to the firmware after a state change
 * or reset: tx rates, RTS threshold, short-GI, HT protection,
 * and (for hostap/mbss/ibss vap's in RUN) ap mode plus beacon
 * setup.  Returns 0 or an errno from mwl_beacon_setup.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1298 
1299 /*
1300  * Reset the hardware w/o losing operational state.
 * Used to reset or reload hardware state for a vap.
1302  */
1303 static int
1304 mwl_reset(struct ieee80211vap *vap, u_long cmd)
1305 {
1306 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1307 	int error = 0;
1308 
1309 	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
1310 		struct ieee80211com *ic = vap->iv_ic;
1311 		struct ifnet *ifp = ic->ic_ifp;
1312 		struct mwl_softc *sc = ifp->if_softc;
1313 		struct mwl_hal *mh = sc->sc_mh;
1314 
1315 		/* XXX handle DWDS sta vap change */
1316 		/* XXX do we need to disable interrupts? */
1317 		mwl_hal_intrset(mh, 0);		/* disable interrupts */
1318 		error = mwl_reset_vap(vap, vap->iv_state);
1319 		mwl_hal_intrset(mh, sc->sc_imask);
1320 	}
1321 	return error;
1322 }
1323 
1324 /*
1325  * Allocate a tx buffer for sending a frame.  The
1326  * packet is assumed to have the WME AC stored so
1327  * we can use it to select the appropriate h/w queue.
1328  */
1329 static struct mwl_txbuf *
1330 mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1331 {
1332 	struct mwl_txbuf *bf;
1333 
1334 	/*
1335 	 * Grab a TX buffer and associated resources.
1336 	 */
1337 	MWL_TXQ_LOCK(txq);
1338 	bf = STAILQ_FIRST(&txq->free);
1339 	if (bf != NULL) {
1340 		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1341 		txq->nfree--;
1342 	}
1343 	MWL_TXQ_UNLOCK(txq);
1344 	if (bf == NULL)
1345 		DPRINTF(sc, MWL_DEBUG_XMIT,
1346 		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1347 	return bf;
1348 }
1349 
1350 /*
1351  * Return a tx buffer to the queue it came from.  Note there
1352  * are two cases because we must preserve the order of buffers
1353  * as it reflects the fixed order of descriptors in memory
1354  * (the firmware pre-fetches descriptors so we cannot reorder).
1355  */
1356 static void
1357 mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1358 {
1359 	bf->bf_m = NULL;
1360 	bf->bf_node = NULL;
1361 	MWL_TXQ_LOCK(txq);
1362 	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1363 	txq->nfree++;
1364 	MWL_TXQ_UNLOCK(txq);
1365 }
1366 
1367 static void
1368 mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1369 {
1370 	bf->bf_m = NULL;
1371 	bf->bf_node = NULL;
1372 	MWL_TXQ_LOCK(txq);
1373 	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1374 	txq->nfree++;
1375 	MWL_TXQ_UNLOCK(txq);
1376 }
1377 
/*
 * Transmit dequeue: drain the interface send queue, mapping each
 * frame to the h/w queue chosen by the 802.11 layer's WME
 * classification, and kick the firmware in batches of
 * mwl_txcoalesce frames.
 */
static void
mwl_start(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	nqueued = 0;
	for (;;) {
		bf = NULL;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			/* out of buffers: free the frame and node ref */
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			/* XXX blocks other traffic */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 * NB(review): on failure only the node ref is released
		 * here; presumably mwl_tx_start consumes the mbuf —
		 * TODO confirm (mwl_raw_xmit relies on the same
		 * convention).
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			ifp->if_oerrors++;
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1460 
/*
 * Transmit a raw (caller-formed) 802.11 frame, e.g. frames
 * injected via bpf.  Returns 0 on success or an errno; the
 * mbuf and the node reference are consumed in all cases (see
 * the hedged note at the mwl_tx_start failure path).
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		/* XXX blocks other traffic */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ieee80211_free_node(ni);
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 * NB(review): the mbuf is not freed here on failure;
	 * presumably mwl_tx_start consumes it — TODO confirm
	 * (mwl_start uses the same convention).
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		ifp->if_oerrors++;
		mwl_puttxbuf_head(txq, bf);

		ieee80211_free_node(ni);
		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1520 
1521 static int
1522 mwl_media_change(struct ifnet *ifp)
1523 {
1524 	struct ieee80211vap *vap = ifp->if_softc;
1525 	int error;
1526 
1527 	error = ieee80211_media_change(ifp);
1528 	/* NB: only the fixed rate can change and that doesn't need a reset */
1529 	if (error == ENETRESET) {
1530 		mwl_setrates(vap);
1531 		error = 0;
1532 	}
1533 	return error;
1534 }
1535 
1536 #ifdef MWL_DEBUG
/*
 * Debug helper: dump a HAL key (index, cipher name, key bytes,
 * TKIP rx/tx MICs, and flags) for the given peer mac address.
 * NB: prints raw key material — compiled only under MWL_DEBUG.
 * NB(review): hk->keyTypeId indexes ciphers[] directly; the key
 * plumbing in this file only produces the three KEY_TYPE_ID_*
 * values, but out-of-range ids would read past the table.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
1562 #endif
1563 
1564 /*
1565  * Allocate a key cache slot for a unicast key.  The
1566  * firmware handles key allocation and every station is
1567  * guaranteed key space so we are always successful.
1568  */
1569 static int
1570 mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1571 	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1572 {
1573 	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1574 
1575 	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1576 	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
1577 		if (!(&vap->iv_nw_keys[0] <= k &&
1578 		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1579 			/* should not happen */
1580 			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1581 				"%s: bogus group key\n", __func__);
1582 			return 0;
1583 		}
1584 		/* give the caller what they requested */
1585 		*keyix = *rxkeyix = k - vap->iv_nw_keys;
1586 	} else {
1587 		/*
1588 		 * Firmware handles key allocation.
1589 		 */
1590 		*keyix = *rxkeyix = 0;
1591 	}
1592 	return 1;
1593 }
1594 
1595 /*
1596  * Delete a key entry allocated by mwl_key_alloc.
1597  */
1598 static int
1599 mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1600 {
1601 	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1602 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1603 	MWL_HAL_KEYVAL hk;
1604 	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1605 	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1606 
1607 	if (hvap == NULL) {
1608 		if (vap->iv_opmode != IEEE80211_M_WDS) {
1609 			/* XXX monitor mode? */
1610 			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1611 			    "%s: no hvap for opmode %d\n", __func__,
1612 			    vap->iv_opmode);
1613 			return 0;
1614 		}
1615 		hvap = MWL_VAP(vap)->mv_ap_hvap;
1616 	}
1617 
1618 	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1619 	    __func__, k->wk_keyix);
1620 
1621 	memset(&hk, 0, sizeof(hk));
1622 	hk.keyIndex = k->wk_keyix;
1623 	switch (k->wk_cipher->ic_cipher) {
1624 	case IEEE80211_CIPHER_WEP:
1625 		hk.keyTypeId = KEY_TYPE_ID_WEP;
1626 		break;
1627 	case IEEE80211_CIPHER_TKIP:
1628 		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1629 		break;
1630 	case IEEE80211_CIPHER_AES_CCM:
1631 		hk.keyTypeId = KEY_TYPE_ID_AES;
1632 		break;
1633 	default:
1634 		/* XXX should not happen */
1635 		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1636 		    __func__, k->wk_cipher->ic_cipher);
1637 		return 0;
1638 	}
1639 	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
1640 }
1641 
1642 static __inline int
1643 addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1644 {
1645 	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1646 		if (k->wk_flags & IEEE80211_KEY_XMIT)
1647 			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1648 		if (k->wk_flags & IEEE80211_KEY_RECV)
1649 			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1650 		return 1;
1651 	} else
1652 		return 0;
1653 }
1654 
1655 /*
1656  * Set the key cache contents for the specified key.  Key cache
1657  * slot(s) must already have been allocated by mwl_key_alloc.
1658  */
static int
mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vap's plumb keys through the ap's h/w vap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	/* Build the HAL key record from the net80211 key. */
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 *     just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS
		 * node setup so we will plumb the key to the wrong mac
		 * address (it'll be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1759 
/*
 * Unaligned little-endian reads; bytes are assembled one at a
 * time so these are safe regardless of host byte order or
 * alignment constraints.
 */
#define LE_READ_2(p)				\
	((uint16_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8)))
#define LE_READ_4(p)				\
	((uint32_t)				\
	 ((((const uint8_t *)(p))[0]      ) |	\
	  (((const uint8_t *)(p))[1] <<  8) |	\
	  (((const uint8_t *)(p))[2] << 16) |	\
	  (((const uint8_t *)(p))[3] << 24)))
1771 
1772 /*
1773  * Set the multicast filter contents into the hardware.
1774  * XXX f/w has no support; just defer to the os.
1775  */
static void
mwl_setmcastfilter(struct mwl_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
#if 0
	/*
	 * NB(review): disabled code sketching real filter support;
	 * it appears to target a NetBSD-style ether_multi walk and
	 * would need porting before being enabled — TODO confirm.
	 */
	struct ether_multi *enm;
	struct ether_multistep estep;
	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
	uint8_t *mp;
	int nmc;

	mp = macs;
	nmc = 0;
	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* XXX Punt on ranges. */
		if (nmc == MWL_HAL_MCAST_MAX ||
		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
			ifp->if_flags |= IFF_ALLMULTI;
			return;
		}
		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
		mp += IEEE80211_ADDR_LEN, nmc++;
		ETHER_NEXT_MULTI(estep, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
#else
	/* XXX no mcast filter support; we get everything */
	ifp->if_flags |= IFF_ALLMULTI;
#endif
}
1808 
1809 static int
1810 mwl_mode_init(struct mwl_softc *sc)
1811 {
1812 	struct ifnet *ifp = sc->sc_ifp;
1813 	struct ieee80211com *ic = ifp->if_l2com;
1814 	struct mwl_hal *mh = sc->sc_mh;
1815 
1816 	/*
1817 	 * NB: Ignore promisc in hostap mode; it's set by the
1818 	 * bridge.  This is wrong but we have no way to
1819 	 * identify internal requests (from the bridge)
1820 	 * versus external requests such as for tcpdump.
1821 	 */
1822 	mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
1823 	    ic->ic_opmode != IEEE80211_M_HOSTAP);
1824 	mwl_setmcastfilter(sc);
1825 
1826 	return 0;
1827 }
1828 
1829 /*
1830  * Callback from the 802.11 layer after a multicast state change.
1831  */
1832 static void
1833 mwl_update_mcast(struct ifnet *ifp)
1834 {
1835 	struct mwl_softc *sc = ifp->if_softc;
1836 
1837 	mwl_setmcastfilter(sc);
1838 }
1839 
1840 /*
1841  * Callback from the 802.11 layer after a promiscuous mode change.
1842  * Note this interface does not check the operating mode as this
1843  * is an internal callback and we are expected to honor the current
1844  * state (e.g. this is used for setting the interface in promiscuous
1845  * mode when operating in hostap mode to do ACS).
1846  */
1847 static void
1848 mwl_update_promisc(struct ifnet *ifp)
1849 {
1850 	struct mwl_softc *sc = ifp->if_softc;
1851 
1852 	mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
1853 }
1854 
1855 /*
1856  * Callback from the 802.11 layer to update the slot time
1857  * based on the current setting.  We use it to notify the
1858  * firmware of ERP changes and the f/w takes care of things
1859  * like slot time and preamble.
1860  */
1861 static void
1862 mwl_updateslot(struct ifnet *ifp)
1863 {
1864 	struct mwl_softc *sc = ifp->if_softc;
1865 	struct ieee80211com *ic = ifp->if_l2com;
1866 	struct mwl_hal *mh = sc->sc_mh;
1867 	int prot;
1868 
1869 	/* NB: can be called early; suppress needless cmds */
1870 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1871 		return;
1872 
1873 	/*
1874 	 * Calculate the ERP flags.  The firwmare will use
1875 	 * this to carry out the appropriate measures.
1876 	 */
1877 	prot = 0;
1878 	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1879 		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1880 			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1881 		if (ic->ic_flags & IEEE80211_F_USEPROT)
1882 			prot |= IEEE80211_ERP_USE_PROTECTION;
1883 		if (ic->ic_flags & IEEE80211_F_USEBARKER)
1884 			prot |= IEEE80211_ERP_LONG_PREAMBLE;
1885 	}
1886 
1887 	DPRINTF(sc, MWL_DEBUG_RESET,
1888 	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1889 	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1890 	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1891 	    ic->ic_flags);
1892 
1893 	mwl_hal_setgprot(mh, prot);
1894 }
1895 
1896 /*
1897  * Setup the beacon frame.
1898  */
1899 static int
1900 mwl_beacon_setup(struct ieee80211vap *vap)
1901 {
1902 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1903 	struct ieee80211_node *ni = vap->iv_bss;
1904 	struct ieee80211_beacon_offsets bo;
1905 	struct mbuf *m;
1906 
1907 	m = ieee80211_beacon_alloc(ni, &bo);
1908 	if (m == NULL)
1909 		return ENOBUFS;
1910 	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1911 	m_free(m);
1912 
1913 	return 0;
1914 }
1915 
1916 /*
1917  * Update the beacon frame in response to a change.
1918  */
1919 static void
1920 mwl_beacon_update(struct ieee80211vap *vap, int item)
1921 {
1922 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1923 	struct ieee80211com *ic = vap->iv_ic;
1924 
1925 	KASSERT(hvap != NULL, ("no beacon"));
1926 	switch (item) {
1927 	case IEEE80211_BEACON_ERP:
1928 		mwl_updateslot(ic->ic_ifp);
1929 		break;
1930 	case IEEE80211_BEACON_HTINFO:
1931 		mwl_hal_setnprotmode(hvap,
1932 		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1933 		break;
1934 	case IEEE80211_BEACON_CAPS:
1935 	case IEEE80211_BEACON_WME:
1936 	case IEEE80211_BEACON_APPIE:
1937 	case IEEE80211_BEACON_CSA:
1938 		break;
1939 	case IEEE80211_BEACON_TIM:
1940 		/* NB: firmware always forms TIM */
1941 		return;
1942 	}
1943 	/* XXX retain beacon frame and update */
1944 	mwl_beacon_setup(vap);
1945 }
1946 
1947 static void
1948 mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1949 {
1950 	bus_addr_t *paddr = (bus_addr_t*) arg;
1951 	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1952 	*paddr = segs->ds_addr;
1953 }
1954 
1955 #ifdef MWL_HOST_PS_SUPPORT
1956 /*
1957  * Handle power save station occupancy changes.
1958  */
1959 static void
1960 mwl_update_ps(struct ieee80211vap *vap, int nsta)
1961 {
1962 	struct mwl_vap *mvp = MWL_VAP(vap);
1963 
1964 	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1965 		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1966 	mvp->mv_last_ps_sta = nsta;
1967 }
1968 
1969 /*
1970  * Handle associated station power save state changes.
1971  */
1972 static int
1973 mwl_set_tim(struct ieee80211_node *ni, int set)
1974 {
1975 	struct ieee80211vap *vap = ni->ni_vap;
1976 	struct mwl_vap *mvp = MWL_VAP(vap);
1977 
1978 	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1979 		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1980 		    IEEE80211_AID(ni->ni_associd), set);
1981 		return 1;
1982 	} else
1983 		return 0;
1984 }
1985 #endif /* MWL_HOST_PS_SUPPORT */
1986 
/*
 * Allocate and map a contiguous, bus-coherent DMA area holding
 * nbuf buffers of ndesc descriptors each (descsize bytes per
 * descriptor).  On success dd is filled in (tag, map, kva, bus
 * address) and the memory is zeroed; on failure all partially
 * acquired resources are released and dd is zeroed so callers
 * can detect the slot is unused.  Returns 0 or a bus_dma errno.
 */
static int
mwl_desc_setup(struct mwl_softc *sc, const char *name,
	struct mwl_descdma *dd,
	int nbuf, size_t bufsize, int ndesc, size_t descsize)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t *ds;
	int error;

	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
	    __func__, name, nbuf, (uintmax_t) bufsize,
	    ndesc, (uintmax_t) descsize);

	dd->dd_name = name;
	dd->dd_desc_len = nbuf * ndesc * descsize;

	/*
	 * Setup DMA descriptor area.  Single segment, page aligned,
	 * restricted to 32-bit addresses.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
		       PAGE_SIZE, 0,		/* alignment, bounds */
		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		       BUS_SPACE_MAXADDR,	/* highaddr */
		       NULL, NULL,		/* filter, filterarg */
		       dd->dd_desc_len,		/* maxsize */
		       1,			/* nsegments */
		       dd->dd_desc_len,		/* maxsegsize */
		       BUS_DMA_ALLOCNOW,	/* flags */
		       NULL,			/* lockfunc */
		       NULL,			/* lockarg */
		       &dd->dd_dmat);
	if (error != 0) {
		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
		return error;
	}

	/* allocate descriptors */
	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
	if (error != 0) {
		if_printf(ifp, "unable to create dmamap for %s descriptors, "
			"error %u\n", dd->dd_name, error);
		goto fail0;
	}

	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
				 &dd->dd_dmamap);
	if (error != 0) {
		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
			"error %u\n", nbuf * ndesc, dd->dd_name, error);
		goto fail1;
	}

	/* mwl_load_cb stashes the single segment's bus address */
	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
				dd->dd_desc, dd->dd_desc_len,
				mwl_load_cb, &dd->dd_desc_paddr,
				BUS_DMA_NOWAIT);
	if (error != 0) {
		if_printf(ifp, "unable to map %s descriptors, error %u\n",
			dd->dd_name, error);
		goto fail2;
	}

	ds = dd->dd_desc;
	memset(ds, 0, dd->dd_desc_len);
	DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);

	return 0;
	/* unwind in reverse order of acquisition */
fail2:
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
fail1:
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
fail0:
	bus_dma_tag_destroy(dd->dd_dmat);
	memset(dd, 0, sizeof(*dd));
	return error;
/* NOTE(review): stray leftover — DS2PHYS is defined/used further below,
 * not in this function; presumably harmless but TODO confirm and remove */
#undef DS2PHYS
}
2068 
/*
 * Release a DMA descriptor area set up by mwl_desc_setup.
 * Teardown is in reverse order of setup: unload, free the
 * memory, destroy the map, then the tag.  dd is zeroed so
 * repeated cleanup passes are safe.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2079 
2080 /*
2081  * Construct a tx q's free list.  The order of entries on
2082  * the list must reflect the physical layout of tx descriptors
2083  * because the firmware pre-fetches descriptors.
2084  *
2085  * XXX might be better to use indices into the buffer array.
2086  */
2087 static void
2088 mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2089 {
2090 	struct mwl_txbuf *bf;
2091 	int i;
2092 
2093 	bf = txq->dma.dd_bufptr;
2094 	STAILQ_INIT(&txq->free);
2095 	for (i = 0; i < mwl_txbuf; i++, bf++)
2096 		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2097 	txq->nfree = i;
2098 }
2099 
/* Translate a descriptor's kva within a DMA area to its bus address. */
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2102 
/*
 * Allocate tx descriptors (MWL_TXDESC per buffer) and the parallel
 * array of driver tx buffer state, create a dmamap per buffer, and
 * seed the queue's free list.  Returns 0 or an errno.
 * NB: on partial failure resources are not released here; the
 * caller (mwl_dma_setup) invokes mwl_dma_cleanup/mwl_txdma_cleanup.
 */
static int
mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ifnet *ifp = sc->sc_ifp;
	int error, bsize, i;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;

	error = mwl_desc_setup(sc, "tx", &txq->dma,
			mwl_txbuf, sizeof(struct mwl_txbuf),
			MWL_TXDESC, sizeof(struct mwl_txdesc));
	if (error != 0)
		return error;

	/* allocate and setup tx buffers */
	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		if_printf(ifp, "malloc of %u tx buffers failed\n",
			mwl_txbuf);
		return ENOMEM;
	}
	txq->dma.dd_bufptr = bf;

	/* bind each buffer to its block of descriptors */
	ds = txq->dma.dd_desc;
	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			if_printf(ifp, "unable to create dmamap for tx "
				"buffer %u, error %u\n", i, error);
			return error;
		}
	}
	mwl_txq_reset(sc, txq);
	return 0;
}
2142 
/*
 * Undo mwl_txdma_setup: destroy per-buffer dmamaps, free the
 * buffer state array, and release the descriptor DMA area.
 * Safe on a partially initialized queue (NULL/zero checks).
 */
static void
mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i;

	bf = txq->dma.dd_bufptr;
	for (i = 0; i < mwl_txbuf; i++, bf++) {
		/* all buffers must be quiesced (reclaimed) before teardown */
		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
		KASSERT(bf->bf_node == NULL, ("node on free list"));
		if (bf->bf_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
	}
	STAILQ_INIT(&txq->free);
	txq->nfree = 0;
	if (txq->dma.dd_bufptr != NULL) {
		free(txq->dma.dd_bufptr, M_MWLDEV);
		txq->dma.dd_bufptr = NULL;
	}
	/* NB: dd_desc_len != 0 means mwl_desc_setup completed */
	if (txq->dma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &txq->dma);
}
2165 
2166 static int
2167 mwl_rxdma_setup(struct mwl_softc *sc)
2168 {
2169 	struct ifnet *ifp = sc->sc_ifp;
2170 	int error, jumbosize, bsize, i;
2171 	struct mwl_rxbuf *bf;
2172 	struct mwl_jumbo *rbuf;
2173 	struct mwl_rxdesc *ds;
2174 	caddr_t data;
2175 
2176 	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2177 			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2178 			1, sizeof(struct mwl_rxdesc));
2179 	if (error != 0)
2180 		return error;
2181 
2182 	/*
2183 	 * Receive is done to a private pool of jumbo buffers.
2184 	 * This allows us to attach to mbuf's and avoid re-mapping
2185 	 * memory on each rx we post.  We allocate a large chunk
2186 	 * of memory and manage it in the driver.  The mbuf free
2187 	 * callback method is used to reclaim frames after sending
2188 	 * them up the stack.  By default we allocate 2x the number of
2189 	 * rx descriptors configured so we have some slop to hold
2190 	 * us while frames are processed.
2191 	 */
2192 	if (mwl_rxbuf < 2*mwl_rxdesc) {
2193 		if_printf(ifp,
2194 		    "too few rx dma buffers (%d); increasing to %d\n",
2195 		    mwl_rxbuf, 2*mwl_rxdesc);
2196 		mwl_rxbuf = 2*mwl_rxdesc;
2197 	}
2198 	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2199 	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2200 
2201 	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2202 		       PAGE_SIZE, 0,		/* alignment, bounds */
2203 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2204 		       BUS_SPACE_MAXADDR,	/* highaddr */
2205 		       NULL, NULL,		/* filter, filterarg */
2206 		       sc->sc_rxmemsize,	/* maxsize */
2207 		       1,			/* nsegments */
2208 		       sc->sc_rxmemsize,	/* maxsegsize */
2209 		       BUS_DMA_ALLOCNOW,	/* flags */
2210 		       NULL,			/* lockfunc */
2211 		       NULL,			/* lockarg */
2212 		       &sc->sc_rxdmat);
2213 	error = bus_dmamap_create(sc->sc_rxdmat, BUS_DMA_NOWAIT, &sc->sc_rxmap);
2214 	if (error != 0) {
2215 		if_printf(ifp, "could not create rx DMA map\n");
2216 		return error;
2217 	}
2218 
2219 	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2220 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2221 				 &sc->sc_rxmap);
2222 	if (error != 0) {
2223 		if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2224 		    (uintmax_t) sc->sc_rxmemsize);
2225 		return error;
2226 	}
2227 
2228 	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2229 				sc->sc_rxmem, sc->sc_rxmemsize,
2230 				mwl_load_cb, &sc->sc_rxmem_paddr,
2231 				BUS_DMA_NOWAIT);
2232 	if (error != 0) {
2233 		if_printf(ifp, "could not load rx DMA map\n");
2234 		return error;
2235 	}
2236 
2237 	/*
2238 	 * Allocate rx buffers and set them up.
2239 	 */
2240 	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2241 	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2242 	if (bf == NULL) {
2243 		if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2244 		return error;
2245 	}
2246 	sc->sc_rxdma.dd_bufptr = bf;
2247 
2248 	STAILQ_INIT(&sc->sc_rxbuf);
2249 	ds = sc->sc_rxdma.dd_desc;
2250 	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2251 		bf->bf_desc = ds;
2252 		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2253 		/* pre-assign dma buffer */
2254 		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2255 		/* NB: tail is intentional to preserve descriptor order */
2256 		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2257 	}
2258 
2259 	/*
2260 	 * Place remainder of dma memory buffers on the free list.
2261 	 */
2262 	SLIST_INIT(&sc->sc_rxfree);
2263 	for (; i < mwl_rxbuf; i++) {
2264 		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2265 		rbuf = MWL_JUMBO_DATA2BUF(data);
2266 		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2267 		sc->sc_nrxfree++;
2268 	}
2269 	MWL_RXFREE_INIT(sc);
2270 	return 0;
2271 }
2272 #undef DS2PHYS
2273 
/*
 * Undo mwl_rxdma_setup: unload and free the jumbo buffer pool,
 * destroy the rx dma map/tag state, free the rx buffer array and
 * descriptor area, and destroy the rx free-pool lock.  Each step
 * is guarded so a partially initialized setup can be torn down.
 * NOTE(review): MWL_RXFREE_DESTROY is called unconditionally even
 * if setup failed before MWL_RXFREE_INIT — presumably benign here;
 * verify against the lock macro definitions.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmap != NULL)
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxmap != NULL) {
		bus_dmamap_destroy(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmap = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	/* NB: dd_desc_len != 0 means mwl_desc_setup completed */
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
	MWL_RXFREE_DESTROY(sc);
}
2295 
2296 static int
2297 mwl_dma_setup(struct mwl_softc *sc)
2298 {
2299 	int error, i;
2300 
2301 	error = mwl_rxdma_setup(sc);
2302 	if (error != 0)
2303 		return error;
2304 
2305 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2306 		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2307 		if (error != 0) {
2308 			mwl_dma_cleanup(sc);
2309 			return error;
2310 		}
2311 	}
2312 	return 0;
2313 }
2314 
2315 static void
2316 mwl_dma_cleanup(struct mwl_softc *sc)
2317 {
2318 	int i;
2319 
2320 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2321 		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2322 	mwl_rxdma_cleanup(sc);
2323 }
2324 
2325 static struct ieee80211_node *
2326 mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2327 {
2328 	struct ieee80211com *ic = vap->iv_ic;
2329 	struct mwl_softc *sc = ic->ic_ifp->if_softc;
2330 	const size_t space = sizeof(struct mwl_node);
2331 	struct mwl_node *mn;
2332 
2333 	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2334 	if (mn == NULL) {
2335 		/* XXX stat+msg */
2336 		return NULL;
2337 	}
2338 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2339 	return &mn->mn_node;
2340 }
2341 
/*
 * net80211 node cleanup hook: if the node has a hardware station
 * id, remove its entry from the firmware station database and
 * release the staid, then chain to the saved net80211 cleanup.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		if (mn->mn_hvap != NULL) {
			/* sta mode entries are keyed by our own address */
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 */
		/* NB: this else binds to the mn_hvap test above */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	/* chain to the original net80211 cleanup method */
	sc->sc_node_cleanup(ni);
}
2375 
2376 /*
2377  * Reclaim rx dma buffers from packets sitting on the ampdu
2378  * reorder queue for a station.  We replace buffers with a
2379  * system cluster (if available).
2380  */
/*
 * Reclaim rx dma buffers held by frames sitting on a station's
 * ampdu reorder queue by copying their contents into system
 * clusters.  Currently a no-op: the implementation below is
 * disabled (#if 0) — it uses pool_cache/MEXT APIs from the
 * original NetBSD-derived code that do not exist on FreeBSD.
 */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place.  Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}
2429 
2430 /*
2431  * Callback to reclaim resources.  We first let the
2432  * net80211 layer do it's thing, then if we are still
2433  * blocked by a lack of rx dma buffers we walk the ampdu
2434  * reorder q's to reclaim buffers by copying to a system
2435  * cluster.
2436  */
2437 static void
2438 mwl_node_drain(struct ieee80211_node *ni)
2439 {
2440 	struct ieee80211com *ic = ni->ni_ic;
2441         struct mwl_softc *sc = ic->ic_ifp->if_softc;
2442 	struct mwl_node *mn = MWL_NODE(ni);
2443 
2444 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2445 	    __func__, ni, ni->ni_vap, mn->mn_staid);
2446 
2447 	/* NB: call up first to age out ampdu q's */
2448 	sc->sc_node_drain(ni);
2449 
2450 	/* XXX better to not check low water mark? */
2451 	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2452 	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2453 		uint8_t tid;
2454 		/*
2455 		 * Walk the reorder q and reclaim rx dma buffers by copying
2456 		 * the packet contents into clusters.
2457 		 */
2458 		for (tid = 0; tid < WME_NUM_TID; tid++) {
2459 			struct ieee80211_rx_ampdu *rap;
2460 
2461 			rap = &ni->ni_rx_ampdu[tid];
2462 			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2463 				continue;
2464 			if (rap->rxa_qframes)
2465 				mwl_ampdu_rxdma_reclaim(rap);
2466 		}
2467 	}
2468 }
2469 
/*
 * Report rssi/noise for a node.  Rssi comes from net80211's
 * averaged value; there is no live per-station noise floor so
 * a fixed -95 dBm estimate is returned (the smoothed antenna
 * info path is stubbed out).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2485 
2486 /*
2487  * Convert Hardware per-antenna rssi info to common format:
2488  * Let a1, a2, a3 represent the amplitudes per chain
2489  * Let amax represent max[a1, a2, a3]
2490  * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2491  * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2492  * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2493  * maintain some extra precision.
2494  *
2495  * Values are stored in .5 db format capped at 127.
2496  */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
/* Scale a per-chain amplitude to .5 dB units relative to the strongest
 * chain using the 4*20*log10 table; cap the result at 127. */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx) lookup table (see block comment above) */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	/* rsvd1 holds the last rx rssi stashed by mwl_rx_proc */
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* find the strongest chain (amax) */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->rssi[1], mn->mn_ai.rssi_b);
	CVT(mi->rssi[2], mn->mn_ai.rssi_c);

	mi->noise[0] = mn->mn_ai.nf_a;
	mi->noise[1] = mn->mn_ai.nf_b;
	mi->noise[2] = mn->mn_ai.nf_c;
#undef CVT
}
2530 
2531 static __inline void *
2532 mwl_getrxdma(struct mwl_softc *sc)
2533 {
2534 	struct mwl_jumbo *buf;
2535 	void *data;
2536 
2537 	/*
2538 	 * Allocate from jumbo pool.
2539 	 */
2540 	MWL_RXFREE_LOCK(sc);
2541 	buf = SLIST_FIRST(&sc->sc_rxfree);
2542 	if (buf == NULL) {
2543 		DPRINTF(sc, MWL_DEBUG_ANY,
2544 		    "%s: out of rx dma buffers\n", __func__);
2545 		sc->sc_stats.mst_rx_nodmabuf++;
2546 		data = NULL;
2547 	} else {
2548 		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2549 		sc->sc_nrxfree--;
2550 		data = MWL_JUMBO_BUF2DATA(buf);
2551 	}
2552 	MWL_RXFREE_UNLOCK(sc);
2553 	return data;
2554 }
2555 
2556 static __inline void
2557 mwl_putrxdma(struct mwl_softc *sc, void *data)
2558 {
2559 	struct mwl_jumbo *buf;
2560 
2561 	/* XXX bounds check data */
2562 	MWL_RXFREE_LOCK(sc);
2563 	buf = MWL_JUMBO_DATA2BUF(data);
2564 	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2565 	sc->sc_nrxfree++;
2566 	MWL_RXFREE_UNLOCK(sc);
2567 }
2568 
/*
 * (Re)arm an rx descriptor for the firmware.  If the buffer slot
 * has no dma memory, try to take one from the free pool; on
 * failure mark the descriptor so the firmware skips it and
 * return ENOMEM.  Otherwise program the descriptor fields and
 * hand ownership to the hardware.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	/* hand the descriptor to the hardware last */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2607 
/*
 * External mbuf storage free callback: return the rx dma buffer
 * to the jumbo pool and, if rx processing was blocked for lack of
 * buffers, re-enable the rx interrupt once the pool refills past
 * the low-water mark.
 */
static void
mwl_ext_free(void *data, void *arg)
{
	struct mwl_softc *sc = arg;

	/* XXX bounds check data */
	mwl_putrxdma(sc, data);
	/*
	 * If we were previously blocked by a lack of rx dma buffers
	 * check if we now have enough to restart rx interrupt handling.
	 * NB: we know we are called at splvm which is above splnet.
	 */
	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
		sc->sc_rxblocked = 0;
		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
	}
}
2625 
/*
 * 802.11 Block Ack Request (BAR) frame header through the
 * transmitter address; the BAR control, sequence control, and
 * FCS that follow are not represented.
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];	/* frame control */
	u_int8_t	i_dur[2];	/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2633 
2634 /*
2635  * Like ieee80211_anyhdrsize, but handles BAR frames
2636  * specially so the logic below to piece the 802.11
2637  * header together works.
2638  */
2639 static __inline int
2640 mwl_anyhdrsize(const void *data)
2641 {
2642 	const struct ieee80211_frame *wh = data;
2643 
2644 	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2645 		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2646 		case IEEE80211_FC0_SUBTYPE_CTS:
2647 		case IEEE80211_FC0_SUBTYPE_ACK:
2648 			return sizeof(struct ieee80211_frame_ack);
2649 		case IEEE80211_FC0_SUBTYPE_BAR:
2650 			return sizeof(struct mwl_frame_bar);
2651 		}
2652 		return sizeof(struct ieee80211_frame_min);
2653 	} else
2654 		return ieee80211_hdrsize(data);
2655 }
2656 
2657 static void
2658 mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2659 {
2660 	const struct ieee80211_frame *wh;
2661 	struct ieee80211_node *ni;
2662 
2663 	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2664 	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2665 	if (ni != NULL) {
2666 		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2667 		ieee80211_free_node(ni);
2668 	}
2669 }
2670 
2671 /*
2672  * Convert hardware signal strength to rssi.  The value
2673  * provided by the device has the noise floor added in;
2674  * we need to compensate for this but we don't have that
2675  * so we use a fixed value.
2676  *
2677  * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2678  * offset is already set as part of the initial gain.  This
2679  * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2680  */
static __inline int
cvtrssi(uint8_t ssi)
{
	int v;

	/* XXX hack guess until we have a real noise floor */
	v = 2 * (87 - ((int) ssi + 8));	/* NB: .5 dBm units */
	if (v < 0)
		return 0;
	if (v > 127)
		return 127;
	return v;
}
2689 
/*
 * Deferred rx processing: drain up to mwl_rxquota frames from the
 * rx descriptor ring.  Each frame's dma buffer is handed up the
 * stack attached to an mbuf (zero-copy) after a replacement buffer
 * is taken from the jumbo pool and re-armed in the descriptor.  If
 * the pool is exhausted the rx interrupt is masked until
 * mwl_ext_free refills it past the low-water mark.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct mwl_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX fixed noise floor estimate */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor still owned by the dma engine */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			ifp->if_ierrors++;
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
		    data, sc, 0, EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			/* restore the QoS control field saved in the desc */
			if (IEEE80211_DIR_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_WEP | IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		ifp->if_ipackets++;

		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			/* stash per-antenna info for mwl_node_getmimoinfo */
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	/* remember where to resume on the next pass */
	sc->sc_rxnext = bf;

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(ifp);
	}
#undef IEEE80211_DIR_DSTODS
}
2925 
2926 static void
2927 mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
2928 {
2929 	struct mwl_txbuf *bf, *bn;
2930 	struct mwl_txdesc *ds;
2931 
2932 	MWL_TXQ_LOCK_INIT(sc, txq);
2933 	txq->qnum = qnum;
2934 	txq->txpri = 0;	/* XXX */
2935 #if 0
2936 	/* NB: q setup by mwl_txdma_setup XXX */
2937 	STAILQ_INIT(&txq->free);
2938 #endif
2939 	STAILQ_FOREACH(bf, &txq->free, bf_list) {
2940 		bf->bf_txq = txq;
2941 
2942 		ds = bf->bf_desc;
2943 		bn = STAILQ_NEXT(bf, bf_list);
2944 		if (bn == NULL)
2945 			bn = STAILQ_FIRST(&txq->free);
2946 		ds->pPhysNext = htole32(bn->bf_daddr);
2947 	}
2948 	STAILQ_INIT(&txq->active);
2949 }
2950 
2951 /*
2952  * Setup a hardware data transmit queue for the specified
2953  * access control.  We record the mapping from ac's
2954  * to h/w queues for use by mwl_tx_start.
2955  */
2956 static int
2957 mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2958 {
2959 #define	N(a)	(sizeof(a)/sizeof(a[0]))
2960 	struct mwl_txq *txq;
2961 
2962 	if (ac >= N(sc->sc_ac2q)) {
2963 		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2964 			ac, N(sc->sc_ac2q));
2965 		return 0;
2966 	}
2967 	if (mvtype >= MWL_NUM_TX_QUEUES) {
2968 		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2969 			mvtype, MWL_NUM_TX_QUEUES);
2970 		return 0;
2971 	}
2972 	txq = &sc->sc_txq[mvtype];
2973 	mwl_txq_init(sc, txq, mvtype);
2974 	sc->sc_ac2q[ac] = txq;
2975 	return 1;
2976 #undef N
2977 }
2978 
2979 /*
2980  * Update WME parameters for a transmit queue.
2981  */
2982 static int
2983 mwl_txq_update(struct mwl_softc *sc, int ac)
2984 {
2985 #define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2986 	struct ifnet *ifp = sc->sc_ifp;
2987 	struct ieee80211com *ic = ifp->if_l2com;
2988 	struct mwl_txq *txq = sc->sc_ac2q[ac];
2989 	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
2990 	struct mwl_hal *mh = sc->sc_mh;
2991 	int aifs, cwmin, cwmax, txoplim;
2992 
2993 	aifs = wmep->wmep_aifsn;
2994 	/* XXX in sta mode need to pass log values for cwmin/max */
2995 	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2996 	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2997 	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
2998 
2999 	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
3000 		device_printf(sc->sc_dev, "unable to update hardware queue "
3001 			"parameters for %s traffic!\n",
3002 			ieee80211_wme_acnames[ac]);
3003 		return 0;
3004 	}
3005 	return 1;
3006 #undef MWL_EXPONENT_TO_VALUE
3007 }
3008 
3009 /*
3010  * Callback from the 802.11 layer to update WME parameters.
3011  */
3012 static int
3013 mwl_wme_update(struct ieee80211com *ic)
3014 {
3015 	struct mwl_softc *sc = ic->ic_ifp->if_softc;
3016 
3017 	return !mwl_txq_update(sc, WME_AC_BE) ||
3018 	    !mwl_txq_update(sc, WME_AC_BK) ||
3019 	    !mwl_txq_update(sc, WME_AC_VI) ||
3020 	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3021 }
3022 
3023 /*
3024  * Reclaim resources for a setup queue.
3025  */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* NB: only the lock is torn down here; the descriptor/buffer
	 * lists themselves were set up by mwl_txdma_setup (see the
	 * note in mwl_txq_init) and are reclaimed elsewhere. */
	/* XXX hal work? */
	MWL_TXQ_LOCK_DESTROY(txq);
}
3032 
3033 /*
3034  * Reclaim all tx queue resources.
3035  */
3036 static void
3037 mwl_tx_cleanup(struct mwl_softc *sc)
3038 {
3039 	int i;
3040 
3041 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3042 		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
3043 }
3044 
/*
 * Load the DMA map for a tx frame.  If the mbuf chain needs more
 * than MWL_TXDESC segments it is linearized and reloaded.  On
 * success bf->bf_m/bf_segs/bf_nseg are valid and the map is synced
 * for device access; on failure the mbuf chain has been freed and
 * an errno is returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = MWL_TXDESC+1;	/* NB: marker, rechecked below */
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_DONTWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_DONTWAIT);
#endif
		if (m == NULL) {
			/* NB: m_collapse/m_defrag do not free on failure */
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* retry the load with the (hopefully) shorter chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3106 
/*
 * Convert a legacy IEEE rate (in 500Kb/s units) to the h/w rate
 * index.  Kept as a table lookup so it is, by construction, the
 * exact inverse of mwl_cvtlegacyrix below and the two mappings
 * cannot drift apart.  Unknown rates map to index 0 (1Mb/s),
 * matching the old switch's default case.
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int i;

	for (i = 0; i < (int)(sizeof(ieeerates)/sizeof(ieeerates[0])); i++)
		if (ieeerates[i] == rate)
			return i;
	return 0;
}
3127 
3128 /*
3129  * Calculate fixed tx rate information per client state;
3130  * this value is suitable for writing to the Format field
3131  * of a tx descriptor.
3132  */
static uint16_t
mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
{
	uint16_t fmt;

	/* NB: antenna config 3 and the extension-channel side are
	 * encoded unconditionally, for both HT and legacy rates */
	fmt = SM(3, EAGLE_TXD_ANTENNA)
	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
		fmt |= EAGLE_TXD_FORMAT_HT
		    /* NB: 0x80 implicitly stripped from ucastrate */
		    | SM(rate, EAGLE_TXD_RATE);
		/* XXX short/long GI may be wrong; re-check */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			/* 40MHz channel; GI from the peer's HT caps */
			fmt |= EAGLE_TXD_CHW_40
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		} else {
			fmt |= EAGLE_TXD_CHW_20
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		}
	} else {			/* legacy rate */
		fmt |= EAGLE_TXD_FORMAT_LEGACY
		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
		    | EAGLE_TXD_CHW_20
		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
	}
	return fmt;
}
3165 
/*
 * Prepare a frame for transmit and hand it to the hardware:
 * optionally crypto-encap, prepend the 2-byte f/w length plus
 * 4-address header the firmware expects, fill in the tx
 * descriptor (rate/priority per frame type and any BA stream),
 * and queue it f/w-owned on the h/w queue.  On error the mbuf is
 * freed and an errno returned; on success bf holds the caller's
 * node reference until tx completion.
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
#define	IEEE80211_DIR_DSTODS(wh) \
	((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	copyhdrlen = hdrlen;
	pktlen = m0->m_pkthdr.len;
	/* extract the QoS control field (if any) for the descriptor */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		if (IEEE80211_DIR_DSTODS(wh)) {
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
			copyhdrlen -= sizeof(qos);
		} else
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe *) wh)->i_qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/* NB: EAPOL frames will never have qos set */
			/* route QoS data to the txq of a matching BA stream */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	MWL_TXQ_LOCK(txq);
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ifp->if_opackets++;
	ifp->if_timer = 5;		/* NB: start watchdog countdown */
	MWL_TXQ_UNLOCK(txq);

	return 0;
#undef	IEEE80211_DIR_DSTODS
}
3414 
static __inline int
mwl_cvtlegacyrix(int rix)
{
	/* H/w rate index -> legacy IEEE rate (500Kb/s units); 0 if bogus. */
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };

	if ((size_t)rix >= sizeof(ieeerates)/sizeof(ieeerates[0]))
		return 0;	/* out of range (incl. negative) */
	return ieeerates[rix];
}
3424 
3425 /*
3426  * Process completed xmit descriptors from the specified queue.
3427  */
3428 static int
3429 mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3430 {
3431 #define	EAGLE_TXD_STATUS_MCAST \
3432 	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
3433 	struct ifnet *ifp = sc->sc_ifp;
3434 	struct ieee80211com *ic = ifp->if_l2com;
3435 	struct mwl_txbuf *bf;
3436 	struct mwl_txdesc *ds;
3437 	struct ieee80211_node *ni;
3438 	struct mwl_node *an;
3439 	int nreaped;
3440 	uint32_t status;
3441 
3442 	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
3443 	for (nreaped = 0;; nreaped++) {
3444 		MWL_TXQ_LOCK(txq);
3445 		bf = STAILQ_FIRST(&txq->active);
3446 		if (bf == NULL) {
3447 			MWL_TXQ_UNLOCK(txq);
3448 			break;
3449 		}
3450 		ds = bf->bf_desc;
3451 		MWL_TXDESC_SYNC(txq, ds,
3452 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3453 		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3454 			MWL_TXQ_UNLOCK(txq);
3455 			break;
3456 		}
3457 		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3458 		MWL_TXQ_UNLOCK(txq);
3459 
3460 #ifdef MWL_DEBUG
3461 		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3462 			mwl_printtxbuf(bf, txq->qnum, nreaped);
3463 #endif
3464 		ni = bf->bf_node;
3465 		if (ni != NULL) {
3466 			an = MWL_NODE(ni);
3467 			status = le32toh(ds->Status);
3468 			if (status & EAGLE_TXD_STATUS_OK) {
3469 				uint16_t Format = le16toh(ds->Format);
3470 				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);
3471 
3472 				sc->sc_stats.mst_ant_tx[txant]++;
3473 				if (status & EAGLE_TXD_STATUS_OK_RETRY)
3474 					sc->sc_stats.mst_tx_retries++;
3475 				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3476 					sc->sc_stats.mst_tx_mretries++;
3477 				if (txq->qnum >= MWL_WME_AC_VO)
3478 					ic->ic_wme.wme_hipri_traffic++;
3479 				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
3480 				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3481 					ni->ni_txrate = mwl_cvtlegacyrix(
3482 					    ni->ni_txrate);
3483 				} else
3484 					ni->ni_txrate |= IEEE80211_RATE_MCS;
3485 				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
3486 			} else {
3487 				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3488 					sc->sc_stats.mst_tx_linkerror++;
3489 				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3490 					sc->sc_stats.mst_tx_xretries++;
3491 				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3492 					sc->sc_stats.mst_tx_aging++;
3493 				if (bf->bf_m->m_flags & M_FF)
3494 					sc->sc_stats.mst_ff_txerr++;
3495 			}
3496 			/*
3497 			 * Do any tx complete callback.  Note this must
3498 			 * be done before releasing the node reference.
3499 			 * XXX no way to figure out if frame was ACK'd
3500 			 */
3501 			if (bf->bf_m->m_flags & M_TXCB) {
3502 				/* XXX strip fw len in case header inspected */
3503 				m_adj(bf->bf_m, sizeof(uint16_t));
3504 				ieee80211_process_callback(ni, bf->bf_m,
3505 					(status & EAGLE_TXD_STATUS_OK) == 0);
3506 			}
3507 			/*
3508 			 * Reclaim reference to node.
3509 			 *
3510 			 * NB: the node may be reclaimed here if, for example
3511 			 *     this is a DEAUTH message that was sent and the
3512 			 *     node was timed out due to inactivity.
3513 			 */
3514 			ieee80211_free_node(ni);
3515 		}
3516 		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3517 
3518 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3519 		    BUS_DMASYNC_POSTWRITE);
3520 		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3521 		m_freem(bf->bf_m);
3522 
3523 		mwl_puttxbuf_tail(txq, bf);
3524 	}
3525 	return nreaped;
3526 #undef EAGLE_TXD_STATUS_MCAST
3527 }
3528 
3529 /*
3530  * Deferred processing of transmit interrupt; special-cased
3531  * for four hardware queues, 0-3.
3532  */
3533 static void
3534 mwl_tx_proc(void *arg, int npending)
3535 {
3536 	struct mwl_softc *sc = arg;
3537 	struct ifnet *ifp = sc->sc_ifp;
3538 	int nreaped;
3539 
3540 	/*
3541 	 * Process each active queue.
3542 	 */
3543 	nreaped = 0;
3544 	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3545 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3546 	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3547 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3548 	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3549 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3550 	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3551 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3552 
3553 	if (nreaped != 0) {
3554 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3555 		ifp->if_timer = 0;
3556 		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
3557 			/* NB: kick fw; the tx thread may have been preempted */
3558 			mwl_hal_txstart(sc->sc_mh, 0);
3559 			mwl_start(ifp);
3560 		}
3561 	}
3562 }
3563 
/*
 * Discard all frames pending on a tx queue: unload DMA maps,
 * release node references taken at tx start, free the mbufs and
 * return the buffers to the queue's free list.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ifnet *ifp = sc->sc_ifp;
			struct ieee80211com *ic = ifp->if_l2com;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3608 
3609 /*
3610  * Drain the transmit queues and reclaim resources.
3611  */
3612 static void
3613 mwl_draintxq(struct mwl_softc *sc)
3614 {
3615 	struct ifnet *ifp = sc->sc_ifp;
3616 	int i;
3617 
3618 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3619 		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3620 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3621 	ifp->if_timer = 0;
3622 }
3623 
3624 #ifdef MWL_DIAGAPI
3625 /*
3626  * Reset the transmit queues to a pristine state after a fw download.
3627  */
3628 static void
3629 mwl_resettxq(struct mwl_softc *sc)
3630 {
3631 	int i;
3632 
3633 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3634 		mwl_txq_reset(sc, &sc->sc_txq[i]);
3635 }
3636 #endif /* MWL_DIAGAPI */
3637 
3638 /*
3639  * Clear the transmit queues of any frames submitted for the
3640  * specified vap.  This is done when the vap is deleted so we
3641  * don't potentially reference the vap after it is gone.
3642  * Note we cannot remove the frames; we only reclaim the node
3643  * reference.
3644  */
3645 static void
3646 mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3647 {
3648 	struct mwl_txq *txq;
3649 	struct mwl_txbuf *bf;
3650 	int i;
3651 
3652 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3653 		txq = &sc->sc_txq[i];
3654 		MWL_TXQ_LOCK(txq);
3655 		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3656 			struct ieee80211_node *ni = bf->bf_node;
3657 			if (ni != NULL && ni->ni_vap == vap) {
3658 				bf->bf_node = NULL;
3659 				ieee80211_free_node(ni);
3660 			}
3661 		}
3662 		MWL_TXQ_UNLOCK(txq);
3663 	}
3664 }
3665 
3666 static int
3667 mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3668 	const uint8_t *frm, const uint8_t *efrm)
3669 {
3670 	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3671 	const struct ieee80211_action *ia;
3672 
3673 	ia = (const struct ieee80211_action *) frm;
3674 	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3675 	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3676 		const struct ieee80211_action_ht_mimopowersave *mps =
3677 		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3678 
3679 		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3680 		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3681 		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3682 		return 0;
3683 	} else
3684 		return sc->sc_recv_action(ni, wh, frm, efrm);
3685 }
3686 
/*
 * Intercept an outbound ADDBA request so one of the firmware's
 * per-station BA stream slots can be reserved before the request
 * goes out.  Returning 0 declines a-mpdu aggregation for this tid;
 * otherwise defer to the saved net80211 handler.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, WME_AC_TO_TID(tap->txa_ac), ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3758 
/*
 * Complete the ADDBA handshake: on success tell the firmware to
 * create the BA stream that was reserved in mwl_addba_request;
 * on failure (or NAK from the peer) release the reserved stream.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, AC %d\n",
		    __func__, tap->txa_ac);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d AC %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_ac, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_ac, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d AC %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_ac, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3825 
3826 static void
3827 mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3828 {
3829 	struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3830 	struct mwl_bastate *bas;
3831 
3832 	bas = tap->txa_private;
3833 	if (bas != NULL) {
3834 		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3835 		    __func__, bas->bastream);
3836 		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3837 		mwl_bastream_free(bas);
3838 		tap->txa_private = NULL;
3839 	}
3840 	sc->sc_addba_stop(ni, tap);
3841 }
3842 
3843 /*
3844  * Setup the rx data structures.  This should only be
3845  * done once or we may get out of sync with the firmware.
3846  */
3847 static int
3848 mwl_startrecv(struct mwl_softc *sc)
3849 {
3850 	if (!sc->sc_recvsetup) {
3851 		struct mwl_rxbuf *bf, *prev;
3852 		struct mwl_rxdesc *ds;
3853 
3854 		prev = NULL;
3855 		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
3856 			int error = mwl_rxbuf_init(sc, bf);
3857 			if (error != 0) {
3858 				DPRINTF(sc, MWL_DEBUG_RECV,
3859 					"%s: mwl_rxbuf_init failed %d\n",
3860 					__func__, error);
3861 				return error;
3862 			}
3863 			if (prev != NULL) {
3864 				ds = prev->bf_desc;
3865 				ds->pPhysNext = htole32(bf->bf_daddr);
3866 			}
3867 			prev = bf;
3868 		}
3869 		if (prev != NULL) {
3870 			ds = prev->bf_desc;
3871 			ds->pPhysNext =
3872 			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
3873 		}
3874 		sc->sc_recvsetup = 1;
3875 	}
3876 	mwl_mode_init(sc);		/* set filters, etc. */
3877 	return 0;
3878 }
3879 
3880 static MWL_HAL_APMODE
3881 mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3882 {
3883 	MWL_HAL_APMODE mode;
3884 
3885 	if (IEEE80211_IS_CHAN_HT(chan)) {
3886 		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3887 			mode = AP_MODE_N_ONLY;
3888 		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3889 			mode = AP_MODE_AandN;
3890 		else if (vap->iv_flags & IEEE80211_F_PUREG)
3891 			mode = AP_MODE_GandN;
3892 		else
3893 			mode = AP_MODE_BandGandN;
3894 	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3895 		if (vap->iv_flags & IEEE80211_F_PUREG)
3896 			mode = AP_MODE_G_ONLY;
3897 		else
3898 			mode = AP_MODE_MIXED;
3899 	} else if (IEEE80211_IS_CHAN_B(chan))
3900 		mode = AP_MODE_B_ONLY;
3901 	else if (IEEE80211_IS_CHAN_A(chan))
3902 		mode = AP_MODE_A_ONLY;
3903 	else
3904 		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3905 	return mode;
3906 }
3907 
3908 static int
3909 mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3910 {
3911 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3912 	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3913 }
3914 
3915 /*
3916  * Set/change channels.
3917  */
3918 static int
3919 mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
3920 {
3921 	struct mwl_hal *mh = sc->sc_mh;
3922 	struct ifnet *ifp = sc->sc_ifp;
3923 	struct ieee80211com *ic = ifp->if_l2com;
3924 	MWL_HAL_CHANNEL hchan;
3925 	int maxtxpow;
3926 
3927 	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
3928 	    __func__, chan->ic_freq, chan->ic_flags);
3929 
3930 	/*
3931 	 * Convert to a HAL channel description with
3932 	 * the flags constrained to reflect the current
3933 	 * operating mode.
3934 	 */
3935 	mwl_mapchan(&hchan, chan);
3936 	mwl_hal_intrset(mh, 0);		/* disable interrupts */
3937 #if 0
3938 	mwl_draintxq(sc);		/* clear pending tx frames */
3939 #endif
3940 	mwl_hal_setchannel(mh, &hchan);
3941 	/*
3942 	 * Tx power is cap'd by the regulatory setting and
3943 	 * possibly a user-set limit.  We pass the min of
3944 	 * these to the hal to apply them to the cal data
3945 	 * for this channel.
3946 	 * XXX min bound?
3947 	 */
3948 	maxtxpow = 2*chan->ic_maxregpower;
3949 	if (maxtxpow > ic->ic_txpowlimit)
3950 		maxtxpow = ic->ic_txpowlimit;
3951 	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
3952 	/* NB: potentially change mcast/mgt rates */
3953 	mwl_setcurchanrates(sc);
3954 
3955 	/*
3956 	 * Update internal state.
3957 	 */
3958 	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
3959 	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
3960 	if (IEEE80211_IS_CHAN_A(chan)) {
3961 		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
3962 		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
3963 	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3964 		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
3965 		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
3966 	} else {
3967 		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
3968 		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
3969 	}
3970 	sc->sc_curchan = hchan;
3971 	mwl_hal_intrset(mh, sc->sc_imask);
3972 
3973 	return 0;
3974 }
3975 
3976 static void
3977 mwl_scan_start(struct ieee80211com *ic)
3978 {
3979 	struct ifnet *ifp = ic->ic_ifp;
3980 	struct mwl_softc *sc = ifp->if_softc;
3981 
3982 	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
3983 }
3984 
3985 static void
3986 mwl_scan_end(struct ieee80211com *ic)
3987 {
3988 	struct ifnet *ifp = ic->ic_ifp;
3989 	struct mwl_softc *sc = ifp->if_softc;
3990 
3991 	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
3992 }
3993 
3994 static void
3995 mwl_set_channel(struct ieee80211com *ic)
3996 {
3997 	struct ifnet *ifp = ic->ic_ifp;
3998 	struct mwl_softc *sc = ifp->if_softc;
3999 
4000 	(void) mwl_chan_set(sc, ic->ic_curchan);
4001 }
4002 
4003 /*
4004  * Handle a channel switch request.  We inform the firmware
4005  * and mark the global state to suppress various actions.
4006  * NB: we issue only one request to the fw; we may be called
4007  * multiple times if there are multiple vap's.
4008  */
4009 static void
4010 mwl_startcsa(struct ieee80211vap *vap)
4011 {
4012 	struct ieee80211com *ic = vap->iv_ic;
4013 	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4014 	MWL_HAL_CHANNEL hchan;
4015 
4016 	if (sc->sc_csapending)
4017 		return;
4018 
4019 	mwl_mapchan(&hchan, ic->ic_csa_newchan);
4020 	/* 1 =>'s quiet channel */
4021 	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
4022 	sc->sc_csapending = 1;
4023 }
4024 
4025 /*
4026  * Plumb any static WEP key for the station.  This is
4027  * necessary as we must propagate the key from the
4028  * global key table of the vap to each sta db entry.
4029  */
4030 static void
4031 mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4032 {
4033 	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
4034 		IEEE80211_F_PRIVACY &&
4035 	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
4036 	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
4037 		(void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
4038 }
4039 
/*
 * Create the firmware station db entry for a peer and plumb
 * any static WEP key.  pi may be NULL (e.g. pre-association)
 * in which case no peer capability info is pushed.
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	/* NB: qos/ht flags and the peer's WME info byte ride along */
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
4073 
4074 static void
4075 mwl_setglobalkeys(struct ieee80211vap *vap)
4076 {
4077 	struct ieee80211_key *wk;
4078 
4079 	wk = &vap->iv_nw_keys[0];
4080 	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4081 		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4082 			(void) mwl_key_set(vap, wk, vap->iv_myaddr);
4083 }
4084 
4085 /*
4086  * Convert a legacy rate set to a firmware bitmask.
4087  */
4088 static uint32_t
4089 get_rate_bitmap(const struct ieee80211_rateset *rs)
4090 {
4091 	uint32_t rates;
4092 	int i;
4093 
4094 	rates = 0;
4095 	for (i = 0; i < rs->rs_nrates; i++)
4096 		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4097 		case 2:	  rates |= 0x001; break;
4098 		case 4:	  rates |= 0x002; break;
4099 		case 11:  rates |= 0x004; break;
4100 		case 22:  rates |= 0x008; break;
4101 		case 44:  rates |= 0x010; break;
4102 		case 12:  rates |= 0x020; break;
4103 		case 18:  rates |= 0x040; break;
4104 		case 24:  rates |= 0x080; break;
4105 		case 36:  rates |= 0x100; break;
4106 		case 48:  rates |= 0x200; break;
4107 		case 72:  rates |= 0x400; break;
4108 		case 96:  rates |= 0x800; break;
4109 		case 108: rates |= 0x1000; break;
4110 		}
4111 	return rates;
4112 }
4113 
4114 /*
4115  * Construct an HT firmware bitmask from an HT rate set.
4116  */
4117 static uint32_t
4118 get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4119 {
4120 	uint32_t rates;
4121 	int i;
4122 
4123 	rates = 0;
4124 	for (i = 0; i < rs->rs_nrates; i++) {
4125 		if (rs->rs_rates[i] < 16)
4126 			rates |= 1<<rs->rs_rates[i];
4127 	}
4128 	return rates;
4129 }
4130 
4131 /*
4132  * Craft station database entry for station.
4133  * NB: use host byte order here, the hal handles byte swapping.
4134  */
4135 static MWL_HAL_PEERINFO *
4136 mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
4137 {
4138 	const struct ieee80211vap *vap = ni->ni_vap;
4139 
4140 	memset(pi, 0, sizeof(*pi));
4141 	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
4142 	pi->CapInfo = ni->ni_capinfo;
4143 	if (ni->ni_flags & IEEE80211_NODE_HT) {
4144 		/* HT capabilities, etc */
4145 		pi->HTCapabilitiesInfo = ni->ni_htcap;
4146 		/* XXX pi.HTCapabilitiesInfo */
4147 	        pi->MacHTParamInfo = ni->ni_htparam;
4148 		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
4149 		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
4150 		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
4151 		pi->AddHtInfo.OpMode = ni->ni_htopmode;
4152 		pi->AddHtInfo.stbc = ni->ni_htstbc;
4153 
4154 		/* constrain according to local configuration */
4155 		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
4156 			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
4157 		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
4158 			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
4159 		if (ni->ni_chw != 40)
4160 			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
4161 	}
4162 	return pi;
4163 }
4164 
4165 /*
4166  * Re-create the local sta db entry for a vap to ensure
4167  * up to date WME state is pushed to the firmware.  Because
4168  * this resets crypto state this must be followed by a
4169  * reload of any keys in the global key table.
4170  */
4171 static int
4172 mwl_localstadb(struct ieee80211vap *vap)
4173 {
4174 #define	WME(ie) ((const struct ieee80211_wme_info *) ie)
4175 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
4176 	struct ieee80211_node *bss;
4177 	MWL_HAL_PEERINFO pi;
4178 	int error;
4179 
4180 	switch (vap->iv_opmode) {
4181 	case IEEE80211_M_STA:
4182 		bss = vap->iv_bss;
4183 		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
4184 		    vap->iv_state == IEEE80211_S_RUN ?
4185 			mkpeerinfo(&pi, bss) : NULL,
4186 		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
4187 		    bss->ni_ies.wme_ie != NULL ?
4188 			WME(bss->ni_ies.wme_ie)->wme_info : 0);
4189 		if (error == 0)
4190 			mwl_setglobalkeys(vap);
4191 		break;
4192 	case IEEE80211_M_HOSTAP:
4193 	case IEEE80211_M_MBSS:
4194 		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
4195 		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
4196 		if (error == 0)
4197 			mwl_setglobalkeys(vap);
4198 		break;
4199 	default:
4200 		error = 0;
4201 		break;
4202 	}
4203 	return error;
4204 #undef WME
4205 }
4206 
/*
 * 802.11 state machine hook.  Pre-process the transition to
 * keep the firmware in sync (radar detection, BA/CSA state),
 * invoke the net80211 parent method, then complete per-mode
 * work that needs post-transition state (e.g. iv_bss).
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct ifnet *ifp = ic->ic_ifp;
	struct mwl_softc *sc = ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* NB: the age timer is re-armed below if we land in RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* NB: first DWDS sta vap enables dwds in the f/w */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/* NOTE(review): this decrement runs on ANY non-RUN/non-SLEEP
		 * transition of a DWDS vap; verify it cannot underflow when
		 * the matching increment above never happened. */
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4360 
4361 /*
4362  * Manage station id's; these are separate from AID's
4363  * as AID's may have values out of the range of possible
4364  * station id's acceptable to the firmware.
4365  */
4366 static int
4367 allocstaid(struct mwl_softc *sc, int aid)
4368 {
4369 	int staid;
4370 
4371 	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4372 		/* NB: don't use 0 */
4373 		for (staid = 1; staid < MWL_MAXSTAID; staid++)
4374 			if (isclr(sc->sc_staid, staid))
4375 				break;
4376 	} else
4377 		staid = aid;
4378 	setbit(sc->sc_staid, staid);
4379 	return staid;
4380 }
4381 
/* Release a station id previously obtained from allocstaid. */
static void
delstaid(struct mwl_softc *sc, int staid)
{
	clrbit(sc->sc_staid, staid);
}
4387 
4388 /*
4389  * Setup driver-specific state for a newly associated node.
4390  * Note that we're called also on a re-associate, the isnew
4391  * param tells us if this is the first time or not.
4392  */
4393 static void
4394 mwl_newassoc(struct ieee80211_node *ni, int isnew)
4395 {
4396 	struct ieee80211vap *vap = ni->ni_vap;
4397         struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4398 	struct mwl_node *mn = MWL_NODE(ni);
4399 	MWL_HAL_PEERINFO pi;
4400 	uint16_t aid;
4401 	int error;
4402 
4403 	aid = IEEE80211_AID(ni->ni_associd);
4404 	if (isnew) {
4405 		mn->mn_staid = allocstaid(sc, aid);
4406 		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4407 	} else {
4408 		mn = MWL_NODE(ni);
4409 		/* XXX reset BA stream? */
4410 	}
4411 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4412 	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4413 	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4414 	if (error != 0) {
4415 		DPRINTF(sc, MWL_DEBUG_NODE,
4416 		    "%s: error %d creating sta db entry\n",
4417 		    __func__, error);
4418 		/* XXX how to deal with error? */
4419 	}
4420 }
4421 
4422 /*
4423  * Periodically poke the firmware to age out station state
4424  * (power save queues, pending tx aggregates).
4425  */
4426 static void
4427 mwl_agestations(void *arg)
4428 {
4429 	struct mwl_softc *sc = arg;
4430 
4431 	mwl_hal_setkeepalive(sc->sc_mh);
4432 	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
4433 		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
4434 }
4435 
4436 static const struct mwl_hal_channel *
4437 findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4438 {
4439 	int i;
4440 
4441 	for (i = 0; i < ci->nchannels; i++) {
4442 		const struct mwl_hal_channel *hc = &ci->channels[i];
4443 		if (hc->ieee == ieee)
4444 			return hc;
4445 	}
4446 	return NULL;
4447 }
4448 
/*
 * Validate a proposed regulatory/channel configuration:
 * every channel must have hal calibration data and its max
 * power is capped by the hal's per-channel limit.
 */
static int
mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
	int nchan, struct ieee80211_channel chans[])
{
	struct mwl_softc *sc = ic->ic_ifp->if_softc;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_CHANNELINFO *ci;
	int i;

	for (i = 0; i < nchan; i++) {
		struct ieee80211_channel *c = &chans[i];
		const struct mwl_hal_channel *hc;

		/* select the hal channel table for this band/width */
		if (IEEE80211_IS_CHAN_2GHZ(c)) {
			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
			    IEEE80211_IS_CHAN_HT40(c) ?
				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
			    IEEE80211_IS_CHAN_HT40(c) ?
				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
		} else {
			if_printf(ic->ic_ifp,
			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
			return EINVAL;
		}
		/*
		 * Verify channel has cal data and cap tx power.
		 */
		/* NB: maxTxPow is dBm, ic_maxpower is .5 dBm units */
		hc = findhalchannel(ci, c->ic_ieee);
		if (hc != NULL) {
			if (c->ic_maxpower > 2*hc->maxTxPow)
				c->ic_maxpower = 2*hc->maxTxPow;
			goto next;
		}
		if (IEEE80211_IS_CHAN_HT40(c)) {
			/*
			 * Look for the extension channel since the
			 * hal table only has the primary channel.
			 */
			hc = findhalchannel(ci, c->ic_extieee);
			if (hc != NULL) {
				if (c->ic_maxpower > 2*hc->maxTxPow)
					c->ic_maxpower = 2*hc->maxTxPow;
				goto next;
			}
		}
		/* no cal data anywhere for this channel; reject the set */
		if_printf(ic->ic_ifp,
		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
		    __func__, c->ic_ieee, c->ic_extieee,
		    c->ic_freq, c->ic_flags);
		return EINVAL;
	next:
		;
	}
	return 0;
}
4507 
4508 #define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4509 #define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4510 
4511 static void
4512 addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
4513 {
4514 	c->ic_freq = freq;
4515 	c->ic_flags = flags;
4516 	c->ic_ieee = ieee;
4517 	c->ic_minpower = 0;
4518 	c->ic_maxpower = 2*txpow;
4519 	c->ic_maxregpower = txpow;
4520 }
4521 
4522 static const struct ieee80211_channel *
4523 findchannel(const struct ieee80211_channel chans[], int nchans,
4524 	int freq, int flags)
4525 {
4526 	const struct ieee80211_channel *c;
4527 	int i;
4528 
4529 	for (i = 0; i < nchans; i++) {
4530 		c = &chans[i];
4531 		if (c->ic_freq == freq && c->ic_flags == flags)
4532 			return c;
4533 	}
4534 	return NULL;
4535 }
4536 
/*
 * Append HT40 channel pairs derived from the hal's 40MHz
 * channel table.  For each hal entry we locate the matching
 * HT20 extension channel (already added by addchannels) and
 * emit an HT40U entry on the primary plus an HT40D entry on
 * the extension.
 */
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	const struct ieee80211_channel *extc;
	const struct mwl_hal_channel *hc;
	int i;

	/* start appending at the current end of the list */
	c = &chans[*nchans];

	/* NB: base flags; the HT40U/HT40D variant is or'd in below */
	flags &= ~IEEE80211_CHAN_HT;
	for (i = 0; i < ci->nchannels; i++) {
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * extension channel above and the insert the pair.
		 */
		hc = &ci->channels[i];
		extc = findchannel(chans, *nchans, hc->freq+20,
		    flags | IEEE80211_CHAN_HT20);
		if (extc != NULL) {
			if (*nchans >= maxchans)
				break;
			/* primary channel, extension 20MHz above */
			addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
			    hc->ieee, hc->maxTxPow);
			c->ic_extieee = extc->ic_ieee;
			c++, (*nchans)++;
			if (*nchans >= maxchans)
				break;
			/* extension channel, primary 20MHz below */
			addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
			    extc->ic_ieee, hc->maxTxPow);
			c->ic_extieee = hc->ieee;
			c++, (*nchans)++;
		}
	}
}
4573 
/*
 * Append 20MHz channels from a hal channel table.  Depending
 * on the requested flags each hal entry may expand into
 * multiple net80211 entries (b, g, HT20 for 2.4GHz; a, HT20
 * for 5GHz) by duplicating and re-flagging the previous slot.
 */
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
	const MWL_HAL_CHANNELINFO *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	/* start appending at the current end of the list */
	c = &chans[*nchans];

	for (i = 0; i < ci->nchannels; i++) {
		const struct mwl_hal_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;
		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channel have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			/* copy the just-added entry forward and retag the
			 * earlier slot as b-only */
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channel have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			/* previous slot becomes g-only; new slot is HT20 */
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channel have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			/* previous slot becomes a-only; new slot is HT20 */
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}
4621 
/*
 * Build the full channel list from the hal's tables.
 * NB: 20MHz channels must be added before the 40MHz tables
 * so addht40channels can find the HT20 extension entries.
 */
static void
getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
	struct ieee80211_channel chans[])
{
	const MWL_HAL_CHANNELINFO *ci;

	/*
	 * Use the channel info from the hal to craft the
	 * channel list.  Note that we pass back an unsorted
	 * list; the caller is required to sort it for us
	 * (if desired).
	 */
	*nchans = 0;
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
		addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
}
4648 
4649 static void
4650 mwl_getradiocaps(struct ieee80211com *ic,
4651 	int maxchans, int *nchans, struct ieee80211_channel chans[])
4652 {
4653 	struct mwl_softc *sc = ic->ic_ifp->if_softc;
4654 
4655 	getchannels(sc, maxchans, nchans, chans);
4656 }
4657 
4658 static int
4659 mwl_getchannels(struct mwl_softc *sc)
4660 {
4661 	struct ifnet *ifp = sc->sc_ifp;
4662 	struct ieee80211com *ic = ifp->if_l2com;
4663 
4664 	/*
4665 	 * Use the channel info from the hal to craft the
4666 	 * channel list for net80211.  Note that we pass up
4667 	 * an unsorted list; net80211 will sort it for us.
4668 	 */
4669 	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4670 	ic->ic_nchans = 0;
4671 	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4672 
4673 	ic->ic_regdomain.regdomain = SKU_DEBUG;
4674 	ic->ic_regdomain.country = CTRY_DEFAULT;
4675 	ic->ic_regdomain.location = 'I';
4676 	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4677 	ic->ic_regdomain.isocc[1] = ' ';
4678 	return (ic->ic_nchans == 0 ? EIO : 0);
4679 }
4680 #undef IEEE80211_CHAN_HTA
4681 #undef IEEE80211_CHAN_HTG
4682 
4683 #ifdef MWL_DEBUG
/* Debug aid: decode and print one rx descriptor at index ix. */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	/* NB: the "*" / "!" marker flags driver-owned descriptors
	 *     as ok / not-ok according to the status word */
	printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (const struct mwl_desc *)bf->bf_daddr,
	    le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData),
	    ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
4700 
/* Debug aid: decode and print one tx descriptor on queue qnum. */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:%p)\n",
	    ds, (const struct mwl_txdesc *)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !",
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	/* multi-segment descriptors carry per-segment lengths/pointers */
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4743 #endif /* MWL_DEBUG */
4744 
4745 #if 0
/* Debug aid (compiled out): dump every active tx descriptor on a queue. */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* pick up any device-side updates before printing */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4764 #endif
4765 
/*
 * Transmit watchdog.  Uses a firmware keepalive command to
 * distinguish a wedged firmware from a plain tx timeout.
 */
static void
mwl_watchdog(struct ifnet *ifp)
{
	struct mwl_softc *sc = ifp->if_softc;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
		/* NB: a failing keepalive suggests the f/w is hung */
		if (mwl_hal_setkeepalive(sc->sc_mh))
			if_printf(ifp, "transmit timeout (firmware hung?)\n");
		else
			if_printf(ifp, "transmit timeout\n");
#if 0
		mwl_reset(ifp);
mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
#endif
		ifp->if_oerrors++;
		sc->sc_stats.mst_watchdog++;
	}
}
4784 
4785 #ifdef MWL_DIAGAPI
4786 /*
4787  * Diagnostic interface to the HAL.  This is used by various
4788  * tools to do things like retrieve register contents for
4789  * debugging.  The mechanism is intentionally opaque so that
4790  * it can change frequently w/o concern for compatiblity.
4791  */
4792 static int
4793 mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
4794 {
4795 	struct mwl_hal *mh = sc->sc_mh;
4796 	u_int id = md->md_id & MWL_DIAG_ID;
4797 	void *indata = NULL;
4798 	void *outdata = NULL;
4799 	u_int32_t insize = md->md_in_size;
4800 	u_int32_t outsize = md->md_out_size;
4801 	int error = 0;
4802 
4803 	if (md->md_id & MWL_DIAG_IN) {
4804 		/*
4805 		 * Copy in data.
4806 		 */
4807 		indata = malloc(insize, M_TEMP, M_NOWAIT);
4808 		if (indata == NULL) {
4809 			error = ENOMEM;
4810 			goto bad;
4811 		}
4812 		error = copyin(md->md_in_data, indata, insize);
4813 		if (error)
4814 			goto bad;
4815 	}
4816 	if (md->md_id & MWL_DIAG_DYN) {
4817 		/*
4818 		 * Allocate a buffer for the results (otherwise the HAL
4819 		 * returns a pointer to a buffer where we can read the
4820 		 * results).  Note that we depend on the HAL leaving this
4821 		 * pointer for us to use below in reclaiming the buffer;
4822 		 * may want to be more defensive.
4823 		 */
4824 		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
4825 		if (outdata == NULL) {
4826 			error = ENOMEM;
4827 			goto bad;
4828 		}
4829 	}
4830 	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
4831 		if (outsize < md->md_out_size)
4832 			md->md_out_size = outsize;
4833 		if (outdata != NULL)
4834 			error = copyout(outdata, md->md_out_data,
4835 					md->md_out_size);
4836 	} else {
4837 		error = EINVAL;
4838 	}
4839 bad:
4840 	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
4841 		free(indata, M_TEMP);
4842 	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
4843 		free(outdata, M_TEMP);
4844 	return error;
4845 }
4846 
/*
 * Diagnostic reset: optionally reload firmware (md_id == 0),
 * re-fetch h/w specs, reprogram dma, and reset the driver's
 * tx/rx bookkeeping so it matches the restarted firmware.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4877 #endif /* MWL_DIAGAPI */
4878 
/*
 * Network interface ioctl handler: reacts to interface flag changes
 * (SIOCSIFFLAGS), services the Marvell-private statistics and
 * diagnostic requests, and hands media/address queries to the
 * generic net80211/ether code.
 */
static int
mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct mwl_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, startall;

	switch (cmd) {
	case SIOCSIFFLAGS:
		MWL_LOCK(sc);
		/* NB: vap restart is deferred until after the lock drops */
		startall = 0;
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call mwl_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			mwl_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid) {
				mwl_init_locked(sc);	/* XXX lose error */
				startall = 1;
			}
		} else
			mwl_stop_locked(ifp, 1);
		MWL_UNLOCK(sc);
		if (startall)
			ieee80211_start_all(ic);
		break;
	case SIOCGMVSTATS:
		/* Refresh the firmware counters before exporting them. */
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets = ifp->if_opackets;
		sc->sc_stats.mst_rx_packets = ifp->if_ipackets;
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsisentcy in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return copyout(&sc->sc_stats,
				ifr->ifr_data, sizeof (sc->sc_stats));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		/* NB: mwl_ioctl_reset requires the softc lock held */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
4957 
4958 #ifdef	MWL_DEBUG
4959 static int
4960 mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4961 {
4962 	struct mwl_softc *sc = arg1;
4963 	int debug, error;
4964 
4965 	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4966 	error = sysctl_handle_int(oidp, &debug, 0, req);
4967 	if (error || !req->newptr)
4968 		return error;
4969 	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4970 	sc->sc_debug = debug & 0x00ffffff;
4971 	return 0;
4972 }
4973 #endif /* MWL_DEBUG */
4974 
/*
 * Attach driver-specific sysctl knobs under the device's tree.
 * Currently only the "debug" knob exists, and only when the driver
 * is built with MWL_DEBUG; otherwise this is a no-op.
 */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* Seed the per-device mask from the global tunable. */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
4988 
4989 /*
4990  * Announce various information on device/driver attach.
4991  */
4992 static void
4993 mwl_announce(struct mwl_softc *sc)
4994 {
4995 	struct ifnet *ifp = sc->sc_ifp;
4996 
4997 	if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
4998 		sc->sc_hwspecs.hwVersion,
4999 		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
5000 		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
5001 		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
5002 		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
5003 		sc->sc_hwspecs.regionCode);
5004 	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;
5005 
5006 	if (bootverbose) {
5007 		int i;
5008 		for (i = 0; i <= WME_AC_VO; i++) {
5009 			struct mwl_txq *txq = sc->sc_ac2q[i];
5010 			if_printf(ifp, "Use hw queue %u for %s traffic\n",
5011 				txq->qnum, ieee80211_wme_acnames[i]);
5012 		}
5013 	}
5014 	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
5015 		if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
5016 	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
5017 		if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
5018 	if (bootverbose || mwl_txbuf != MWL_TXBUF)
5019 		if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
5020 	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
5021 		if_printf(ifp, "multi-bss support\n");
5022 #ifdef MWL_TX_NODROP
5023 	if (bootverbose)
5024 		if_printf(ifp, "no tx drop\n");
5025 #endif
5026 }
5027