xref: /freebsd/sys/dev/mwl/if_mwl.c (revision ca389486a9599768e0ba69dca13c208020623083)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
/*
 * Driver for the Marvell 88W8363 Wireless LAN controller.
 */

#include "opt_inet.h"
#include "opt_mwl.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net/bpf.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_input.h>
#include <net80211/ieee80211_regdomain.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif /* INET */

#include <dev/mwl/if_mwlvar.h>
#include <dev/mwl/mwldiag.h>

static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	mwl_vap_delete(struct ieee80211vap *);
static int	mwl_setupdma(struct mwl_softc *);
static int	mwl_hal_reset(struct mwl_softc *sc);
static int	mwl_init(struct mwl_softc *);
static void	mwl_parent(struct ieee80211com *);
static int	mwl_reset(struct ieee80211vap *, u_long);
static void	mwl_stop(struct mwl_softc *);
static void	mwl_start(struct mwl_softc *);
static int	mwl_transmit(struct ieee80211com *, struct mbuf *);
static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
			const struct ieee80211_bpf_params *);
static int	mwl_media_change(if_t);
static void	mwl_watchdog(void *);
static int	mwl_ioctl(struct ieee80211com *, u_long, void *);
static void	mwl_radar_proc(void *, int);
static void	mwl_chanswitch_proc(void *, int);
static void	mwl_bawatchdog_proc(void *, int);
static int	mwl_key_alloc(struct ieee80211vap *,
			struct ieee80211_key *,
			ieee80211_keyix *, ieee80211_keyix *);
static int	mwl_key_delete(struct ieee80211vap *,
			const struct ieee80211_key *);
static int	mwl_key_set(struct ieee80211vap *,
			const struct ieee80211_key *);
static int	_mwl_key_set(struct ieee80211vap *,
			const struct ieee80211_key *,
			const uint8_t mac[IEEE80211_ADDR_LEN]);
static int	mwl_mode_init(struct mwl_softc *);
static void	mwl_update_mcast(struct ieee80211com *);
static void	mwl_update_promisc(struct ieee80211com *);
static void	mwl_updateslot(struct ieee80211com *);
static int	mwl_beacon_setup(struct ieee80211vap *);
static void	mwl_beacon_update(struct ieee80211vap *, int);
#ifdef MWL_HOST_PS_SUPPORT
static void	mwl_update_ps(struct ieee80211vap *, int);
static int	mwl_set_tim(struct ieee80211_node *, int);
#endif
static int	mwl_dma_setup(struct mwl_softc *);
static void	mwl_dma_cleanup(struct mwl_softc *);
static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	mwl_node_cleanup(struct ieee80211_node *);
static void	mwl_node_drain(struct ieee80211_node *);
static void	mwl_node_getsignal(const struct ieee80211_node *,
			int8_t *, int8_t *);
static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
			struct ieee80211_mimo_info *);
static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
static void	mwl_rx_proc(void *, int);
static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
static int	mwl_tx_setup(struct mwl_softc *, int, int);
static int	mwl_wme_update(struct ieee80211com *);
static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
static void	mwl_tx_cleanup(struct mwl_softc *);
static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
			     struct mwl_txbuf *, struct mbuf *);
static void	mwl_tx_proc(void *, int);
static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
static void	mwl_draintxq(struct mwl_softc *);
static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
static int	mwl_recv_action(struct ieee80211_node *,
			const struct ieee80211_frame *,
			const uint8_t *, const uint8_t *);
static int	mwl_addba_request(struct ieee80211_node *,
			struct ieee80211_tx_ampdu *, int dialogtoken,
			int baparamset, int batimeout);
static int	mwl_addba_response(struct ieee80211_node *,
			struct ieee80211_tx_ampdu *, int status,
			int baparamset, int batimeout);
static void	mwl_addba_stop(struct ieee80211_node *,
			struct ieee80211_tx_ampdu *);
static int	mwl_startrecv(struct mwl_softc *);
static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
			struct ieee80211_channel *);
static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
static void	mwl_scan_start(struct ieee80211com *);
static void	mwl_scan_end(struct ieee80211com *);
static void	mwl_set_channel(struct ieee80211com *);
static int	mwl_peerstadb(struct ieee80211_node *,
			int aid, int staid, MWL_HAL_PEERINFO *pi);
static int	mwl_localstadb(struct ieee80211vap *);
static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static int	allocstaid(struct mwl_softc *sc, int aid);
static void	delstaid(struct mwl_softc *sc, int staid);
static void	mwl_newassoc(struct ieee80211_node *, int);
static void	mwl_agestations(void *);
static int	mwl_setregdomain(struct ieee80211com *,
			struct ieee80211_regdomain *, int,
			struct ieee80211_channel []);
static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
			struct ieee80211_channel []);
static int	mwl_getchannels(struct mwl_softc *);

static void	mwl_sysctlattach(struct mwl_softc *);
static void	mwl_announce(struct mwl_softc *);

SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Marvell driver parameters");

static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
	    0, "rx descriptors allocated");
static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
	    0, "rx buffers allocated");
static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
	    0, "tx buffers allocated");
static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
	    0, "tx buffers to send at once");
static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
	    0, "max rx buffers to process per interrupt");
static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
	    0, "min free rx buffers before restarting traffic");

#ifdef MWL_DEBUG
static	int mwl_debug = 0;
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
	    0, "control debugging printfs");
enum {
	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
	MWL_DEBUG_ANY		= 0xffffffff
};
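/*
 * For example, the bits above are or'd together into the hw.mwl.debug
 * sysctl/tunable: setting it to 0x5 (MWL_DEBUG_XMIT | MWL_DEBUG_RECV)
 * traces basic xmit and recv operation, while MWL_DEBUG_ANY enables
 * every category.
 */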
#define	IFF_DUMPPKTS_RECV(sc, wh) \
    ((sc->sc_debug & MWL_DEBUG_RECV) && \
      ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IEEE80211_IS_MGMT_BEACON(wh)))
#define	IFF_DUMPPKTS_XMIT(sc) \
	(sc->sc_debug & MWL_DEBUG_XMIT)

#define	DPRINTF(sc, m, fmt, ...) do {				\
	if (sc->sc_debug & (m))					\
		printf(fmt, __VA_ARGS__);			\
} while (0)
#define	KEYPRINTF(sc, hk, mac) do {				\
	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
		mwl_keyprint(sc, __func__, hk, mac);		\
} while (0)
static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
#else
#define	IFF_DUMPPKTS_RECV(sc, wh)	0
#define	IFF_DUMPPKTS_XMIT(sc)		0
#define	DPRINTF(sc, m, fmt, ...)	do { (void )sc; } while (0)
#define	KEYPRINTF(sc, k, mac)		do { (void )sc; } while (0)
#endif

static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");

/*
 * Each packet has fixed front matter: a 2-byte length
 * of the payload, followed by a 4-address 802.11 header
 * (regardless of the actual header and always w/o any
 * QoS header).  The payload then follows.
 */
struct mwltxrec {
	uint16_t fwlen;
	struct ieee80211_frame_addr4 wh;
} __packed;

/*
 * Read/Write shorthands for accesses to BAR 0.  Note
 * that all BAR 1 operations are done in the "hal" and
 * there should be no reference to them here.
 */
#ifdef MWL_DEBUG
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
#endif

static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}

int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	/*
	 * Setup the RX free list lock early, so it can be consistently
	 * removed.
	 */
	MWL_RXFREE_INIT(sc);

	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		device_printf(sc->sc_dev, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to setup descriptors: %d\n",
		    error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, 1);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
	mbufq_init(&sc->sc_snd, ifqmaxlen);

	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", device_get_nameunit(sc->sc_dev));

	NET_TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		device_printf(sc->sc_dev,
		    "unable to setup xmit queue for %s traffic!\n",
		     ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_hwspecs.macAddr);

	/* call MI attach routine. */
	ieee80211_ifattach(ic);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;
	ic->ic_transmit = mwl_transmit;
	ic->ic_ioctl = mwl_ioctl;
	ic->ic_parent = mwl_parent;

	ic->ic_node_alloc = mwl_node_alloc;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	MWL_RXFREE_DESTROY(sc);
	sc->sc_invalid = 1;
	return error;
}

int
mwl_detach(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	mbufq_drain(&sc->sc_snd);

	return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
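/*
 * For example, if the EEPROM address begins with an octet of 0x00, the
 * second vap (index 1) gets (1 << 2) | 0x2 = 0x06 or'd into mac[0],
 * i.e. a locally administered 06:xx:xx:xx:xx:xx address; reclaim_address
 * recovers the index by shifting that octet right by 2.
 */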
static void
assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 32; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	if (i == 0)
		sc->sc_nbssid0++;
}

static void
reclaim_address(struct mwl_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;
	if (i != 0 || --sc->sc_nbssid0 == 0)
		sc->sc_bssidmask &= ~(1<<i);
}

static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;
	}

	mvp = malloc(sizeof(struct mwl_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status,
	    mac);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}

static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (sc->sc_running) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	if (sc->sc_running)
		mwl_hal_intrset(mh, sc->sc_imask);
}

void
mwl_suspend(struct mwl_softc *sc)
{

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}

void
mwl_resume(struct mwl_softc *sc)
{
	int error = EDOOFUS;

	MWL_LOCK(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		error = mwl_init(sc);
	MWL_UNLOCK(sc);

	if (error == 0)
		ieee80211_start_all(&sc->sc_ic);	/* start all vap's */
}

void
mwl_shutdown(void *arg)
{
	struct mwl_softc *sc = arg;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}

static void
mwl_radar_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
	    __func__, pending);

	sc->sc_stats.mst_radardetect++;
	/* XXX stop h/w BA streams? */

	IEEE80211_LOCK(ic);
	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
	IEEE80211_UNLOCK(ic);
}

static void
mwl_chanswitch_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
	    __func__, pending);

	IEEE80211_LOCK(ic);
	sc->sc_csapending = 0;
	ieee80211_csa_completeswitch(ic);
	IEEE80211_UNLOCK(ic);
}

static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
	struct ieee80211_node *ni = sp->data[0];

	/* send DELBA and drop the stream */
	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}

static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	if (bitmap == 0xff) {
		n = 0;
		/* disable all ba streams */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/* disable a single ba stream */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}

/*
 * Convert net80211 channel to a HAL channel.
 */
static void
mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
{
	hc->channel = chan->ic_ieee;

	*(uint32_t *)&hc->channelFlags = 0;
	if (IEEE80211_IS_CHAN_2GHZ(chan))
		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
	else if (IEEE80211_IS_CHAN_5GHZ(chan))
		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
	if (IEEE80211_IS_CHAN_HT40(chan)) {
		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
		if (IEEE80211_IS_CHAN_HT40U(chan))
			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
		else
			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
	} else
		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
	/* XXX 10MHz channels */
}
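/*
 * For example, an HT40U channel whose control channel is IEEE channel 6
 * maps to channel = 6, FreqBand = 2.4GHz, ChnlWidth = 40MHz and
 * ExtChnlOffset = above-the-control-channel.
 */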
955cf4c5a53SSam Leffler 
956cf4c5a53SSam Leffler /*
957cf4c5a53SSam Leffler  * Inform firmware of our tx/rx dma setup.  The BAR 0
958cf4c5a53SSam Leffler  * writes below are for compatibility with older firmware.
959cf4c5a53SSam Leffler  * For current firmware we send this information with a
960cf4c5a53SSam Leffler  * cmd block via mwl_hal_sethwdma.
961cf4c5a53SSam Leffler  */
962cf4c5a53SSam Leffler static int
mwl_setupdma(struct mwl_softc * sc)963cf4c5a53SSam Leffler mwl_setupdma(struct mwl_softc *sc)
964cf4c5a53SSam Leffler {
965cf4c5a53SSam Leffler 	int error, i;
966cf4c5a53SSam Leffler 
967cf4c5a53SSam Leffler 	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
968cf4c5a53SSam Leffler 	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
969cf4c5a53SSam Leffler 	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
970cf4c5a53SSam Leffler 
9717850fa71SSam Leffler 	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
972cf4c5a53SSam Leffler 		struct mwl_txq *txq = &sc->sc_txq[i];
973cf4c5a53SSam Leffler 		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
974cf4c5a53SSam Leffler 		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
975cf4c5a53SSam Leffler 	}
976cf4c5a53SSam Leffler 	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
9777850fa71SSam Leffler 	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;
978cf4c5a53SSam Leffler 
979cf4c5a53SSam Leffler 	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
980cf4c5a53SSam Leffler 	if (error != 0) {
981cf4c5a53SSam Leffler 		device_printf(sc->sc_dev,
982cf4c5a53SSam Leffler 		    "unable to setup tx/rx dma; hal status %u\n", error);
983cf4c5a53SSam Leffler 		/* XXX */
984cf4c5a53SSam Leffler 	}
985cf4c5a53SSam Leffler 	return error;
986cf4c5a53SSam Leffler }
987cf4c5a53SSam Leffler 
988cf4c5a53SSam Leffler /*
989cf4c5a53SSam Leffler  * Inform firmware of tx rate parameters.
990cf4c5a53SSam Leffler  * Called after a channel change.
991cf4c5a53SSam Leffler  */
992cf4c5a53SSam Leffler static int
mwl_setcurchanrates(struct mwl_softc * sc)993cf4c5a53SSam Leffler mwl_setcurchanrates(struct mwl_softc *sc)
994cf4c5a53SSam Leffler {
9957a79cebfSGleb Smirnoff 	struct ieee80211com *ic = &sc->sc_ic;
996cf4c5a53SSam Leffler 	const struct ieee80211_rateset *rs;
997cf4c5a53SSam Leffler 	MWL_HAL_TXRATE rates;
998cf4c5a53SSam Leffler 
999cf4c5a53SSam Leffler 	memset(&rates, 0, sizeof(rates));
1000cf4c5a53SSam Leffler 	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1001cf4c5a53SSam Leffler 	/* rate used to send management frames */
1002cf4c5a53SSam Leffler 	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1003cf4c5a53SSam Leffler 	/* rate used to send multicast frames */
1004cf4c5a53SSam Leffler 	rates.McastRate = rates.MgtRate;
1005cf4c5a53SSam Leffler 
1006cf4c5a53SSam Leffler 	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1007cf4c5a53SSam Leffler }
1008cf4c5a53SSam Leffler 
1009cf4c5a53SSam Leffler /*
1010cf4c5a53SSam Leffler  * Inform firmware of tx rate parameters.  Called whenever
1011cf4c5a53SSam Leffler  * user-settable params change and after a channel change.
1012cf4c5a53SSam Leffler  */
1013cf4c5a53SSam Leffler static int
1014cf4c5a53SSam Leffler mwl_setrates(struct ieee80211vap *vap)
1015cf4c5a53SSam Leffler {
1016cf4c5a53SSam Leffler 	struct mwl_vap *mvp = MWL_VAP(vap);
1017cf4c5a53SSam Leffler 	struct ieee80211_node *ni = vap->iv_bss;
1018cf4c5a53SSam Leffler 	const struct ieee80211_txparam *tp = ni->ni_txparms;
1019cf4c5a53SSam Leffler 	MWL_HAL_TXRATE rates;
1020cf4c5a53SSam Leffler 
1021cf4c5a53SSam Leffler 	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1022cf4c5a53SSam Leffler 
1023cf4c5a53SSam Leffler 	/*
1024cf4c5a53SSam Leffler 	 * Update the h/w rate map.
1025cf4c5a53SSam Leffler 	 * NB: 0x80 for MCS is passed through unchanged
1026cf4c5a53SSam Leffler 	 */
1027cf4c5a53SSam Leffler 	memset(&rates, 0, sizeof(rates));
1028cf4c5a53SSam Leffler 	/* rate used to send management frames */
1029cf4c5a53SSam Leffler 	rates.MgtRate = tp->mgmtrate;
1030cf4c5a53SSam Leffler 	/* rate used to send multicast frames */
1031cf4c5a53SSam Leffler 	rates.McastRate = tp->mcastrate;
1032cf4c5a53SSam Leffler 
1033cf4c5a53SSam Leffler 	/* while here calculate EAPOL fixed rate cookie */
1034cf4c5a53SSam Leffler 	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1035cf4c5a53SSam Leffler 
10367850fa71SSam Leffler 	return mwl_hal_settxrate(mvp->mv_hvap,
10377850fa71SSam Leffler 	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
10387850fa71SSam Leffler 		RATE_FIXED : RATE_AUTO, &rates);
1039cf4c5a53SSam Leffler }
1040cf4c5a53SSam Leffler 
1041cf4c5a53SSam Leffler /*
1042cf4c5a53SSam Leffler  * Setup a fixed xmit rate cookie for EAPOL frames.
1043cf4c5a53SSam Leffler  */
1044cf4c5a53SSam Leffler static void
1045cf4c5a53SSam Leffler mwl_seteapolformat(struct ieee80211vap *vap)
1046cf4c5a53SSam Leffler {
1047cf4c5a53SSam Leffler 	struct mwl_vap *mvp = MWL_VAP(vap);
1048cf4c5a53SSam Leffler 	struct ieee80211_node *ni = vap->iv_bss;
1049cf4c5a53SSam Leffler 	enum ieee80211_phymode mode;
1050cf4c5a53SSam Leffler 	uint8_t rate;
1051cf4c5a53SSam Leffler 
1052cf4c5a53SSam Leffler 	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1053cf4c5a53SSam Leffler 
1054cf4c5a53SSam Leffler 	mode = ieee80211_chan2mode(ni->ni_chan);
1055cf4c5a53SSam Leffler 	/*
1056cf4c5a53SSam Leffler 	 * Use legacy rates when operating a mixed HT+non-HT bss.
1057cf4c5a53SSam Leffler 	 * NB: this may violate POLA for sta and wds vaps.
1058cf4c5a53SSam Leffler 	 */
1059cf4c5a53SSam Leffler 	if (mode == IEEE80211_MODE_11NA &&
106073c1905dSSam Leffler 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1061cf4c5a53SSam Leffler 		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1062cf4c5a53SSam Leffler 	else if (mode == IEEE80211_MODE_11NG &&
106373c1905dSSam Leffler 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1064cf4c5a53SSam Leffler 		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1065cf4c5a53SSam Leffler 	else
1066cf4c5a53SSam Leffler 		rate = vap->iv_txparms[mode].mgmtrate;
1067cf4c5a53SSam Leffler 
1068cf4c5a53SSam Leffler 	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1069cf4c5a53SSam Leffler }
1070cf4c5a53SSam Leffler 
1071cf4c5a53SSam Leffler /*
1072cf4c5a53SSam Leffler  * Map SKU+country code to region code for radar bin'ing.
1073cf4c5a53SSam Leffler  */
1074cf4c5a53SSam Leffler static int
1075cf4c5a53SSam Leffler mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1076cf4c5a53SSam Leffler {
1077cf4c5a53SSam Leffler 	switch (rd->regdomain) {
1078cf4c5a53SSam Leffler 	case SKU_FCC:
1079cf4c5a53SSam Leffler 	case SKU_FCC3:
1080cf4c5a53SSam Leffler 		return DOMAIN_CODE_FCC;
1081cf4c5a53SSam Leffler 	case SKU_CA:
1082cf4c5a53SSam Leffler 		return DOMAIN_CODE_IC;
1083cf4c5a53SSam Leffler 	case SKU_ETSI:
1084cf4c5a53SSam Leffler 	case SKU_ETSI2:
1085cf4c5a53SSam Leffler 	case SKU_ETSI3:
1086cf4c5a53SSam Leffler 		if (rd->country == CTRY_SPAIN)
1087cf4c5a53SSam Leffler 			return DOMAIN_CODE_SPAIN;
1088cf4c5a53SSam Leffler 		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1089cf4c5a53SSam Leffler 			return DOMAIN_CODE_FRANCE;
1090cf4c5a53SSam Leffler 		/* XXX force 1.3.1 radar type */
1091cf4c5a53SSam Leffler 		return DOMAIN_CODE_ETSI_131;
1092cf4c5a53SSam Leffler 	case SKU_JAPAN:
1093cf4c5a53SSam Leffler 		return DOMAIN_CODE_MKK;
1094cf4c5a53SSam Leffler 	case SKU_ROW:
1095cf4c5a53SSam Leffler 		return DOMAIN_CODE_DGT;	/* Taiwan */
1096cf4c5a53SSam Leffler 	case SKU_APAC:
1097cf4c5a53SSam Leffler 	case SKU_APAC2:
1098cf4c5a53SSam Leffler 	case SKU_APAC3:
1099cf4c5a53SSam Leffler 		return DOMAIN_CODE_AUS;	/* Australia */
1100cf4c5a53SSam Leffler 	}
1101cf4c5a53SSam Leffler 	/* XXX KOREA? */
1102cf4c5a53SSam Leffler 	return DOMAIN_CODE_FCC;			/* XXX? */
1103cf4c5a53SSam Leffler }
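
/*
 * Illustrative sketch (not compiled): an ETSI SKU is refined by the
 * country code, so a French regdomain selects the France radar bin
 * while other ETSI countries fall back to the generic 1.3.1 type:
 */
#if 0
	struct ieee80211_regdomain rd = {
		.regdomain	= SKU_ETSI,
		.country	= CTRY_FRANCE,
	};
	int code = mwl_map2regioncode(&rd);	/* DOMAIN_CODE_FRANCE */
#endif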
1104cf4c5a53SSam Leffler 
1105cf4c5a53SSam Leffler static int
1106cf4c5a53SSam Leffler mwl_hal_reset(struct mwl_softc *sc)
1107cf4c5a53SSam Leffler {
11087a79cebfSGleb Smirnoff 	struct ieee80211com *ic = &sc->sc_ic;
1109cf4c5a53SSam Leffler 	struct mwl_hal *mh = sc->sc_mh;
1110cf4c5a53SSam Leffler 
1111cf4c5a53SSam Leffler 	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
1112cf4c5a53SSam Leffler 	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
1113cf4c5a53SSam Leffler 	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
1114cf4c5a53SSam Leffler 	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
1115cf4c5a53SSam Leffler 	mwl_chan_set(sc, ic->ic_curchan);
11167850fa71SSam Leffler 	/* NB: RF/RA performance tuned for indoor mode */
11177850fa71SSam Leffler 	mwl_hal_setrateadaptmode(mh, 0);
1118cf4c5a53SSam Leffler 	mwl_hal_setoptimizationlevel(mh,
1119cf4c5a53SSam Leffler 	    (ic->ic_flags & IEEE80211_F_BURST) != 0);
1120cf4c5a53SSam Leffler 
1121cf4c5a53SSam Leffler 	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));
1122cf4c5a53SSam Leffler 
11237850fa71SSam Leffler 	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
11247850fa71SSam Leffler 	mwl_hal_setcfend(mh, 0);			/* XXX */
11257850fa71SSam Leffler 
1126cf4c5a53SSam Leffler 	return 1;
1127cf4c5a53SSam Leffler }
1128cf4c5a53SSam Leffler 
1129cf4c5a53SSam Leffler static int
11307a79cebfSGleb Smirnoff mwl_init(struct mwl_softc *sc)
1131cf4c5a53SSam Leffler {
1132cf4c5a53SSam Leffler 	struct mwl_hal *mh = sc->sc_mh;
1133cf4c5a53SSam Leffler 	int error = 0;
1134cf4c5a53SSam Leffler 
1135cf4c5a53SSam Leffler 	MWL_LOCK_ASSERT(sc);
1136cf4c5a53SSam Leffler 
1137cf4c5a53SSam Leffler 	/*
1138cf4c5a53SSam Leffler 	 * Stop anything previously setup.  This is safe
1139cf4c5a53SSam Leffler 	 * whether this is the first time through or not.
1140cf4c5a53SSam Leffler 	 */
11417a79cebfSGleb Smirnoff 	mwl_stop(sc);
1142cf4c5a53SSam Leffler 
1143cf4c5a53SSam Leffler 	/*
1144cf4c5a53SSam Leffler 	 * Push vap-independent state to the firmware.
1145cf4c5a53SSam Leffler 	 */
1146cf4c5a53SSam Leffler 	if (!mwl_hal_reset(sc)) {
11477a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "unable to reset hardware\n");
1148cf4c5a53SSam Leffler 		return EIO;
1149cf4c5a53SSam Leffler 	}
1150cf4c5a53SSam Leffler 
1151cf4c5a53SSam Leffler 	/*
1152cf4c5a53SSam Leffler 	 * Setup recv (once); transmit is already good to go.
1153cf4c5a53SSam Leffler 	 */
1154cf4c5a53SSam Leffler 	error = mwl_startrecv(sc);
1155cf4c5a53SSam Leffler 	if (error != 0) {
11567a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "unable to start recv logic\n");
1157cf4c5a53SSam Leffler 		return error;
1158cf4c5a53SSam Leffler 	}
1159cf4c5a53SSam Leffler 
1160cf4c5a53SSam Leffler 	/*
1161cf4c5a53SSam Leffler 	 * Enable interrupts.
1162cf4c5a53SSam Leffler 	 */
1163cf4c5a53SSam Leffler 	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
1164cf4c5a53SSam Leffler 		     | MACREG_A2HRIC_BIT_TX_DONE
1165cf4c5a53SSam Leffler 		     | MACREG_A2HRIC_BIT_OPC_DONE
1166cf4c5a53SSam Leffler #if 0
1167cf4c5a53SSam Leffler 		     | MACREG_A2HRIC_BIT_MAC_EVENT
1168cf4c5a53SSam Leffler #endif
1169cf4c5a53SSam Leffler 		     | MACREG_A2HRIC_BIT_ICV_ERROR
1170cf4c5a53SSam Leffler 		     | MACREG_A2HRIC_BIT_RADAR_DETECT
1171cf4c5a53SSam Leffler 		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
1172cf4c5a53SSam Leffler #if 0
1173cf4c5a53SSam Leffler 		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
1174cf4c5a53SSam Leffler #endif
1175cf4c5a53SSam Leffler 		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
11767850fa71SSam Leffler 		     | MACREQ_A2HRIC_BIT_TX_ACK
1177cf4c5a53SSam Leffler 		     ;
1178cf4c5a53SSam Leffler 
11797a79cebfSGleb Smirnoff 	sc->sc_running = 1;
1180cf4c5a53SSam Leffler 	mwl_hal_intrset(mh, sc->sc_imask);
11817cf545d0SJohn Baldwin 	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
1182cf4c5a53SSam Leffler 
1183cf4c5a53SSam Leffler 	return 0;
1184cf4c5a53SSam Leffler }
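
/*
 * Illustrative sketch (not compiled): mwl_init expects the softc lock
 * to be held (see the MWL_LOCK_ASSERT above), so a caller bringing the
 * interface up would bracket it roughly like this:
 */
#if 0
	MWL_LOCK(sc);
	error = mwl_init(sc);	/* reset h/w, start recv, unmask irqs */
	MWL_UNLOCK(sc);
#endif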
1185cf4c5a53SSam Leffler 
1186cf4c5a53SSam Leffler static void
11877a79cebfSGleb Smirnoff mwl_stop(struct mwl_softc *sc)
1188cf4c5a53SSam Leffler {
1189cf4c5a53SSam Leffler 
1190cf4c5a53SSam Leffler 	MWL_LOCK_ASSERT(sc);
11917a79cebfSGleb Smirnoff 	if (sc->sc_running) {
1192cf4c5a53SSam Leffler 		/*
1193cf4c5a53SSam Leffler 		 * Shutdown the hardware and driver.
1194cf4c5a53SSam Leffler 		 */
11957a79cebfSGleb Smirnoff 		sc->sc_running = 0;
11967cf545d0SJohn Baldwin 		callout_stop(&sc->sc_watchdog);
11977cf545d0SJohn Baldwin 		sc->sc_tx_timer = 0;
1198cf4c5a53SSam Leffler 		mwl_draintxq(sc);
1199cf4c5a53SSam Leffler 	}
1200cf4c5a53SSam Leffler }
1201cf4c5a53SSam Leffler 
1202cf4c5a53SSam Leffler static int
1203cf4c5a53SSam Leffler mwl_reset_vap(struct ieee80211vap *vap, int state)
1204cf4c5a53SSam Leffler {
1205cf4c5a53SSam Leffler 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1206cf4c5a53SSam Leffler 	struct ieee80211com *ic = vap->iv_ic;
1207cf4c5a53SSam Leffler 
1208cf4c5a53SSam Leffler 	if (state == IEEE80211_S_RUN)
1209cf4c5a53SSam Leffler 		mwl_setrates(vap);
1210cf4c5a53SSam Leffler 	/* XXX off by 1? */
1211cf4c5a53SSam Leffler 	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
1212cf4c5a53SSam Leffler 	/* XXX auto? 20/40 split? */
121373c1905dSSam Leffler 	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
121473c1905dSSam Leffler 	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
1215cf4c5a53SSam Leffler 	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
1216cf4c5a53SSam Leffler 	    HTPROTECT_NONE : HTPROTECT_AUTO);
1217cf4c5a53SSam Leffler 	/* XXX txpower cap */
1218cf4c5a53SSam Leffler 
1219cf4c5a53SSam Leffler 	/* re-setup beacons */
1220cf4c5a53SSam Leffler 	if (state == IEEE80211_S_RUN &&
1221cf4c5a53SSam Leffler 	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
122259aa14a9SRui Paulo 	     vap->iv_opmode == IEEE80211_M_MBSS ||
1223cf4c5a53SSam Leffler 	     vap->iv_opmode == IEEE80211_M_IBSS)) {
1224cf4c5a53SSam Leffler 		mwl_setapmode(vap, vap->iv_bss->ni_chan);
1225fe5ebb23SBjoern A. Zeeb 		mwl_hal_setnprotmode(hvap, _IEEE80211_MASKSHIFT(
1226fe5ebb23SBjoern A. Zeeb 		    ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1227cf4c5a53SSam Leffler 		return mwl_beacon_setup(vap);
1228cf4c5a53SSam Leffler 	}
1229cf4c5a53SSam Leffler 	return 0;
1230cf4c5a53SSam Leffler }
1231cf4c5a53SSam Leffler 
1232cf4c5a53SSam Leffler /*
1233cf4c5a53SSam Leffler  * Reset the hardware w/o losing operational state.
1234caa7e52fSEitan Adler  * Used to reset or reload hardware state for a vap.
1235cf4c5a53SSam Leffler  */
1236cf4c5a53SSam Leffler static int
1237cf4c5a53SSam Leffler mwl_reset(struct ieee80211vap *vap, u_long cmd)
1238cf4c5a53SSam Leffler {
1239cf4c5a53SSam Leffler 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1240cf4c5a53SSam Leffler 	int error = 0;
1241cf4c5a53SSam Leffler 
1242cf4c5a53SSam Leffler 	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
1243cf4c5a53SSam Leffler 		struct ieee80211com *ic = vap->iv_ic;
12447a79cebfSGleb Smirnoff 		struct mwl_softc *sc = ic->ic_softc;
1245cf4c5a53SSam Leffler 		struct mwl_hal *mh = sc->sc_mh;
1246cf4c5a53SSam Leffler 
12477850fa71SSam Leffler 		/* XXX handle DWDS sta vap change */
1248cf4c5a53SSam Leffler 		/* XXX do we need to disable interrupts? */
1249cf4c5a53SSam Leffler 		mwl_hal_intrset(mh, 0);		/* disable interrupts */
1250cf4c5a53SSam Leffler 		error = mwl_reset_vap(vap, vap->iv_state);
1251cf4c5a53SSam Leffler 		mwl_hal_intrset(mh, sc->sc_imask);
1252cf4c5a53SSam Leffler 	}
1253cf4c5a53SSam Leffler 	return error;
1254cf4c5a53SSam Leffler }
1255cf4c5a53SSam Leffler 
1256cf4c5a53SSam Leffler /*
1257cf4c5a53SSam Leffler  * Allocate a tx buffer for sending a frame.  The
1258cf4c5a53SSam Leffler  * packet is assumed to have the WME AC stored so
1259cf4c5a53SSam Leffler  * we can use it to select the appropriate h/w queue.
1260cf4c5a53SSam Leffler  */
1261cf4c5a53SSam Leffler static struct mwl_txbuf *
1262cf4c5a53SSam Leffler mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1263cf4c5a53SSam Leffler {
1264cf4c5a53SSam Leffler 	struct mwl_txbuf *bf;
1265cf4c5a53SSam Leffler 
1266cf4c5a53SSam Leffler 	/*
1267cf4c5a53SSam Leffler 	 * Grab a TX buffer and associated resources.
1268cf4c5a53SSam Leffler 	 */
1269cf4c5a53SSam Leffler 	MWL_TXQ_LOCK(txq);
1270cf4c5a53SSam Leffler 	bf = STAILQ_FIRST(&txq->free);
1271cf4c5a53SSam Leffler 	if (bf != NULL) {
1272cf4c5a53SSam Leffler 		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1273cf4c5a53SSam Leffler 		txq->nfree--;
1274cf4c5a53SSam Leffler 	}
1275cf4c5a53SSam Leffler 	MWL_TXQ_UNLOCK(txq);
1276cf4c5a53SSam Leffler 	if (bf == NULL)
1277cf4c5a53SSam Leffler 		DPRINTF(sc, MWL_DEBUG_XMIT,
1278cf4c5a53SSam Leffler 		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1279cf4c5a53SSam Leffler 	return bf;
1280cf4c5a53SSam Leffler }
1281cf4c5a53SSam Leffler 
1282cf4c5a53SSam Leffler /*
1283cf4c5a53SSam Leffler  * Return a tx buffer to the queue it came from.  Note there
1284cf4c5a53SSam Leffler  * are two cases because we must preserve the order of buffers
1285cf4c5a53SSam Leffler  * as it reflects the fixed order of descriptors in memory
1286cf4c5a53SSam Leffler  * (the firmware pre-fetches descriptors so we cannot reorder).
1287cf4c5a53SSam Leffler  */
1288cf4c5a53SSam Leffler static void
1289cf4c5a53SSam Leffler mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1290cf4c5a53SSam Leffler {
1291cf4c5a53SSam Leffler 	bf->bf_m = NULL;
1292cf4c5a53SSam Leffler 	bf->bf_node = NULL;
1293cf4c5a53SSam Leffler 	MWL_TXQ_LOCK(txq);
1294cf4c5a53SSam Leffler 	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1295cf4c5a53SSam Leffler 	txq->nfree++;
1296cf4c5a53SSam Leffler 	MWL_TXQ_UNLOCK(txq);
1297cf4c5a53SSam Leffler }
1298cf4c5a53SSam Leffler 
1299cf4c5a53SSam Leffler static void
1300cf4c5a53SSam Leffler mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1301cf4c5a53SSam Leffler {
1302cf4c5a53SSam Leffler 	bf->bf_m = NULL;
1303cf4c5a53SSam Leffler 	bf->bf_node = NULL;
1304cf4c5a53SSam Leffler 	MWL_TXQ_LOCK(txq);
1305cf4c5a53SSam Leffler 	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1306cf4c5a53SSam Leffler 	txq->nfree++;
1307cf4c5a53SSam Leffler 	MWL_TXQ_UNLOCK(txq);
1308cf4c5a53SSam Leffler }
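
/*
 * Illustrative sketch (not compiled): because descriptor order must be
 * preserved, a buffer whose submission failed goes back to the head of
 * the free list (it still fronts the descriptor ring), while a buffer
 * reclaimed after the firmware reports completion is presumably
 * appended to the tail:
 */
#if 0
	bf = mwl_gettxbuf(sc, txq);
	if (bf != NULL && mwl_tx_start(sc, ni, bf, m) != 0)
		mwl_puttxbuf_head(txq, bf);	/* undo the allocation */
	/* ... tx done processing later returns it via mwl_puttxbuf_tail() */
#endif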
1309cf4c5a53SSam Leffler 
13107a79cebfSGleb Smirnoff static int
13117a79cebfSGleb Smirnoff mwl_transmit(struct ieee80211com *ic, struct mbuf *m)
131279d2c5e8SGleb Smirnoff {
13137a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
13147a79cebfSGleb Smirnoff 	int error;
13157a79cebfSGleb Smirnoff 
13167a79cebfSGleb Smirnoff 	MWL_LOCK(sc);
13177a79cebfSGleb Smirnoff 	if (!sc->sc_running) {
13187a79cebfSGleb Smirnoff 		MWL_UNLOCK(sc);
13197a79cebfSGleb Smirnoff 		return (ENXIO);
13207a79cebfSGleb Smirnoff 	}
13217a79cebfSGleb Smirnoff 	error = mbufq_enqueue(&sc->sc_snd, m);
13227a79cebfSGleb Smirnoff 	if (error) {
13237a79cebfSGleb Smirnoff 		MWL_UNLOCK(sc);
13247a79cebfSGleb Smirnoff 		return (error);
13257a79cebfSGleb Smirnoff 	}
13267a79cebfSGleb Smirnoff 	mwl_start(sc);
13277a79cebfSGleb Smirnoff 	MWL_UNLOCK(sc);
13287a79cebfSGleb Smirnoff 	return (0);
13297a79cebfSGleb Smirnoff }
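
/*
 * Illustrative note: frames arrive here via the net80211 ic_transmit
 * method, are staged on the sc_snd mbuf queue, and are drained inline
 * by mwl_start() below under the same softc lock; the firmware "tx
 * start" doorbell is rung only every mwl_txcoalesce frames (plus once
 * for any remainder) rather than per packet.
 */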
13307a79cebfSGleb Smirnoff 
13317a79cebfSGleb Smirnoff static void
13327a79cebfSGleb Smirnoff mwl_start(struct mwl_softc *sc)
13337a79cebfSGleb Smirnoff {
1334cf4c5a53SSam Leffler 	struct ieee80211_node *ni;
1335cf4c5a53SSam Leffler 	struct mwl_txbuf *bf;
1336cf4c5a53SSam Leffler 	struct mbuf *m;
1337cf4c5a53SSam Leffler 	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
1338cf4c5a53SSam Leffler 	int nqueued;
1339cf4c5a53SSam Leffler 
13407a79cebfSGleb Smirnoff 	MWL_LOCK_ASSERT(sc);
13417a79cebfSGleb Smirnoff 	if (!sc->sc_running || sc->sc_invalid)
1342cf4c5a53SSam Leffler 		return;
1343cf4c5a53SSam Leffler 	nqueued = 0;
13447a79cebfSGleb Smirnoff 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
1345cf4c5a53SSam Leffler 		/*
1346cf4c5a53SSam Leffler 		 * Grab the node for the destination.
1347cf4c5a53SSam Leffler 		 */
1348cf4c5a53SSam Leffler 		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
1349cf4c5a53SSam Leffler 		KASSERT(ni != NULL, ("no node"));
1350cf4c5a53SSam Leffler 		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
1351cf4c5a53SSam Leffler 		/*
1352cf4c5a53SSam Leffler 		 * Grab a TX buffer and associated resources.
1353cf4c5a53SSam Leffler 		 * We honor the classification by the 802.11 layer.
1354cf4c5a53SSam Leffler 		 */
1355cf4c5a53SSam Leffler 		txq = sc->sc_ac2q[M_WME_GETAC(m)];
1356cf4c5a53SSam Leffler 		bf = mwl_gettxbuf(sc, txq);
1357cf4c5a53SSam Leffler 		if (bf == NULL) {
1358cf4c5a53SSam Leffler 			m_freem(m);
1359cf4c5a53SSam Leffler 			ieee80211_free_node(ni);
1360cf4c5a53SSam Leffler #ifdef MWL_TX_NODROP
1361cf4c5a53SSam Leffler 			sc->sc_stats.mst_tx_qstop++;
1362cf4c5a53SSam Leffler 			break;
1363cf4c5a53SSam Leffler #else
1364cf4c5a53SSam Leffler 			DPRINTF(sc, MWL_DEBUG_XMIT,
1365cf4c5a53SSam Leffler 			    "%s: tail drop on q %d\n", __func__, txq->qnum);
1366cf4c5a53SSam Leffler 			sc->sc_stats.mst_tx_qdrop++;
1367cf4c5a53SSam Leffler 			continue;
1368cf4c5a53SSam Leffler #endif /* MWL_TX_NODROP */
1369cf4c5a53SSam Leffler 		}
1370cf4c5a53SSam Leffler 
1371cf4c5a53SSam Leffler 		/*
1372cf4c5a53SSam Leffler 		 * Pass the frame to the h/w for transmission.
1373cf4c5a53SSam Leffler 		 */
1374cf4c5a53SSam Leffler 		if (mwl_tx_start(sc, ni, bf, m)) {
13757a79cebfSGleb Smirnoff 			if_inc_counter(ni->ni_vap->iv_ifp,
13767a79cebfSGleb Smirnoff 			    IFCOUNTER_OERRORS, 1);
1377cf4c5a53SSam Leffler 			mwl_puttxbuf_head(txq, bf);
1378cf4c5a53SSam Leffler 			ieee80211_free_node(ni);
1379cf4c5a53SSam Leffler 			continue;
1380cf4c5a53SSam Leffler 		}
1381cf4c5a53SSam Leffler 		nqueued++;
1382cf4c5a53SSam Leffler 		if (nqueued >= mwl_txcoalesce) {
1383cf4c5a53SSam Leffler 			/*
1384cf4c5a53SSam Leffler 			 * Poke the firmware to process queued frames;
1385cf4c5a53SSam Leffler 			 * see below about (lack of) locking.
1386cf4c5a53SSam Leffler 			 */
1387cf4c5a53SSam Leffler 			nqueued = 0;
1388cf4c5a53SSam Leffler 			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
1389cf4c5a53SSam Leffler 		}
1390cf4c5a53SSam Leffler 	}
1391cf4c5a53SSam Leffler 	if (nqueued) {
1392cf4c5a53SSam Leffler 		/*
1393cf4c5a53SSam Leffler 		 * NB: We don't need to lock against tx done because
1394cf4c5a53SSam Leffler 		 * this just prods the firmware to check the transmit
1395cf4c5a53SSam Leffler 		 * descriptors.  The firmware will also start fetching
1396cf4c5a53SSam Leffler 		 * descriptors by itself if it notices new ones are
1397cf4c5a53SSam Leffler 		 * present when it goes to deliver a tx done interrupt
1398cf4c5a53SSam Leffler 		 * to the host. So if we race with tx done processing
1399cf4c5a53SSam Leffler 		 * it's ok.  Delivering the kick here rather than in
1400cf4c5a53SSam Leffler 		 * mwl_tx_start is an optimization to avoid poking the
1401cf4c5a53SSam Leffler 		 * firmware for each packet.
1402cf4c5a53SSam Leffler 		 *
1403cf4c5a53SSam Leffler 		 * NB: the queue id isn't used so 0 is ok.
1404cf4c5a53SSam Leffler 		 */
1405cf4c5a53SSam Leffler 		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
1406cf4c5a53SSam Leffler 	}
1407cf4c5a53SSam Leffler }
1408cf4c5a53SSam Leffler 
1409cf4c5a53SSam Leffler static int
1410cf4c5a53SSam Leffler mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
1411cf4c5a53SSam Leffler 	const struct ieee80211_bpf_params *params)
1412cf4c5a53SSam Leffler {
1413cf4c5a53SSam Leffler 	struct ieee80211com *ic = ni->ni_ic;
14147a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
1415cf4c5a53SSam Leffler 	struct mwl_txbuf *bf;
1416cf4c5a53SSam Leffler 	struct mwl_txq *txq;
1417cf4c5a53SSam Leffler 
14187a79cebfSGleb Smirnoff 	if (!sc->sc_running || sc->sc_invalid) {
1419cf4c5a53SSam Leffler 		m_freem(m);
1420cf4c5a53SSam Leffler 		return ENETDOWN;
1421cf4c5a53SSam Leffler 	}
1422cf4c5a53SSam Leffler 	/*
1423cf4c5a53SSam Leffler 	 * Grab a TX buffer and associated resources.
1424cf4c5a53SSam Leffler 	 * Note that we depend on the classification
1425cf4c5a53SSam Leffler 	 * by the 802.11 layer to get to the right h/w
1426cf4c5a53SSam Leffler 	 * queue.  Management frames must ALWAYS go on
1427cf4c5a53SSam Leffler 	 * queue 1 but we cannot just force that here
1428cf4c5a53SSam Leffler 	 * because we may receive non-mgt frames.
1429cf4c5a53SSam Leffler 	 */
1430cf4c5a53SSam Leffler 	txq = sc->sc_ac2q[M_WME_GETAC(m)];
1431cf4c5a53SSam Leffler 	bf = mwl_gettxbuf(sc, txq);
1432cf4c5a53SSam Leffler 	if (bf == NULL) {
1433cf4c5a53SSam Leffler 		sc->sc_stats.mst_tx_qstop++;
1434cf4c5a53SSam Leffler 		m_freem(m);
1435cf4c5a53SSam Leffler 		return ENOBUFS;
1436cf4c5a53SSam Leffler 	}
1437cf4c5a53SSam Leffler 	/*
1438cf4c5a53SSam Leffler 	 * Pass the frame to the h/w for transmission.
1439cf4c5a53SSam Leffler 	 */
1440cf4c5a53SSam Leffler 	if (mwl_tx_start(sc, ni, bf, m)) {
1441cf4c5a53SSam Leffler 		mwl_puttxbuf_head(txq, bf);
1442cf4c5a53SSam Leffler 
1443cf4c5a53SSam Leffler 		return EIO;		/* XXX */
1444cf4c5a53SSam Leffler 	}
1445cf4c5a53SSam Leffler 	/*
1446cf4c5a53SSam Leffler 	 * NB: We don't need to lock against tx done because
1447cf4c5a53SSam Leffler 	 * this just prods the firmware to check the transmit
1448cf4c5a53SSam Leffler 	 * descriptors.  The firmware will also start fetching
1449cf4c5a53SSam Leffler 	 * descriptors by itself if it notices new ones are
1450cf4c5a53SSam Leffler 	 * present when it goes to deliver a tx done interrupt
1451cf4c5a53SSam Leffler 	 * to the host. So if we race with tx done processing
1452cf4c5a53SSam Leffler 	 * it's ok.  Delivering the kick here rather than in
1453cf4c5a53SSam Leffler 	 * mwl_tx_start is an optimization to avoid poking the
1454cf4c5a53SSam Leffler 	 * firmware for each packet.
1455cf4c5a53SSam Leffler 	 *
1456cf4c5a53SSam Leffler 	 * NB: the queue id isn't used so 0 is ok.
1457cf4c5a53SSam Leffler 	 */
1458cf4c5a53SSam Leffler 	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
1459cf4c5a53SSam Leffler 	return 0;
1460cf4c5a53SSam Leffler }
1461cf4c5a53SSam Leffler 
1462cf4c5a53SSam Leffler static int
146313f2ef16SJustin Hibbits mwl_media_change(if_t ifp)
1464cf4c5a53SSam Leffler {
1465c6167b4bSBjoern A. Zeeb 	struct ieee80211vap *vap;
1466cf4c5a53SSam Leffler 	int error;
1467cf4c5a53SSam Leffler 
1468cf4c5a53SSam Leffler 	/* NB: only the fixed rate can change and that doesn't need a reset */
1469c6167b4bSBjoern A. Zeeb 	error = ieee80211_media_change(ifp);
1470c6167b4bSBjoern A. Zeeb 	if (error != 0)
1471c6167b4bSBjoern A. Zeeb 		return (error);
1472c6167b4bSBjoern A. Zeeb 
147313f2ef16SJustin Hibbits 	vap = if_getsoftc(ifp);
1474cf4c5a53SSam Leffler 	mwl_setrates(vap);
1475c6167b4bSBjoern A. Zeeb 	return (0);
1476cf4c5a53SSam Leffler }
1477cf4c5a53SSam Leffler 
1478cf4c5a53SSam Leffler #ifdef MWL_DEBUG
1479cf4c5a53SSam Leffler static void
1480cf4c5a53SSam Leffler mwl_keyprint(struct mwl_softc *sc, const char *tag,
1481cf4c5a53SSam Leffler 	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
1482cf4c5a53SSam Leffler {
1483cf4c5a53SSam Leffler 	static const char *ciphers[] = {
1484cf4c5a53SSam Leffler 		"WEP",
1485cf4c5a53SSam Leffler 		"TKIP",
1486cf4c5a53SSam Leffler 		"AES-CCM",
1487cf4c5a53SSam Leffler 	};
1488cf4c5a53SSam Leffler 	int i, n;
1489cf4c5a53SSam Leffler 
1490cf4c5a53SSam Leffler 	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
1491cf4c5a53SSam Leffler 	for (i = 0, n = hk->keyLen; i < n; i++)
1492cf4c5a53SSam Leffler 		printf(" %02x", hk->key.aes[i]);
1493cf4c5a53SSam Leffler 	printf(" mac %s", ether_sprintf(mac));
1494cf4c5a53SSam Leffler 	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
1495cf4c5a53SSam Leffler 		printf(" %s", "rxmic");
1496cf4c5a53SSam Leffler 		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
1497cf4c5a53SSam Leffler 			printf(" %02x", hk->key.tkip.rxMic[i]);
1498cf4c5a53SSam Leffler 		printf(" txmic");
1499cf4c5a53SSam Leffler 		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
1500cf4c5a53SSam Leffler 			printf(" %02x", hk->key.tkip.txMic[i]);
1501cf4c5a53SSam Leffler 	}
1502cf4c5a53SSam Leffler 	printf(" flags 0x%x\n", hk->keyFlags);
1503cf4c5a53SSam Leffler }
1504cf4c5a53SSam Leffler #endif
1505cf4c5a53SSam Leffler 
1506cf4c5a53SSam Leffler /*
1507cf4c5a53SSam Leffler  * Allocate a key cache slot for a unicast key.  The
1508cf4c5a53SSam Leffler  * firmware handles key allocation and every station is
1509cf4c5a53SSam Leffler  * guaranteed key space so we are always successful.
1510cf4c5a53SSam Leffler  */
1511cf4c5a53SSam Leffler static int
1512cf4c5a53SSam Leffler mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1513cf4c5a53SSam Leffler 	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1514cf4c5a53SSam Leffler {
15157a79cebfSGleb Smirnoff 	struct mwl_softc *sc = vap->iv_ic->ic_softc;
1516cf4c5a53SSam Leffler 
1517cf4c5a53SSam Leffler 	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1518cf4c5a53SSam Leffler 	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
15192589197aSAdrian Chadd 		if (!ieee80211_is_key_global(vap, k)) {
1520cf4c5a53SSam Leffler 			/* should not happen */
1521cf4c5a53SSam Leffler 			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1522cf4c5a53SSam Leffler 				"%s: bogus group key\n", __func__);
1523cf4c5a53SSam Leffler 			return 0;
1524cf4c5a53SSam Leffler 		}
1525cf4c5a53SSam Leffler 		/* give the caller what they requested */
15264a19d712SAndriy Voskoboinyk 		*keyix = *rxkeyix = ieee80211_crypto_get_key_wepidx(vap, k);
1527cf4c5a53SSam Leffler 	} else {
1528cf4c5a53SSam Leffler 		/*
1529cf4c5a53SSam Leffler 		 * Firmware handles key allocation.
1530cf4c5a53SSam Leffler 		 */
1531cf4c5a53SSam Leffler 		*keyix = *rxkeyix = 0;
1532cf4c5a53SSam Leffler 	}
1533cf4c5a53SSam Leffler 	return 1;
1534cf4c5a53SSam Leffler }
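
/*
 * Illustrative sketch (not compiled): a global/static WEP key keeps
 * its net80211 index, while a pairwise key reports index 0 and the
 * firmware picks the real slot:
 */
#if 0
	ieee80211_keyix kix, rxkix;

	(void) mwl_key_alloc(vap, &vap->iv_nw_keys[1], &kix, &rxkix);
	/* kix == rxkix == 1 (the wep index) for the global key */
#endif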
1535cf4c5a53SSam Leffler 
1536cf4c5a53SSam Leffler /*
1537cf4c5a53SSam Leffler  * Delete a key entry allocated by mwl_key_alloc.
1538cf4c5a53SSam Leffler  */
1539cf4c5a53SSam Leffler static int
1540cf4c5a53SSam Leffler mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1541cf4c5a53SSam Leffler {
15427a79cebfSGleb Smirnoff 	struct mwl_softc *sc = vap->iv_ic->ic_softc;
1543cf4c5a53SSam Leffler 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1544cf4c5a53SSam Leffler 	MWL_HAL_KEYVAL hk;
1545cf4c5a53SSam Leffler 	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1546cf4c5a53SSam Leffler 	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1547cf4c5a53SSam Leffler 
1548cf4c5a53SSam Leffler 	if (hvap == NULL) {
1549cf4c5a53SSam Leffler 		if (vap->iv_opmode != IEEE80211_M_WDS) {
1550cf4c5a53SSam Leffler 			/* XXX monitor mode? */
1551cf4c5a53SSam Leffler 			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1552cf4c5a53SSam Leffler 			    "%s: no hvap for opmode %d\n", __func__,
1553cf4c5a53SSam Leffler 			    vap->iv_opmode);
1554cf4c5a53SSam Leffler 			return 0;
1555cf4c5a53SSam Leffler 		}
1556cf4c5a53SSam Leffler 		hvap = MWL_VAP(vap)->mv_ap_hvap;
1557cf4c5a53SSam Leffler 	}
1558cf4c5a53SSam Leffler 
1559cf4c5a53SSam Leffler 	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1560cf4c5a53SSam Leffler 	    __func__, k->wk_keyix);
1561cf4c5a53SSam Leffler 
1562cf4c5a53SSam Leffler 	memset(&hk, 0, sizeof(hk));
1563cf4c5a53SSam Leffler 	hk.keyIndex = k->wk_keyix;
1564cf4c5a53SSam Leffler 	switch (k->wk_cipher->ic_cipher) {
1565cf4c5a53SSam Leffler 	case IEEE80211_CIPHER_WEP:
1566cf4c5a53SSam Leffler 		hk.keyTypeId = KEY_TYPE_ID_WEP;
1567cf4c5a53SSam Leffler 		break;
1568cf4c5a53SSam Leffler 	case IEEE80211_CIPHER_TKIP:
1569cf4c5a53SSam Leffler 		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1570cf4c5a53SSam Leffler 		break;
1571cf4c5a53SSam Leffler 	case IEEE80211_CIPHER_AES_CCM:
1572cf4c5a53SSam Leffler 		hk.keyTypeId = KEY_TYPE_ID_AES;
1573cf4c5a53SSam Leffler 		break;
1574cf4c5a53SSam Leffler 	default:
1575cf4c5a53SSam Leffler 		/* XXX should not happen */
1576cf4c5a53SSam Leffler 		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1577cf4c5a53SSam Leffler 		    __func__, k->wk_cipher->ic_cipher);
1578cf4c5a53SSam Leffler 		return 0;
1579cf4c5a53SSam Leffler 	}
1580cf4c5a53SSam Leffler 	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
1581cf4c5a53SSam Leffler }
1582cf4c5a53SSam Leffler 
1583cf4c5a53SSam Leffler static __inline int
1584cf4c5a53SSam Leffler addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1585cf4c5a53SSam Leffler {
1586cf4c5a53SSam Leffler 	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1587cf4c5a53SSam Leffler 		if (k->wk_flags & IEEE80211_KEY_XMIT)
1588cf4c5a53SSam Leffler 			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1589cf4c5a53SSam Leffler 		if (k->wk_flags & IEEE80211_KEY_RECV)
1590cf4c5a53SSam Leffler 			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1591cf4c5a53SSam Leffler 		return 1;
1592cf4c5a53SSam Leffler 	} else
1593cf4c5a53SSam Leffler 		return 0;
1594cf4c5a53SSam Leffler }
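
/*
 * Illustrative sketch (not compiled): a GTK flagged for both transmit
 * and receive picks up both group flags, while a pairwise key (no
 * GROUP bit) leaves keyFlags alone and the caller marks it
 * KEY_FLAG_PAIRWISE instead:
 */
#if 0
	MWL_HAL_KEYVAL hk = { 0 };
	struct ieee80211_key gtk = { .wk_flags = IEEE80211_KEY_GROUP |
	    IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV };

	(void) addgroupflags(&hk, &gtk);
	/* hk.keyFlags == (KEY_FLAG_TXGROUPKEY | KEY_FLAG_RXGROUPKEY) */
#endif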
1595cf4c5a53SSam Leffler 
1596cf4c5a53SSam Leffler /*
1597cf4c5a53SSam Leffler  * Set the key cache contents for the specified key.  Key cache
1598cf4c5a53SSam Leffler  * slot(s) must already have been allocated by mwl_key_alloc.
1599cf4c5a53SSam Leffler  */
1600cf4c5a53SSam Leffler static int
1601bc813c40SAdrian Chadd mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
1602bc813c40SAdrian Chadd {
1603bc813c40SAdrian Chadd 	return (_mwl_key_set(vap, k, k->wk_macaddr));
1604bc813c40SAdrian Chadd }
1605bc813c40SAdrian Chadd 
1606bc813c40SAdrian Chadd static int
1607bc813c40SAdrian Chadd _mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
1608cf4c5a53SSam Leffler 	const uint8_t mac[IEEE80211_ADDR_LEN])
1609cf4c5a53SSam Leffler {
1610cf4c5a53SSam Leffler #define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
1611cf4c5a53SSam Leffler /* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
1612cf4c5a53SSam Leffler #define	IEEE80211_IS_STATICKEY(k) \
1613cf4c5a53SSam Leffler 	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
1614cf4c5a53SSam Leffler 	 (GRPXMIT|IEEE80211_KEY_RECV))
16157a79cebfSGleb Smirnoff 	struct mwl_softc *sc = vap->iv_ic->ic_softc;
1616cf4c5a53SSam Leffler 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1617cf4c5a53SSam Leffler 	const struct ieee80211_cipher *cip = k->wk_cipher;
1618cf4c5a53SSam Leffler 	const uint8_t *macaddr;
1619cf4c5a53SSam Leffler 	MWL_HAL_KEYVAL hk;
1620cf4c5a53SSam Leffler 
1621cf4c5a53SSam Leffler 	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
1622cf4c5a53SSam Leffler 		("s/w crypto set?"));
1623cf4c5a53SSam Leffler 
1624cf4c5a53SSam Leffler 	if (hvap == NULL) {
1625cf4c5a53SSam Leffler 		if (vap->iv_opmode != IEEE80211_M_WDS) {
1626cf4c5a53SSam Leffler 			/* XXX monitor mode? */
1627cf4c5a53SSam Leffler 			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1628cf4c5a53SSam Leffler 			    "%s: no hvap for opmode %d\n", __func__,
1629cf4c5a53SSam Leffler 			    vap->iv_opmode);
1630cf4c5a53SSam Leffler 			return 0;
1631cf4c5a53SSam Leffler 		}
1632cf4c5a53SSam Leffler 		hvap = MWL_VAP(vap)->mv_ap_hvap;
1633cf4c5a53SSam Leffler 	}
1634cf4c5a53SSam Leffler 	memset(&hk, 0, sizeof(hk));
1635cf4c5a53SSam Leffler 	hk.keyIndex = k->wk_keyix;
1636cf4c5a53SSam Leffler 	switch (cip->ic_cipher) {
1637cf4c5a53SSam Leffler 	case IEEE80211_CIPHER_WEP:
1638cf4c5a53SSam Leffler 		hk.keyTypeId = KEY_TYPE_ID_WEP;
1639cf4c5a53SSam Leffler 		hk.keyLen = k->wk_keylen;
1640cf4c5a53SSam Leffler 		if (k->wk_keyix == vap->iv_def_txkey)
1641cf4c5a53SSam Leffler 			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
1642cf4c5a53SSam Leffler 		if (!IEEE80211_IS_STATICKEY(k)) {
1643cf4c5a53SSam Leffler 			/* NB: WEP is never used for the PTK */
1644cf4c5a53SSam Leffler 			(void) addgroupflags(&hk, k);
1645cf4c5a53SSam Leffler 		}
1646cf4c5a53SSam Leffler 		break;
1647cf4c5a53SSam Leffler 	case IEEE80211_CIPHER_TKIP:
1648cf4c5a53SSam Leffler 		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1649cf4c5a53SSam Leffler 		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
1650cf4c5a53SSam Leffler 		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
1651cf4c5a53SSam Leffler 		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
1652cf4c5a53SSam Leffler 		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
1653cf4c5a53SSam Leffler 		if (!addgroupflags(&hk, k))
1654cf4c5a53SSam Leffler 			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1655cf4c5a53SSam Leffler 		break;
1656cf4c5a53SSam Leffler 	case IEEE80211_CIPHER_AES_CCM:
1657cf4c5a53SSam Leffler 		hk.keyTypeId = KEY_TYPE_ID_AES;
1658cf4c5a53SSam Leffler 		hk.keyLen = k->wk_keylen;
1659cf4c5a53SSam Leffler 		if (!addgroupflags(&hk, k))
1660cf4c5a53SSam Leffler 			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1661cf4c5a53SSam Leffler 		break;
1662cf4c5a53SSam Leffler 	default:
1663cf4c5a53SSam Leffler 		/* XXX should not happen */
1664cf4c5a53SSam Leffler 		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1665cf4c5a53SSam Leffler 		    __func__, k->wk_cipher->ic_cipher);
1666cf4c5a53SSam Leffler 		return 0;
1667cf4c5a53SSam Leffler 	}
1668cf4c5a53SSam Leffler 	/*
1669cf4c5a53SSam Leffler 	 * NB: tkip mic keys get copied here too; the layout
1670cf4c5a53SSam Leffler 	 *     just happens to match that in ieee80211_key.
1671cf4c5a53SSam Leffler 	 */
1672cf4c5a53SSam Leffler 	memcpy(hk.key.aes, k->wk_key, hk.keyLen);
1673cf4c5a53SSam Leffler 
1674cf4c5a53SSam Leffler 	/*
1675cf4c5a53SSam Leffler 	 * Locate address of sta db entry for writing key;
1676cf4c5a53SSam Leffler 	 * the convention unfortunately is somewhat different
1677cf4c5a53SSam Leffler 	 * than how net80211, hostapd, and wpa_supplicant think.
1678cf4c5a53SSam Leffler 	 */
1679cf4c5a53SSam Leffler 	if (vap->iv_opmode == IEEE80211_M_STA) {
1680cf4c5a53SSam Leffler 		/*
1681cf4c5a53SSam Leffler 		 * NB: keys plumbed before the sta reaches AUTH state
1682cf4c5a53SSam Leffler 		 * will be discarded or written to the wrong sta db
1683cf4c5a53SSam Leffler 		 * entry because iv_bss is meaningless.  This is ok
1684cf4c5a53SSam Leffler 		 * (right now) because we handle deferred plumbing of
1685cf4c5a53SSam Leffler 		 * WEP keys when the sta reaches AUTH state.
1686cf4c5a53SSam Leffler 		 */
1687cf4c5a53SSam Leffler 		macaddr = vap->iv_bss->ni_bssid;
1688e26b433fSSam Leffler 		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
1689e26b433fSSam Leffler 			/* XXX plumb to local sta db too for static key wep */
1690e26b433fSSam Leffler 			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
1691e26b433fSSam Leffler 		}
1692cf4c5a53SSam Leffler 	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
1693cf4c5a53SSam Leffler 	    vap->iv_state != IEEE80211_S_RUN) {
1694cf4c5a53SSam Leffler 		/*
1695cf4c5a53SSam Leffler 		 * Prior to RUN state a WDS vap will not have its BSS node
1696cf4c5a53SSam Leffler 		 * set up, so we will plumb the key to the wrong mac
1697cf4c5a53SSam Leffler 		 * address (it'll be our local address).  Workaround
1698cf4c5a53SSam Leffler 		 * this for the moment by grabbing the correct address.
1699cf4c5a53SSam Leffler 		 */
1700cf4c5a53SSam Leffler 		macaddr = vap->iv_des_bssid;
1701cf4c5a53SSam Leffler 	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
1702cf4c5a53SSam Leffler 		macaddr = vap->iv_myaddr;
1703cf4c5a53SSam Leffler 	else
1704cf4c5a53SSam Leffler 		macaddr = mac;
1705cf4c5a53SSam Leffler 	KEYPRINTF(sc, &hk, macaddr);
1706cf4c5a53SSam Leffler 	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
1707cf4c5a53SSam Leffler #undef IEEE80211_IS_STATICKEY
1708cf4c5a53SSam Leffler #undef GRPXMIT
1709cf4c5a53SSam Leffler }
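
/*
 * Illustrative note: the sta db address chosen above follows the
 * firmware's convention: sta vaps key against the bssid (pairwise
 * only), a WDS vap that is not yet RUNning keys against iv_des_bssid,
 * group keys key against our own address, and everything else uses
 * the supplied peer mac.
 */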
1710cf4c5a53SSam Leffler 
1711cf4c5a53SSam Leffler /*
1712cf4c5a53SSam Leffler  * Set the multicast filter contents into the hardware.
1713cf4c5a53SSam Leffler  * XXX f/w has no support; just defer to the os.
1714cf4c5a53SSam Leffler  */
1715cf4c5a53SSam Leffler static void
1716cf4c5a53SSam Leffler mwl_setmcastfilter(struct mwl_softc *sc)
1717cf4c5a53SSam Leffler {
1718cf4c5a53SSam Leffler #if 0
1719cf4c5a53SSam Leffler 	struct ether_multi *enm;
1720cf4c5a53SSam Leffler 	struct ether_multistep estep;
1721cf4c5a53SSam Leffler 	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
1722cf4c5a53SSam Leffler 	uint8_t *mp;
1723cf4c5a53SSam Leffler 	int nmc;
1724cf4c5a53SSam Leffler 
1725cf4c5a53SSam Leffler 	mp = macs;
1726cf4c5a53SSam Leffler 	nmc = 0;
1727cf4c5a53SSam Leffler 	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
1728cf4c5a53SSam Leffler 	while (enm != NULL) {
1729cf4c5a53SSam Leffler 		/* XXX Punt on ranges. */
1730cf4c5a53SSam Leffler 		if (nmc == MWL_HAL_MCAST_MAX ||
1731cf4c5a53SSam Leffler 		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
173213f2ef16SJustin Hibbits 			if_setflagsbit(ifp, IFF_ALLMULTI, 0);
1733cf4c5a53SSam Leffler 			return;
1734cf4c5a53SSam Leffler 		}
1735cf4c5a53SSam Leffler 		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
1736cf4c5a53SSam Leffler 		mp += IEEE80211_ADDR_LEN, nmc++;
1737cf4c5a53SSam Leffler 		ETHER_NEXT_MULTI(estep, enm);
1738cf4c5a53SSam Leffler 	}
173913f2ef16SJustin Hibbits 	if_setflagsbit(ifp, 0, IFF_ALLMULTI);
1740cf4c5a53SSam Leffler 	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
1741cf4c5a53SSam Leffler #endif
1742cf4c5a53SSam Leffler }
1743cf4c5a53SSam Leffler 
1744cf4c5a53SSam Leffler static int
1745cf4c5a53SSam Leffler mwl_mode_init(struct mwl_softc *sc)
1746cf4c5a53SSam Leffler {
17477a79cebfSGleb Smirnoff 	struct ieee80211com *ic = &sc->sc_ic;
1748cf4c5a53SSam Leffler 	struct mwl_hal *mh = sc->sc_mh;
1749cf4c5a53SSam Leffler 
17506459bd28SAndriy Voskoboinyk 	mwl_hal_setpromisc(mh, ic->ic_promisc > 0);
1751cf4c5a53SSam Leffler 	mwl_setmcastfilter(sc);
1752cf4c5a53SSam Leffler 
1753cf4c5a53SSam Leffler 	return 0;
1754cf4c5a53SSam Leffler }
1755cf4c5a53SSam Leffler 
1756cf4c5a53SSam Leffler /*
1757cf4c5a53SSam Leffler  * Callback from the 802.11 layer after a multicast state change.
1758cf4c5a53SSam Leffler  */
1759cf4c5a53SSam Leffler static void
1760272f6adeSGleb Smirnoff mwl_update_mcast(struct ieee80211com *ic)
1761cf4c5a53SSam Leffler {
1762272f6adeSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
1763cf4c5a53SSam Leffler 
1764cf4c5a53SSam Leffler 	mwl_setmcastfilter(sc);
1765cf4c5a53SSam Leffler }
1766cf4c5a53SSam Leffler 
1767cf4c5a53SSam Leffler /*
1768cf4c5a53SSam Leffler  * Callback from the 802.11 layer after a promiscuous mode change.
1769cf4c5a53SSam Leffler  * Note this interface does not check the operating mode as this
1770cf4c5a53SSam Leffler  * is an internal callback and we are expected to honor the current
1771cf4c5a53SSam Leffler  * state (e.g. this is used for setting the interface in promiscuous
1772cf4c5a53SSam Leffler  * mode when operating in hostap mode to do ACS).
1773cf4c5a53SSam Leffler  */
1774cf4c5a53SSam Leffler static void
1775272f6adeSGleb Smirnoff mwl_update_promisc(struct ieee80211com *ic)
1776cf4c5a53SSam Leffler {
1777272f6adeSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
1778cf4c5a53SSam Leffler 
17797a79cebfSGleb Smirnoff 	mwl_hal_setpromisc(sc->sc_mh, ic->ic_promisc > 0);
1780cf4c5a53SSam Leffler }
1781cf4c5a53SSam Leffler 
1782cf4c5a53SSam Leffler /*
1783cf4c5a53SSam Leffler  * Callback from the 802.11 layer to update the slot time
1784cf4c5a53SSam Leffler  * based on the current setting.  We use it to notify the
1785cf4c5a53SSam Leffler  * firmware of ERP changes and the f/w takes care of things
1786cf4c5a53SSam Leffler  * like slot time and preamble.
1787cf4c5a53SSam Leffler  */
1788cf4c5a53SSam Leffler static void
1789272f6adeSGleb Smirnoff mwl_updateslot(struct ieee80211com *ic)
1790cf4c5a53SSam Leffler {
1791272f6adeSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
1792cf4c5a53SSam Leffler 	struct mwl_hal *mh = sc->sc_mh;
1793cf4c5a53SSam Leffler 	int prot;
1794cf4c5a53SSam Leffler 
1795cf4c5a53SSam Leffler 	/* NB: can be called early; suppress needless cmds */
17967a79cebfSGleb Smirnoff 	if (!sc->sc_running)
1797cf4c5a53SSam Leffler 		return;
1798cf4c5a53SSam Leffler 
1799cf4c5a53SSam Leffler 	/*
1800cf4c5a53SSam Leffler 	 * Calculate the ERP flags.  The firmware will use
1801cf4c5a53SSam Leffler 	 * this to carry out the appropriate measures.
1802cf4c5a53SSam Leffler 	 */
1803cf4c5a53SSam Leffler 	prot = 0;
1804cf4c5a53SSam Leffler 	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1805cf4c5a53SSam Leffler 		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1806cf4c5a53SSam Leffler 			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1807cf4c5a53SSam Leffler 		if (ic->ic_flags & IEEE80211_F_USEPROT)
1808cf4c5a53SSam Leffler 			prot |= IEEE80211_ERP_USE_PROTECTION;
1809cf4c5a53SSam Leffler 		if (ic->ic_flags & IEEE80211_F_USEBARKER)
1810cf4c5a53SSam Leffler 			prot |= IEEE80211_ERP_LONG_PREAMBLE;
1811cf4c5a53SSam Leffler 	}
1812cf4c5a53SSam Leffler 
1813cf4c5a53SSam Leffler 	DPRINTF(sc, MWL_DEBUG_RESET,
1814cf4c5a53SSam Leffler 	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1815cf4c5a53SSam Leffler 	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1816cf4c5a53SSam Leffler 	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1817cf4c5a53SSam Leffler 	    ic->ic_flags);
1818cf4c5a53SSam Leffler 
1819cf4c5a53SSam Leffler 	mwl_hal_setgprot(mh, prot);
1820cf4c5a53SSam Leffler }
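
/*
 * Illustrative sketch (not compiled): on an 11g channel where short
 * slot is not in use and protection is enabled, the firmware is
 * handed:
 */
#if 0
	prot = IEEE80211_ERP_NON_ERP_PRESENT | IEEE80211_ERP_USE_PROTECTION;
	mwl_hal_setgprot(mh, prot);
#endif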
1821cf4c5a53SSam Leffler 
1822cf4c5a53SSam Leffler /*
1823cf4c5a53SSam Leffler  * Setup the beacon frame.
1824cf4c5a53SSam Leffler  */
1825cf4c5a53SSam Leffler static int
1826cf4c5a53SSam Leffler mwl_beacon_setup(struct ieee80211vap *vap)
1827cf4c5a53SSam Leffler {
1828cf4c5a53SSam Leffler 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1829cf4c5a53SSam Leffler 	struct ieee80211_node *ni = vap->iv_bss;
1830cf4c5a53SSam Leffler 	struct mbuf *m;
1831cf4c5a53SSam Leffler 
1832210ab3c2SAdrian Chadd 	m = ieee80211_beacon_alloc(ni);
1833cf4c5a53SSam Leffler 	if (m == NULL)
1834cf4c5a53SSam Leffler 		return ENOBUFS;
1835cf4c5a53SSam Leffler 	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1836cf4c5a53SSam Leffler 	m_free(m);
1837cf4c5a53SSam Leffler 
1838cf4c5a53SSam Leffler 	return 0;
1839cf4c5a53SSam Leffler }
1840cf4c5a53SSam Leffler 
1841cf4c5a53SSam Leffler /*
1842cf4c5a53SSam Leffler  * Update the beacon frame in response to a change.
1843cf4c5a53SSam Leffler  */
1844cf4c5a53SSam Leffler static void
1845cf4c5a53SSam Leffler mwl_beacon_update(struct ieee80211vap *vap, int item)
1846cf4c5a53SSam Leffler {
1847cf4c5a53SSam Leffler 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1848cf4c5a53SSam Leffler 	struct ieee80211com *ic = vap->iv_ic;
1849cf4c5a53SSam Leffler 
1850cf4c5a53SSam Leffler 	KASSERT(hvap != NULL, ("no beacon"));
1851cf4c5a53SSam Leffler 	switch (item) {
1852cf4c5a53SSam Leffler 	case IEEE80211_BEACON_ERP:
1853272f6adeSGleb Smirnoff 		mwl_updateslot(ic);
1854cf4c5a53SSam Leffler 		break;
1855cf4c5a53SSam Leffler 	case IEEE80211_BEACON_HTINFO:
1856fe5ebb23SBjoern A. Zeeb 		mwl_hal_setnprotmode(hvap, _IEEE80211_MASKSHIFT(
1857fe5ebb23SBjoern A. Zeeb 		    ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1858cf4c5a53SSam Leffler 		break;
1859cf4c5a53SSam Leffler 	case IEEE80211_BEACON_CAPS:
1860cf4c5a53SSam Leffler 	case IEEE80211_BEACON_WME:
1861cf4c5a53SSam Leffler 	case IEEE80211_BEACON_APPIE:
1862cf4c5a53SSam Leffler 	case IEEE80211_BEACON_CSA:
1863cf4c5a53SSam Leffler 		break;
1864cf4c5a53SSam Leffler 	case IEEE80211_BEACON_TIM:
1865cf4c5a53SSam Leffler 		/* NB: firmware always forms TIM */
1866cf4c5a53SSam Leffler 		return;
1867cf4c5a53SSam Leffler 	}
1868cf4c5a53SSam Leffler 	/* XXX retain beacon frame and update */
1869cf4c5a53SSam Leffler 	mwl_beacon_setup(vap);
1870cf4c5a53SSam Leffler }
1871cf4c5a53SSam Leffler 
1872cf4c5a53SSam Leffler static void
1873cf4c5a53SSam Leffler mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1874cf4c5a53SSam Leffler {
1875cf4c5a53SSam Leffler 	bus_addr_t *paddr = (bus_addr_t*) arg;
1876cf4c5a53SSam Leffler 	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1877cf4c5a53SSam Leffler 	*paddr = segs->ds_addr;
1878cf4c5a53SSam Leffler }
1879cf4c5a53SSam Leffler 
1880cf4c5a53SSam Leffler #ifdef MWL_HOST_PS_SUPPORT
1881cf4c5a53SSam Leffler /*
1882cf4c5a53SSam Leffler  * Handle power save station occupancy changes.
1883cf4c5a53SSam Leffler  */
1884cf4c5a53SSam Leffler static void
1885cf4c5a53SSam Leffler mwl_update_ps(struct ieee80211vap *vap, int nsta)
1886cf4c5a53SSam Leffler {
1887cf4c5a53SSam Leffler 	struct mwl_vap *mvp = MWL_VAP(vap);
1888cf4c5a53SSam Leffler 
1889cf4c5a53SSam Leffler 	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1890cf4c5a53SSam Leffler 		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1891cf4c5a53SSam Leffler 	mvp->mv_last_ps_sta = nsta;
1892cf4c5a53SSam Leffler }
1893cf4c5a53SSam Leffler 
1894cf4c5a53SSam Leffler /*
1895cf4c5a53SSam Leffler  * Handle associated station power save state changes.
1896cf4c5a53SSam Leffler  */
1897cf4c5a53SSam Leffler static int
1898cf4c5a53SSam Leffler mwl_set_tim(struct ieee80211_node *ni, int set)
1899cf4c5a53SSam Leffler {
1900cf4c5a53SSam Leffler 	struct ieee80211vap *vap = ni->ni_vap;
1901cf4c5a53SSam Leffler 	struct mwl_vap *mvp = MWL_VAP(vap);
1902cf4c5a53SSam Leffler 
1903cf4c5a53SSam Leffler 	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1904cf4c5a53SSam Leffler 		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1905cf4c5a53SSam Leffler 		    IEEE80211_AID(ni->ni_associd), set);
1906cf4c5a53SSam Leffler 		return 1;
1907cf4c5a53SSam Leffler 	} else
1908cf4c5a53SSam Leffler 		return 0;
1909cf4c5a53SSam Leffler }
1910cf4c5a53SSam Leffler #endif /* MWL_HOST_PS_SUPPORT */
1911cf4c5a53SSam Leffler 
1912cf4c5a53SSam Leffler static int
1913cf4c5a53SSam Leffler mwl_desc_setup(struct mwl_softc *sc, const char *name,
1914cf4c5a53SSam Leffler 	struct mwl_descdma *dd,
1915cf4c5a53SSam Leffler 	int nbuf, size_t bufsize, int ndesc, size_t descsize)
1916cf4c5a53SSam Leffler {
1917cf4c5a53SSam Leffler 	uint8_t *ds;
1918cf4c5a53SSam Leffler 	int error;
1919cf4c5a53SSam Leffler 
1920cf4c5a53SSam Leffler 	DPRINTF(sc, MWL_DEBUG_RESET,
1921cf4c5a53SSam Leffler 	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
1922cf4c5a53SSam Leffler 	    __func__, name, nbuf, (uintmax_t) bufsize,
1923cf4c5a53SSam Leffler 	    ndesc, (uintmax_t) descsize);
1924cf4c5a53SSam Leffler 
1925cf4c5a53SSam Leffler 	dd->dd_name = name;
1926cf4c5a53SSam Leffler 	dd->dd_desc_len = nbuf * ndesc * descsize;
1927cf4c5a53SSam Leffler 
1928cf4c5a53SSam Leffler 	/*
1929cf4c5a53SSam Leffler 	 * Setup DMA descriptor area.
1930cf4c5a53SSam Leffler 	 */
1931cf4c5a53SSam Leffler 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
1932cf4c5a53SSam Leffler 		       PAGE_SIZE, 0,		/* alignment, bounds */
1933cf4c5a53SSam Leffler 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1934cf4c5a53SSam Leffler 		       BUS_SPACE_MAXADDR,	/* highaddr */
1935cf4c5a53SSam Leffler 		       NULL, NULL,		/* filter, filterarg */
1936cf4c5a53SSam Leffler 		       dd->dd_desc_len,		/* maxsize */
1937cf4c5a53SSam Leffler 		       1,			/* nsegments */
1938cf4c5a53SSam Leffler 		       dd->dd_desc_len,		/* maxsegsize */
1939cf4c5a53SSam Leffler 		       BUS_DMA_ALLOCNOW,	/* flags */
1940cf4c5a53SSam Leffler 		       NULL,			/* lockfunc */
1941cf4c5a53SSam Leffler 		       NULL,			/* lockarg */
1942cf4c5a53SSam Leffler 		       &dd->dd_dmat);
1943cf4c5a53SSam Leffler 	if (error != 0) {
19447a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "cannot allocate %s DMA tag\n", dd->dd_name);
1945cf4c5a53SSam Leffler 		return error;
1946cf4c5a53SSam Leffler 	}
1947cf4c5a53SSam Leffler 
1948cf4c5a53SSam Leffler 	/* allocate descriptors */
1949cf4c5a53SSam Leffler 	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
1950cf4c5a53SSam Leffler 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
1951cf4c5a53SSam Leffler 				 &dd->dd_dmamap);
1952cf4c5a53SSam Leffler 	if (error != 0) {
19537a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "unable to alloc memory for %u %s descriptors, "
1954cf4c5a53SSam Leffler 			"error %u\n", nbuf * ndesc, dd->dd_name, error);
1955cf4c5a53SSam Leffler 		goto fail1;
1956cf4c5a53SSam Leffler 	}
1957cf4c5a53SSam Leffler 
1958cf4c5a53SSam Leffler 	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
1959cf4c5a53SSam Leffler 				dd->dd_desc, dd->dd_desc_len,
1960cf4c5a53SSam Leffler 				mwl_load_cb, &dd->dd_desc_paddr,
1961cf4c5a53SSam Leffler 				BUS_DMA_NOWAIT);
1962cf4c5a53SSam Leffler 	if (error != 0) {
19637a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "unable to map %s descriptors, error %u\n",
1964cf4c5a53SSam Leffler 			dd->dd_name, error);
1965cf4c5a53SSam Leffler 		goto fail2;
1966cf4c5a53SSam Leffler 	}
1967cf4c5a53SSam Leffler 
1968cf4c5a53SSam Leffler 	ds = dd->dd_desc;
1969cf4c5a53SSam Leffler 	memset(ds, 0, dd->dd_desc_len);
19702706e872SMarius Strobl 	DPRINTF(sc, MWL_DEBUG_RESET,
19712706e872SMarius Strobl 	    "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
1972cf4c5a53SSam Leffler 	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
19732706e872SMarius Strobl 	    (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
1974cf4c5a53SSam Leffler 
1975cf4c5a53SSam Leffler 	return 0;
1976cf4c5a53SSam Leffler fail2:
1977cf4c5a53SSam Leffler 	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
1978cf4c5a53SSam Leffler fail1:
1979cf4c5a53SSam Leffler 	bus_dma_tag_destroy(dd->dd_dmat);
1980cf4c5a53SSam Leffler 	memset(dd, 0, sizeof(*dd));
1981cf4c5a53SSam Leffler 	return error;
1982cf4c5a53SSam Leffler #undef DS2PHYS
1983cf4c5a53SSam Leffler }
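
/*
 * Illustrative note: dd_desc_len is nbuf * ndesc * descsize, so the tx
 * ring set up below maps mwl_txbuf * MWL_TXDESC * sizeof(struct
 * mwl_txdesc) contiguous bytes, while the rx ring maps mwl_rxdesc
 * entries of struct mwl_rxdesc (one descriptor per posted buffer).
 */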
1984cf4c5a53SSam Leffler 
1985cf4c5a53SSam Leffler static void
1986cf4c5a53SSam Leffler mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
1987cf4c5a53SSam Leffler {
1988cf4c5a53SSam Leffler 	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
1989cf4c5a53SSam Leffler 	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
1990cf4c5a53SSam Leffler 	bus_dma_tag_destroy(dd->dd_dmat);
1991cf4c5a53SSam Leffler 
1992cf4c5a53SSam Leffler 	memset(dd, 0, sizeof(*dd));
1993cf4c5a53SSam Leffler }
1994cf4c5a53SSam Leffler 
1995cf4c5a53SSam Leffler /*
1996cf4c5a53SSam Leffler  * Construct a tx q's free list.  The order of entries on
1997cf4c5a53SSam Leffler  * the list must reflect the physical layout of tx descriptors
1998cf4c5a53SSam Leffler  * because the firmware pre-fetches descriptors.
1999cf4c5a53SSam Leffler  *
2000cf4c5a53SSam Leffler  * XXX might be better to use indices into the buffer array.
2001cf4c5a53SSam Leffler  */
2002cf4c5a53SSam Leffler static void
2003cf4c5a53SSam Leffler mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2004cf4c5a53SSam Leffler {
2005cf4c5a53SSam Leffler 	struct mwl_txbuf *bf;
2006cf4c5a53SSam Leffler 	int i;
2007cf4c5a53SSam Leffler 
2008cf4c5a53SSam Leffler 	bf = txq->dma.dd_bufptr;
2009cf4c5a53SSam Leffler 	STAILQ_INIT(&txq->free);
2010cf4c5a53SSam Leffler 	for (i = 0; i < mwl_txbuf; i++, bf++)
2011cf4c5a53SSam Leffler 		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2012cf4c5a53SSam Leffler 	txq->nfree = i;
2013cf4c5a53SSam Leffler }
2014cf4c5a53SSam Leffler 
2015cf4c5a53SSam Leffler #define	DS2PHYS(_dd, _ds) \
2016cf4c5a53SSam Leffler 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
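
/*
 * Illustrative note: DS2PHYS converts a descriptor's kernel virtual
 * address into its bus address by adding the descriptor's byte offset
 * within the ring to dd_desc_paddr; bf_daddr below is derived this way
 * as the tx ring is walked.
 */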
2017cf4c5a53SSam Leffler 
2018cf4c5a53SSam Leffler static int
2019cf4c5a53SSam Leffler mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
2020cf4c5a53SSam Leffler {
2021cf4c5a53SSam Leffler 	int error, bsize, i;
2022cf4c5a53SSam Leffler 	struct mwl_txbuf *bf;
2023cf4c5a53SSam Leffler 	struct mwl_txdesc *ds;
2024cf4c5a53SSam Leffler 
2025cf4c5a53SSam Leffler 	error = mwl_desc_setup(sc, "tx", &txq->dma,
2026cf4c5a53SSam Leffler 			mwl_txbuf, sizeof(struct mwl_txbuf),
2027cf4c5a53SSam Leffler 			MWL_TXDESC, sizeof(struct mwl_txdesc));
2028cf4c5a53SSam Leffler 	if (error != 0)
2029cf4c5a53SSam Leffler 		return error;
2030cf4c5a53SSam Leffler 
2031cf4c5a53SSam Leffler 	/* allocate and setup tx buffers */
2032cf4c5a53SSam Leffler 	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
2033cf4c5a53SSam Leffler 	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2034cf4c5a53SSam Leffler 	if (bf == NULL) {
20357a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "malloc of %u tx buffers failed\n",
2036cf4c5a53SSam Leffler 			mwl_txbuf);
2037cf4c5a53SSam Leffler 		return ENOMEM;
2038cf4c5a53SSam Leffler 	}
2039cf4c5a53SSam Leffler 	txq->dma.dd_bufptr = bf;
2040cf4c5a53SSam Leffler 
2041cf4c5a53SSam Leffler 	ds = txq->dma.dd_desc;
2042cf4c5a53SSam Leffler 	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
2043cf4c5a53SSam Leffler 		bf->bf_desc = ds;
2044cf4c5a53SSam Leffler 		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
2045cf4c5a53SSam Leffler 		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2046cf4c5a53SSam Leffler 				&bf->bf_dmamap);
2047cf4c5a53SSam Leffler 		if (error != 0) {
20487a79cebfSGleb Smirnoff 			device_printf(sc->sc_dev, "unable to create dmamap for tx "
2049cf4c5a53SSam Leffler 				"buffer %u, error %u\n", i, error);
2050cf4c5a53SSam Leffler 			return error;
2051cf4c5a53SSam Leffler 		}
2052cf4c5a53SSam Leffler 	}
2053cf4c5a53SSam Leffler 	mwl_txq_reset(sc, txq);
2054cf4c5a53SSam Leffler 	return 0;
2055cf4c5a53SSam Leffler }
2056cf4c5a53SSam Leffler 
2057cf4c5a53SSam Leffler static void
2058cf4c5a53SSam Leffler mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2059cf4c5a53SSam Leffler {
2060cf4c5a53SSam Leffler 	struct mwl_txbuf *bf;
2061cf4c5a53SSam Leffler 	int i;
2062cf4c5a53SSam Leffler 
2063cf4c5a53SSam Leffler 	bf = txq->dma.dd_bufptr;
2064cf4c5a53SSam Leffler 	for (i = 0; i < mwl_txbuf; i++, bf++) {
2065cf4c5a53SSam Leffler 		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2066cf4c5a53SSam Leffler 		KASSERT(bf->bf_node == NULL, ("node on free list"));
2067cf4c5a53SSam Leffler 		if (bf->bf_dmamap != NULL)
2068cf4c5a53SSam Leffler 			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2069cf4c5a53SSam Leffler 	}
2070cf4c5a53SSam Leffler 	STAILQ_INIT(&txq->free);
2071cf4c5a53SSam Leffler 	txq->nfree = 0;
2072cf4c5a53SSam Leffler 	if (txq->dma.dd_bufptr != NULL) {
2073cf4c5a53SSam Leffler 		free(txq->dma.dd_bufptr, M_MWLDEV);
2074cf4c5a53SSam Leffler 		txq->dma.dd_bufptr = NULL;
2075cf4c5a53SSam Leffler 	}
2076cf4c5a53SSam Leffler 	if (txq->dma.dd_desc_len != 0)
2077cf4c5a53SSam Leffler 		mwl_desc_cleanup(sc, &txq->dma);
2078cf4c5a53SSam Leffler }
2079cf4c5a53SSam Leffler 
2080cf4c5a53SSam Leffler static int
2081cf4c5a53SSam Leffler mwl_rxdma_setup(struct mwl_softc *sc)
2082cf4c5a53SSam Leffler {
2083cf4c5a53SSam Leffler 	int error, jumbosize, bsize, i;
2084cf4c5a53SSam Leffler 	struct mwl_rxbuf *bf;
2085cf4c5a53SSam Leffler 	struct mwl_jumbo *rbuf;
2086cf4c5a53SSam Leffler 	struct mwl_rxdesc *ds;
2087cf4c5a53SSam Leffler 	caddr_t data;
2088cf4c5a53SSam Leffler 
2089cf4c5a53SSam Leffler 	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2090cf4c5a53SSam Leffler 			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2091cf4c5a53SSam Leffler 			1, sizeof(struct mwl_rxdesc));
2092cf4c5a53SSam Leffler 	if (error != 0)
2093cf4c5a53SSam Leffler 		return error;
2094cf4c5a53SSam Leffler 
2095cf4c5a53SSam Leffler 	/*
2096cf4c5a53SSam Leffler 	 * Receive is done to a private pool of jumbo buffers.
2097cf4c5a53SSam Leffler 	 * This allows us to attach to mbufs and avoid re-mapping
2098cf4c5a53SSam Leffler 	 * memory on each rx we post.  We allocate a large chunk
2099cf4c5a53SSam Leffler 	 * of memory and manage it in the driver.  The mbuf free
2100cf4c5a53SSam Leffler 	 * callback method is used to reclaim frames after sending
2101cf4c5a53SSam Leffler 	 * them up the stack.  By default we allocate 2x the number of
2102cf4c5a53SSam Leffler 	 * rx descriptors configured so we have some slop to hold
2103cf4c5a53SSam Leffler 	 * us while frames are processed.
2104cf4c5a53SSam Leffler 	 */
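	/*
	 * Sizing sketch (illustrative numbers, not the driver defaults):
	 * if mwl_rxdesc were 256, mwl_rxbuf is raised to at least 512
	 * below; with MWL_AGGR_SIZE rounding up to, say, 8KB of jumbo
	 * buffer per frame, sc_rxmemsize works out to 512 * 8KB = 4MB of
	 * contiguous DMA memory for the private pool.
	 */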
2105cf4c5a53SSam Leffler 	if (mwl_rxbuf < 2*mwl_rxdesc) {
21067a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev,
2107cf4c5a53SSam Leffler 		    "too few rx dma buffers (%d); increasing to %d\n",
2108cf4c5a53SSam Leffler 		    mwl_rxbuf, 2*mwl_rxdesc);
2109cf4c5a53SSam Leffler 		mwl_rxbuf = 2*mwl_rxdesc;
2110cf4c5a53SSam Leffler 	}
2111cf4c5a53SSam Leffler 	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2112cf4c5a53SSam Leffler 	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2113cf4c5a53SSam Leffler 
2114cf4c5a53SSam Leffler 	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2115cf4c5a53SSam Leffler 		       PAGE_SIZE, 0,		/* alignment, bounds */
2116cf4c5a53SSam Leffler 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2117cf4c5a53SSam Leffler 		       BUS_SPACE_MAXADDR,	/* highaddr */
2118cf4c5a53SSam Leffler 		       NULL, NULL,		/* filter, filterarg */
2119cf4c5a53SSam Leffler 		       sc->sc_rxmemsize,	/* maxsize */
2120cf4c5a53SSam Leffler 		       1,			/* nsegments */
2121cf4c5a53SSam Leffler 		       sc->sc_rxmemsize,	/* maxsegsize */
2122cf4c5a53SSam Leffler 		       BUS_DMA_ALLOCNOW,	/* flags */
2123cf4c5a53SSam Leffler 		       NULL,			/* lockfunc */
2124cf4c5a53SSam Leffler 		       NULL,			/* lockarg */
2125cf4c5a53SSam Leffler 		       &sc->sc_rxdmat);
2126cf4c5a53SSam Leffler 	if (error != 0) {
21277a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "could not create rx DMA tag\n");
2128cf4c5a53SSam Leffler 		return error;
2129cf4c5a53SSam Leffler 	}
2130cf4c5a53SSam Leffler 
2131cf4c5a53SSam Leffler 	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2132cf4c5a53SSam Leffler 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2133cf4c5a53SSam Leffler 				 &sc->sc_rxmap);
2134cf4c5a53SSam Leffler 	if (error != 0) {
21357a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "could not alloc %ju bytes of rx DMA memory\n",
2136cf4c5a53SSam Leffler 		    (uintmax_t) sc->sc_rxmemsize);
2137cf4c5a53SSam Leffler 		return error;
2138cf4c5a53SSam Leffler 	}
2139cf4c5a53SSam Leffler 
2140cf4c5a53SSam Leffler 	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2141cf4c5a53SSam Leffler 				sc->sc_rxmem, sc->sc_rxmemsize,
2142cf4c5a53SSam Leffler 				mwl_load_cb, &sc->sc_rxmem_paddr,
2143cf4c5a53SSam Leffler 				BUS_DMA_NOWAIT);
2144cf4c5a53SSam Leffler 	if (error != 0) {
21457a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "could not load rx DMA map\n");
2146cf4c5a53SSam Leffler 		return error;
2147cf4c5a53SSam Leffler 	}
2148cf4c5a53SSam Leffler 
2149cf4c5a53SSam Leffler 	/*
2150cf4c5a53SSam Leffler 	 * Allocate rx buffers and set them up.
2151cf4c5a53SSam Leffler 	 */
2152cf4c5a53SSam Leffler 	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2153cf4c5a53SSam Leffler 	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2154cf4c5a53SSam Leffler 	if (bf == NULL) {
21557a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "malloc of %u bytes of rx buffers failed\n", bsize);
2156cf4c5a53SSam Leffler 		return ENOMEM;
2157cf4c5a53SSam Leffler 	}
2158cf4c5a53SSam Leffler 	sc->sc_rxdma.dd_bufptr = bf;
2159cf4c5a53SSam Leffler 
2160cf4c5a53SSam Leffler 	STAILQ_INIT(&sc->sc_rxbuf);
2161cf4c5a53SSam Leffler 	ds = sc->sc_rxdma.dd_desc;
2162cf4c5a53SSam Leffler 	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2163cf4c5a53SSam Leffler 		bf->bf_desc = ds;
2164cf4c5a53SSam Leffler 		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2165cf4c5a53SSam Leffler 		/* pre-assign dma buffer */
2166cf4c5a53SSam Leffler 		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2167cf4c5a53SSam Leffler 		/* NB: tail is intentional to preserve descriptor order */
2168cf4c5a53SSam Leffler 		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2169cf4c5a53SSam Leffler 	}
2170cf4c5a53SSam Leffler 
2171cf4c5a53SSam Leffler 	/*
2172cf4c5a53SSam Leffler 	 * Place remainder of dma memory buffers on the free list.
2173cf4c5a53SSam Leffler 	 */
2174cf4c5a53SSam Leffler 	SLIST_INIT(&sc->sc_rxfree);
2175cf4c5a53SSam Leffler 	for (; i < mwl_rxbuf; i++) {
2176cf4c5a53SSam Leffler 		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2177cf4c5a53SSam Leffler 		rbuf = MWL_JUMBO_DATA2BUF(data);
2178cf4c5a53SSam Leffler 		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2179cf4c5a53SSam Leffler 		sc->sc_nrxfree++;
2180cf4c5a53SSam Leffler 	}
2181cf4c5a53SSam Leffler 	return 0;
2182cf4c5a53SSam Leffler }
2183cf4c5a53SSam Leffler #undef DS2PHYS
2184cf4c5a53SSam Leffler 
2185cf4c5a53SSam Leffler static void
2186cf4c5a53SSam Leffler mwl_rxdma_cleanup(struct mwl_softc *sc)
2187cf4c5a53SSam Leffler {
2188f07894dbSJohn Baldwin 	if (sc->sc_rxmem_paddr != 0) {
2189cf4c5a53SSam Leffler 		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
2190f07894dbSJohn Baldwin 		sc->sc_rxmem_paddr = 0;
2191f07894dbSJohn Baldwin 	}
2192cf4c5a53SSam Leffler 	if (sc->sc_rxmem != NULL) {
2193cf4c5a53SSam Leffler 		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
2194cf4c5a53SSam Leffler 		sc->sc_rxmem = NULL;
2195cf4c5a53SSam Leffler 	}
2196cf4c5a53SSam Leffler 	if (sc->sc_rxdma.dd_bufptr != NULL) {
2197cf4c5a53SSam Leffler 		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
2198cf4c5a53SSam Leffler 		sc->sc_rxdma.dd_bufptr = NULL;
2199cf4c5a53SSam Leffler 	}
2200cf4c5a53SSam Leffler 	if (sc->sc_rxdma.dd_desc_len != 0)
2201cf4c5a53SSam Leffler 		mwl_desc_cleanup(sc, &sc->sc_rxdma);
2202cf4c5a53SSam Leffler }
2203cf4c5a53SSam Leffler 
2204cf4c5a53SSam Leffler static int
2205cf4c5a53SSam Leffler mwl_dma_setup(struct mwl_softc *sc)
2206cf4c5a53SSam Leffler {
2207cf4c5a53SSam Leffler 	int error, i;
2208cf4c5a53SSam Leffler 
2209cf4c5a53SSam Leffler 	error = mwl_rxdma_setup(sc);
221016d452b4SRui Paulo 	if (error != 0) {
221116d452b4SRui Paulo 		mwl_rxdma_cleanup(sc);
2212cf4c5a53SSam Leffler 		return error;
221316d452b4SRui Paulo 	}
2214cf4c5a53SSam Leffler 
2215cf4c5a53SSam Leffler 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2216cf4c5a53SSam Leffler 		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2217cf4c5a53SSam Leffler 		if (error != 0) {
2218cf4c5a53SSam Leffler 			mwl_dma_cleanup(sc);
2219cf4c5a53SSam Leffler 			return error;
2220cf4c5a53SSam Leffler 		}
2221cf4c5a53SSam Leffler 	}
2222cf4c5a53SSam Leffler 	return 0;
2223cf4c5a53SSam Leffler }
2224cf4c5a53SSam Leffler 
2225cf4c5a53SSam Leffler static void
2226cf4c5a53SSam Leffler mwl_dma_cleanup(struct mwl_softc *sc)
2227cf4c5a53SSam Leffler {
2228cf4c5a53SSam Leffler 	int i;
2229cf4c5a53SSam Leffler 
2230cf4c5a53SSam Leffler 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2231cf4c5a53SSam Leffler 		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2232cf4c5a53SSam Leffler 	mwl_rxdma_cleanup(sc);
2233cf4c5a53SSam Leffler }
2234cf4c5a53SSam Leffler 
2235cf4c5a53SSam Leffler static struct ieee80211_node *
2236cf4c5a53SSam Leffler mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2237cf4c5a53SSam Leffler {
2238cf4c5a53SSam Leffler 	struct ieee80211com *ic = vap->iv_ic;
22397a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
2240cf4c5a53SSam Leffler 	const size_t space = sizeof(struct mwl_node);
2241cf4c5a53SSam Leffler 	struct mwl_node *mn;
2242cf4c5a53SSam Leffler 
2243cf4c5a53SSam Leffler 	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2244cf4c5a53SSam Leffler 	if (mn == NULL) {
2245cf4c5a53SSam Leffler 		/* XXX stat+msg */
2246cf4c5a53SSam Leffler 		return NULL;
2247cf4c5a53SSam Leffler 	}
2248cf4c5a53SSam Leffler 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2249cf4c5a53SSam Leffler 	return &mn->mn_node;
2250cf4c5a53SSam Leffler }
2251cf4c5a53SSam Leffler 
2252cf4c5a53SSam Leffler static void
2253cf4c5a53SSam Leffler mwl_node_cleanup(struct ieee80211_node *ni)
2254cf4c5a53SSam Leffler {
2255cf4c5a53SSam Leffler 	struct ieee80211com *ic = ni->ni_ic;
22567a79cebfSGleb Smirnoff         struct mwl_softc *sc = ic->ic_softc;
2257cf4c5a53SSam Leffler 	struct mwl_node *mn = MWL_NODE(ni);
2258cf4c5a53SSam Leffler 
2259cf4c5a53SSam Leffler 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
2260cf4c5a53SSam Leffler 	    __func__, ni, ni->ni_ic, mn->mn_staid);
2261cf4c5a53SSam Leffler 
2262cf4c5a53SSam Leffler 	if (mn->mn_staid != 0) {
2263cf4c5a53SSam Leffler 		struct ieee80211vap *vap = ni->ni_vap;
2264cf4c5a53SSam Leffler 
2265cf4c5a53SSam Leffler 		if (mn->mn_hvap != NULL) {
2266cf4c5a53SSam Leffler 			if (vap->iv_opmode == IEEE80211_M_STA)
2267cf4c5a53SSam Leffler 				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
2268cf4c5a53SSam Leffler 			else
2269cf4c5a53SSam Leffler 				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
2270cf4c5a53SSam Leffler 		}
2271cf4c5a53SSam Leffler 		/*
2272cf4c5a53SSam Leffler 		 * NB: legacy WDS peer sta db entry is installed using
2273cf4c5a53SSam Leffler 		 * the associated ap's hvap; use it again to delete it.
2274cf4c5a53SSam Leffler 		 * XXX can vap be NULL?
2275cf4c5a53SSam Leffler 		 */
2276cf4c5a53SSam Leffler 		else if (vap->iv_opmode == IEEE80211_M_WDS &&
2277cf4c5a53SSam Leffler 		    MWL_VAP(vap)->mv_ap_hvap != NULL)
2278cf4c5a53SSam Leffler 			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
2279cf4c5a53SSam Leffler 			    ni->ni_macaddr);
2280cf4c5a53SSam Leffler 		delstaid(sc, mn->mn_staid);
2281cf4c5a53SSam Leffler 		mn->mn_staid = 0;
2282cf4c5a53SSam Leffler 	}
2283cf4c5a53SSam Leffler 	sc->sc_node_cleanup(ni);
2284cf4c5a53SSam Leffler }
2285cf4c5a53SSam Leffler 
2286cf4c5a53SSam Leffler /*
2287cf4c5a53SSam Leffler  * Reclaim rx dma buffers from packets sitting on the ampdu
2288cf4c5a53SSam Leffler  * reorder queue for a station.  We replace buffers with a
2289cf4c5a53SSam Leffler  * system cluster (if available).
2290cf4c5a53SSam Leffler  */
2291cf4c5a53SSam Leffler static void
2292cf4c5a53SSam Leffler mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
2293cf4c5a53SSam Leffler {
2294cf4c5a53SSam Leffler #if 0
2295cf4c5a53SSam Leffler 	int i, n, off;
2296cf4c5a53SSam Leffler 	struct mbuf *m;
2297cf4c5a53SSam Leffler 	void *cl;
2298cf4c5a53SSam Leffler 
2299cf4c5a53SSam Leffler 	n = rap->rxa_qframes;
2300cf4c5a53SSam Leffler 	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
2301cf4c5a53SSam Leffler 		m = rap->rxa_m[i];
2302cf4c5a53SSam Leffler 		if (m == NULL)
2303cf4c5a53SSam Leffler 			continue;
2304cf4c5a53SSam Leffler 		n--;
2305cf4c5a53SSam Leffler 		/* our dma buffers have a well-known free routine */
2306cf4c5a53SSam Leffler 		if ((m->m_flags & M_EXT) == 0 ||
2307cf4c5a53SSam Leffler 		    m->m_ext.ext_free != mwl_ext_free)
2308cf4c5a53SSam Leffler 			continue;
2309cf4c5a53SSam Leffler 		/*
2310cf4c5a53SSam Leffler 		 * Try to allocate a cluster and move the data.
2311cf4c5a53SSam Leffler 		 */
2312cf4c5a53SSam Leffler 		off = m->m_data - m->m_ext.ext_buf;
2313cf4c5a53SSam Leffler 		if (off + m->m_pkthdr.len > MCLBYTES) {
2314cf4c5a53SSam Leffler 			/* XXX no AMSDU for now */
2315cf4c5a53SSam Leffler 			continue;
2316cf4c5a53SSam Leffler 		}
2317cf4c5a53SSam Leffler 		cl = pool_cache_get_paddr(&mclpool_cache, 0,
2318cf4c5a53SSam Leffler 		    &m->m_ext.ext_paddr);
2319cf4c5a53SSam Leffler 		if (cl != NULL) {
2320cf4c5a53SSam Leffler 			/*
2321cf4c5a53SSam Leffler 			 * Copy the existing data to the cluster, remove
2322cf4c5a53SSam Leffler 			 * the rx dma buffer, and attach the cluster in
2323cf4c5a53SSam Leffler 			 * its place.  Note we preserve the offset to the
2324cf4c5a53SSam Leffler 			 * data so frames being bridged can still prepend
2325cf4c5a53SSam Leffler 			 * their headers without adding another mbuf.
2326cf4c5a53SSam Leffler 			 */
2327cf4c5a53SSam Leffler 			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
2328cf4c5a53SSam Leffler 			MEXTREMOVE(m);
2329cf4c5a53SSam Leffler 			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
2330cf4c5a53SSam Leffler 			/* setup mbuf like _MCLGET does */
2331cf4c5a53SSam Leffler 			m->m_flags |= M_CLUSTER | M_EXT_RW;
2332cf4c5a53SSam Leffler 			_MOWNERREF(m, M_EXT | M_CLUSTER);
2333cf4c5a53SSam Leffler 			/* NB: m_data is clobbered by MEXTADDR, adjust */
2334cf4c5a53SSam Leffler 			m->m_data += off;
2335cf4c5a53SSam Leffler 		}
2336cf4c5a53SSam Leffler 	}
2337cf4c5a53SSam Leffler #endif
2338cf4c5a53SSam Leffler }
2339cf4c5a53SSam Leffler 
2340cf4c5a53SSam Leffler /*
2341cf4c5a53SSam Leffler  * Callback to reclaim resources.  We first let the
2342cf4c5a53SSam Leffler  * net80211 layer do its thing, then if we are still
2343cf4c5a53SSam Leffler  * blocked by a lack of rx dma buffers we walk the ampdu
2344cf4c5a53SSam Leffler  * reorder q's to reclaim buffers by copying to a system
2345cf4c5a53SSam Leffler  * cluster.
2346cf4c5a53SSam Leffler  */
2347cf4c5a53SSam Leffler static void
2348cf4c5a53SSam Leffler mwl_node_drain(struct ieee80211_node *ni)
2349cf4c5a53SSam Leffler {
2350cf4c5a53SSam Leffler 	struct ieee80211com *ic = ni->ni_ic;
23517a79cebfSGleb Smirnoff         struct mwl_softc *sc = ic->ic_softc;
2352cf4c5a53SSam Leffler 	struct mwl_node *mn = MWL_NODE(ni);
2353cf4c5a53SSam Leffler 
2354cf4c5a53SSam Leffler 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2355cf4c5a53SSam Leffler 	    __func__, ni, ni->ni_vap, mn->mn_staid);
2356cf4c5a53SSam Leffler 
2357cf4c5a53SSam Leffler 	/* NB: call up first to age out ampdu q's */
2358cf4c5a53SSam Leffler 	sc->sc_node_drain(ni);
2359cf4c5a53SSam Leffler 
2360cf4c5a53SSam Leffler 	/* XXX better to not check low water mark? */
2361cf4c5a53SSam Leffler 	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2362cf4c5a53SSam Leffler 	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2363cf4c5a53SSam Leffler 		uint8_t tid;
2364cf4c5a53SSam Leffler 		/*
2365cf4c5a53SSam Leffler 		 * Walk the reorder q and reclaim rx dma buffers by copying
2366cf4c5a53SSam Leffler 		 * the packet contents into clusters.
2367cf4c5a53SSam Leffler 		 */
2368cf4c5a53SSam Leffler 		for (tid = 0; tid < WME_NUM_TID; tid++) {
2369cf4c5a53SSam Leffler 			struct ieee80211_rx_ampdu *rap;
2370cf4c5a53SSam Leffler 
2371cf4c5a53SSam Leffler 			rap = &ni->ni_rx_ampdu[tid];
2372cf4c5a53SSam Leffler 			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2373cf4c5a53SSam Leffler 				continue;
2374cf4c5a53SSam Leffler 			if (rap->rxa_qframes)
2375cf4c5a53SSam Leffler 				mwl_ampdu_rxdma_reclaim(rap);
2376cf4c5a53SSam Leffler 		}
2377cf4c5a53SSam Leffler 	}
2378cf4c5a53SSam Leffler }
2379cf4c5a53SSam Leffler 
2380cf4c5a53SSam Leffler static void
2381cf4c5a53SSam Leffler mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
2382cf4c5a53SSam Leffler {
2383cf4c5a53SSam Leffler 	*rssi = ni->ni_ic->ic_node_getrssi(ni);
2384cf4c5a53SSam Leffler #ifdef MWL_ANT_INFO_SUPPORT
2385cf4c5a53SSam Leffler #if 0
2386cf4c5a53SSam Leffler 	/* XXX need to smooth data */
2387cf4c5a53SSam Leffler 	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
2388cf4c5a53SSam Leffler #else
2389cf4c5a53SSam Leffler 	*noise = -95;		/* XXX */
2390cf4c5a53SSam Leffler #endif
2391cf4c5a53SSam Leffler #else
2392cf4c5a53SSam Leffler 	*noise = -95;		/* XXX */
2393cf4c5a53SSam Leffler #endif
2394cf4c5a53SSam Leffler }
2395cf4c5a53SSam Leffler 
2396cf4c5a53SSam Leffler /*
2397cf4c5a53SSam Leffler  * Convert Hardware per-antenna rssi info to common format:
2398cf4c5a53SSam Leffler  * Let a1, a2, a3 represent the amplitudes per chain
2399cf4c5a53SSam Leffler  * Let amax represent max[a1, a2, a3]
2400cf4c5a53SSam Leffler  * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2401cf4c5a53SSam Leffler  * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2402cf4c5a53SSam Leffler  * We store a table that is 4*20*log10(idx) - the extra factor of 4
2403cf4c5a53SSam Leffler  * preserves some extra precision.
2404cf4c5a53SSam Leffler  *
2405cf4c5a53SSam Leffler  * Values are stored in .5 dB units, capped at 127.
2406cf4c5a53SSam Leffler  */
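/*
 * Worked example (assumed numbers, for illustration only): with
 * rssi = 40 dB and per-chain amplitude indices rssi_a = 16 (the max)
 * and rssi_b = 8, the CVT macro below yields for chain b
 *
 *	40 + ((logdbtbl[8] - logdbtbl[16]) >> 2)
 *	   = 40 + ((72 - 96) >> 2) = 40 - 6 = 34 dB,
 *
 * doubled to 68 in the .5 dB units reported to net80211, i.e. chain b
 * reads about 6 dB (20*log10(8/16)) below the strongest chain.
 */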
2407cf4c5a53SSam Leffler static void
2408cf4c5a53SSam Leffler mwl_node_getmimoinfo(const struct ieee80211_node *ni,
2409cf4c5a53SSam Leffler 	struct ieee80211_mimo_info *mi)
2410cf4c5a53SSam Leffler {
2411cf4c5a53SSam Leffler #define	CVT(_dst, _src) do {						\
2412cf4c5a53SSam Leffler 	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
2413cf4c5a53SSam Leffler 	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
2414cf4c5a53SSam Leffler } while (0)
2415cf4c5a53SSam Leffler 	static const int8_t logdbtbl[32] = {
2416cf4c5a53SSam Leffler 	       0,   0,  24,  38,  48,  56,  62,  68,
2417cf4c5a53SSam Leffler 	      72,  76,  80,  83,  86,  89,  92,  94,
2418cf4c5a53SSam Leffler 	      96,  98, 100, 102, 104, 106, 107, 109,
2419cf4c5a53SSam Leffler 	     110, 112, 113, 115, 116, 117, 118, 119
2420cf4c5a53SSam Leffler 	};
2421cf4c5a53SSam Leffler 	const struct mwl_node *mn = MWL_NODE_CONST(ni);
2422cf4c5a53SSam Leffler 	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
2423cf4c5a53SSam Leffler 	uint32_t rssi_max;
2424cf4c5a53SSam Leffler 
2425cf4c5a53SSam Leffler 	rssi_max = mn->mn_ai.rssi_a;
2426cf4c5a53SSam Leffler 	if (mn->mn_ai.rssi_b > rssi_max)
2427cf4c5a53SSam Leffler 		rssi_max = mn->mn_ai.rssi_b;
2428cf4c5a53SSam Leffler 	if (mn->mn_ai.rssi_c > rssi_max)
2429cf4c5a53SSam Leffler 		rssi_max = mn->mn_ai.rssi_c;
2430cf4c5a53SSam Leffler 
2431617f8b10SAdrian Chadd 	CVT(mi->ch[0].rssi[0], mn->mn_ai.rssi_a);
2432617f8b10SAdrian Chadd 	CVT(mi->ch[1].rssi[0], mn->mn_ai.rssi_b);
2433617f8b10SAdrian Chadd 	CVT(mi->ch[2].rssi[0], mn->mn_ai.rssi_c);
2434cf4c5a53SSam Leffler 
2435617f8b10SAdrian Chadd 	mi->ch[0].noise[0] = mn->mn_ai.nf_a;
2436617f8b10SAdrian Chadd 	mi->ch[1].noise[0] = mn->mn_ai.nf_b;
2437617f8b10SAdrian Chadd 	mi->ch[2].noise[0] = mn->mn_ai.nf_c;
2438cf4c5a53SSam Leffler #undef CVT
2439cf4c5a53SSam Leffler }
2440cf4c5a53SSam Leffler 
2441cf4c5a53SSam Leffler static __inline void *
2442cf4c5a53SSam Leffler mwl_getrxdma(struct mwl_softc *sc)
2443cf4c5a53SSam Leffler {
2444cf4c5a53SSam Leffler 	struct mwl_jumbo *buf;
2445cf4c5a53SSam Leffler 	void *data;
2446cf4c5a53SSam Leffler 
2447cf4c5a53SSam Leffler 	/*
2448cf4c5a53SSam Leffler 	 * Allocate from jumbo pool.
2449cf4c5a53SSam Leffler 	 */
2450cf4c5a53SSam Leffler 	MWL_RXFREE_LOCK(sc);
2451cf4c5a53SSam Leffler 	buf = SLIST_FIRST(&sc->sc_rxfree);
2452cf4c5a53SSam Leffler 	if (buf == NULL) {
2453cf4c5a53SSam Leffler 		DPRINTF(sc, MWL_DEBUG_ANY,
2454cf4c5a53SSam Leffler 		    "%s: out of rx dma buffers\n", __func__);
2455cf4c5a53SSam Leffler 		sc->sc_stats.mst_rx_nodmabuf++;
2456cf4c5a53SSam Leffler 		data = NULL;
2457cf4c5a53SSam Leffler 	} else {
2458cf4c5a53SSam Leffler 		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2459cf4c5a53SSam Leffler 		sc->sc_nrxfree--;
2460cf4c5a53SSam Leffler 		data = MWL_JUMBO_BUF2DATA(buf);
2461cf4c5a53SSam Leffler 	}
2462cf4c5a53SSam Leffler 	MWL_RXFREE_UNLOCK(sc);
2463cf4c5a53SSam Leffler 	return data;
2464cf4c5a53SSam Leffler }
2465cf4c5a53SSam Leffler 
2466cf4c5a53SSam Leffler static __inline void
2467cf4c5a53SSam Leffler mwl_putrxdma(struct mwl_softc *sc, void *data)
2468cf4c5a53SSam Leffler {
2469cf4c5a53SSam Leffler 	struct mwl_jumbo *buf;
2470cf4c5a53SSam Leffler 
2471cf4c5a53SSam Leffler 	/* XXX bounds check data */
2472cf4c5a53SSam Leffler 	MWL_RXFREE_LOCK(sc);
2473cf4c5a53SSam Leffler 	buf = MWL_JUMBO_DATA2BUF(data);
2474cf4c5a53SSam Leffler 	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2475cf4c5a53SSam Leffler 	sc->sc_nrxfree++;
2476cf4c5a53SSam Leffler 	MWL_RXFREE_UNLOCK(sc);
2477cf4c5a53SSam Leffler }
2478cf4c5a53SSam Leffler 
2479cf4c5a53SSam Leffler static int
2480cf4c5a53SSam Leffler mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
2481cf4c5a53SSam Leffler {
2482cf4c5a53SSam Leffler 	struct mwl_rxdesc *ds;
2483cf4c5a53SSam Leffler 
2484cf4c5a53SSam Leffler 	ds = bf->bf_desc;
2485cf4c5a53SSam Leffler 	if (bf->bf_data == NULL) {
2486cf4c5a53SSam Leffler 		bf->bf_data = mwl_getrxdma(sc);
2487cf4c5a53SSam Leffler 		if (bf->bf_data == NULL) {
2488cf4c5a53SSam Leffler 			/* mark descriptor to be skipped */
2489cf4c5a53SSam Leffler 			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
2490cf4c5a53SSam Leffler 			/* NB: don't need PREREAD */
2491cf4c5a53SSam Leffler 			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
2492cf4c5a53SSam Leffler 			sc->sc_stats.mst_rxbuf_failed++;
2493cf4c5a53SSam Leffler 			return ENOMEM;
2494cf4c5a53SSam Leffler 		}
2495cf4c5a53SSam Leffler 	}
2496cf4c5a53SSam Leffler 	/*
2497cf4c5a53SSam Leffler 	 * NB: DMA buffer contents are known to be unmodified
2498cf4c5a53SSam Leffler 	 *     so there's no need to flush the data cache.
2499cf4c5a53SSam Leffler 	 */
2500cf4c5a53SSam Leffler 
2501cf4c5a53SSam Leffler 	/*
2502cf4c5a53SSam Leffler 	 * Setup descriptor.
2503cf4c5a53SSam Leffler 	 */
2504cf4c5a53SSam Leffler 	ds->QosCtrl = 0;
2505cf4c5a53SSam Leffler 	ds->RSSI = 0;
2506cf4c5a53SSam Leffler 	ds->Status = EAGLE_RXD_STATUS_IDLE;
2507cf4c5a53SSam Leffler 	ds->Channel = 0;
2508cf4c5a53SSam Leffler 	ds->PktLen = htole16(MWL_AGGR_SIZE);
2509cf4c5a53SSam Leffler 	ds->SQ2 = 0;
2510cf4c5a53SSam Leffler 	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
2511cf4c5a53SSam Leffler 	/* NB: don't touch pPhysNext, set once */
2512cf4c5a53SSam Leffler 	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
2513cf4c5a53SSam Leffler 	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2514cf4c5a53SSam Leffler 
2515cf4c5a53SSam Leffler 	return 0;
2516cf4c5a53SSam Leffler }
2517cf4c5a53SSam Leffler 
251815c28f87SGleb Smirnoff static void
2519e8fd18f3SGleb Smirnoff mwl_ext_free(struct mbuf *m)
2520cf4c5a53SSam Leffler {
2521e8fd18f3SGleb Smirnoff 	struct mwl_softc *sc = m->m_ext.ext_arg1;
2522cf4c5a53SSam Leffler 
2523cf4c5a53SSam Leffler 	/* XXX bounds check data */
2524e8fd18f3SGleb Smirnoff 	mwl_putrxdma(sc, m->m_ext.ext_buf);
2525cf4c5a53SSam Leffler 	/*
2526cf4c5a53SSam Leffler 	 * If we were previously blocked by a lack of rx dma buffers
2527cf4c5a53SSam Leffler 	 * check if we now have enough to restart rx interrupt handling.
2528cf4c5a53SSam Leffler 	 */
2529cf4c5a53SSam Leffler 	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
2530cf4c5a53SSam Leffler 		sc->sc_rxblocked = 0;
2531cf4c5a53SSam Leffler 		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
2532cf4c5a53SSam Leffler 	}
2533cf4c5a53SSam Leffler }
2534cf4c5a53SSam Leffler 
2535cf4c5a53SSam Leffler struct mwl_frame_bar {
2536cf4c5a53SSam Leffler 	u_int8_t	i_fc[2];
2537cf4c5a53SSam Leffler 	u_int8_t	i_dur[2];
2538cf4c5a53SSam Leffler 	u_int8_t	i_ra[IEEE80211_ADDR_LEN];
2539cf4c5a53SSam Leffler 	u_int8_t	i_ta[IEEE80211_ADDR_LEN];
2540cf4c5a53SSam Leffler 	/* ctl, seq, FCS */
2541cf4c5a53SSam Leffler } __packed;
2542cf4c5a53SSam Leffler 
2543cf4c5a53SSam Leffler /*
2544cf4c5a53SSam Leffler  * Like ieee80211_anyhdrsize, but handles BAR frames
2545cf4c5a53SSam Leffler  * specially so the logic below to piece the 802.11
2546cf4c5a53SSam Leffler  * header together works.
2547cf4c5a53SSam Leffler  */
2548cf4c5a53SSam Leffler static __inline int
2549cf4c5a53SSam Leffler mwl_anyhdrsize(const void *data)
2550cf4c5a53SSam Leffler {
2551cf4c5a53SSam Leffler 	const struct ieee80211_frame *wh = data;
2552cf4c5a53SSam Leffler 
2553c249cc38SAdrian Chadd 	if (IEEE80211_IS_CTL(wh)) {
2554cf4c5a53SSam Leffler 		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2555cf4c5a53SSam Leffler 		case IEEE80211_FC0_SUBTYPE_CTS:
2556cf4c5a53SSam Leffler 		case IEEE80211_FC0_SUBTYPE_ACK:
2557cf4c5a53SSam Leffler 			return sizeof(struct ieee80211_frame_ack);
2558cf4c5a53SSam Leffler 		case IEEE80211_FC0_SUBTYPE_BAR:
2559cf4c5a53SSam Leffler 			return sizeof(struct mwl_frame_bar);
2560cf4c5a53SSam Leffler 		}
2561cf4c5a53SSam Leffler 		return sizeof(struct ieee80211_frame_min);
2562cf4c5a53SSam Leffler 	} else
2563cf4c5a53SSam Leffler 		return ieee80211_hdrsize(data);
2564cf4c5a53SSam Leffler }
2565cf4c5a53SSam Leffler 
2566cf4c5a53SSam Leffler static void
2567cf4c5a53SSam Leffler mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2568cf4c5a53SSam Leffler {
2569cf4c5a53SSam Leffler 	const struct ieee80211_frame *wh;
2570cf4c5a53SSam Leffler 	struct ieee80211_node *ni;
2571cf4c5a53SSam Leffler 
2572cf4c5a53SSam Leffler 	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2573cf4c5a53SSam Leffler 	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2574cf4c5a53SSam Leffler 	if (ni != NULL) {
2575cf4c5a53SSam Leffler 		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2576cf4c5a53SSam Leffler 		ieee80211_free_node(ni);
2577cf4c5a53SSam Leffler 	}
2578cf4c5a53SSam Leffler }
2579cf4c5a53SSam Leffler 
2580cf4c5a53SSam Leffler /*
2581cf4c5a53SSam Leffler  * Convert hardware signal strength to rssi.  The value
2582cf4c5a53SSam Leffler  * provided by the device has the noise floor added in;
2583cf4c5a53SSam Leffler  * we need to compensate for this, but we don't have the
2584cf4c5a53SSam Leffler  * actual noise floor, so we use a fixed value.
2585cf4c5a53SSam Leffler  *
2586cf4c5a53SSam Leffler  * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2587cf4c5a53SSam Leffler  * offset is already set as part of the initial gain.  This
2588cf4c5a53SSam Leffler  * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2589cf4c5a53SSam Leffler  */
2590cf4c5a53SSam Leffler static __inline int
2591cf4c5a53SSam Leffler cvtrssi(uint8_t ssi)
2592cf4c5a53SSam Leffler {
2593cf4c5a53SSam Leffler 	int rssi = (int) ssi + 8;
2594cf4c5a53SSam Leffler 	/* XXX hack guess until we have a real noise floor */
2595cf4c5a53SSam Leffler 	rssi = 2*(87 - rssi);	/* NB: .5 dBm units */
2596cf4c5a53SSam Leffler 	return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
2597cf4c5a53SSam Leffler }
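/*
 * For example (sketch only): a hardware ssi of 50 gives
 * rssi = 2*(87 - (50 + 8)) = 58 in the .5 dBm units used here,
 * i.e. 29 dB, with results clamped to the 0..127 range.
 */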
2598cf4c5a53SSam Leffler 
2599cf4c5a53SSam Leffler static void
2600cf4c5a53SSam Leffler mwl_rx_proc(void *arg, int npending)
2601cf4c5a53SSam Leffler {
2602cf4c5a53SSam Leffler 	struct mwl_softc *sc = arg;
26037a79cebfSGleb Smirnoff 	struct ieee80211com *ic = &sc->sc_ic;
2604cf4c5a53SSam Leffler 	struct mwl_rxbuf *bf;
2605cf4c5a53SSam Leffler 	struct mwl_rxdesc *ds;
2606cf4c5a53SSam Leffler 	struct mbuf *m;
2607cf4c5a53SSam Leffler 	struct ieee80211_qosframe *wh;
2608cf4c5a53SSam Leffler 	struct ieee80211_node *ni;
2609cf4c5a53SSam Leffler 	struct mwl_node *mn;
2610cf4c5a53SSam Leffler 	int off, len, hdrlen, pktlen, rssi, ntodo;
2611cf4c5a53SSam Leffler 	uint8_t *data, status;
2612cf4c5a53SSam Leffler 	void *newdata;
2613cf4c5a53SSam Leffler 	int16_t nf;
2614cf4c5a53SSam Leffler 
2615cf4c5a53SSam Leffler 	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
2616cf4c5a53SSam Leffler 	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
2617cf4c5a53SSam Leffler 	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
2618cf4c5a53SSam Leffler 	nf = -96;			/* XXX */
2619cf4c5a53SSam Leffler 	bf = sc->sc_rxnext;
2620cf4c5a53SSam Leffler 	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
2621cf4c5a53SSam Leffler 		if (bf == NULL)
2622cf4c5a53SSam Leffler 			bf = STAILQ_FIRST(&sc->sc_rxbuf);
2623cf4c5a53SSam Leffler 		ds = bf->bf_desc;
2624cf4c5a53SSam Leffler 		data = bf->bf_data;
2625cf4c5a53SSam Leffler 		if (data == NULL) {
2626cf4c5a53SSam Leffler 			/*
2627cf4c5a53SSam Leffler 			 * If data allocation failed previously there
2628cf4c5a53SSam Leffler 			 * will be no buffer; try again to re-populate it.
2629cf4c5a53SSam Leffler 			 * Note the firmware will not advance to the next
2630cf4c5a53SSam Leffler 			 * descriptor with a dma buffer so we must mimic
2631cf4c5a53SSam Leffler 			 * this or we'll get out of sync.
2632cf4c5a53SSam Leffler 			 */
2633cf4c5a53SSam Leffler 			DPRINTF(sc, MWL_DEBUG_ANY,
2634cf4c5a53SSam Leffler 			    "%s: rx buf w/o dma memory\n", __func__);
2635cf4c5a53SSam Leffler 			(void) mwl_rxbuf_init(sc, bf);
2636cf4c5a53SSam Leffler 			sc->sc_stats.mst_rx_dmabufmissing++;
2637cf4c5a53SSam Leffler 			break;
2638cf4c5a53SSam Leffler 		}
2639cf4c5a53SSam Leffler 		MWL_RXDESC_SYNC(sc, ds,
2640cf4c5a53SSam Leffler 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2641cf4c5a53SSam Leffler 		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
2642cf4c5a53SSam Leffler 			break;
2643cf4c5a53SSam Leffler #ifdef MWL_DEBUG
2644cf4c5a53SSam Leffler 		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
2645cf4c5a53SSam Leffler 			mwl_printrxbuf(bf, 0);
2646cf4c5a53SSam Leffler #endif
2647cf4c5a53SSam Leffler 		status = ds->Status;
2648cf4c5a53SSam Leffler 		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
26497a79cebfSGleb Smirnoff 			counter_u64_add(ic->ic_ierrors, 1);
2650cf4c5a53SSam Leffler 			sc->sc_stats.mst_rx_crypto++;
2651cf4c5a53SSam Leffler 			/*
2652cf4c5a53SSam Leffler 			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
2653cf4c5a53SSam Leffler 			 *     for backwards compatibility.
2654cf4c5a53SSam Leffler 			 */
2655cf4c5a53SSam Leffler 			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
2656cf4c5a53SSam Leffler 			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
2657cf4c5a53SSam Leffler 				/*
2658cf4c5a53SSam Leffler 				 * MIC error, notify upper layers.
2659cf4c5a53SSam Leffler 				 */
2660cf4c5a53SSam Leffler 				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
2661cf4c5a53SSam Leffler 				    BUS_DMASYNC_POSTREAD);
2662cf4c5a53SSam Leffler 				mwl_handlemicerror(ic, data);
2663cf4c5a53SSam Leffler 				sc->sc_stats.mst_rx_tkipmic++;
2664cf4c5a53SSam Leffler 			}
2665cf4c5a53SSam Leffler 			/* XXX too painful to tap packets */
2666cf4c5a53SSam Leffler 			goto rx_next;
2667cf4c5a53SSam Leffler 		}
2668cf4c5a53SSam Leffler 		/*
2669cf4c5a53SSam Leffler 		 * Sync the data buffer.
2670cf4c5a53SSam Leffler 		 */
2671cf4c5a53SSam Leffler 		len = le16toh(ds->PktLen);
2672cf4c5a53SSam Leffler 		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
2673cf4c5a53SSam Leffler 		/*
2674cf4c5a53SSam Leffler 		 * The 802.11 header is provided all or in part at the front;
2675cf4c5a53SSam Leffler 		 * use it to calculate the true size of the header that we'll
2676cf4c5a53SSam Leffler 		 * construct below.  We use this to figure out where to copy
2677cf4c5a53SSam Leffler 		 * payload prior to constructing the header.
2678cf4c5a53SSam Leffler 		 */
2679cf4c5a53SSam Leffler 		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
2680cf4c5a53SSam Leffler 		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);
2681cf4c5a53SSam Leffler 
2682cf4c5a53SSam Leffler 		/* calculate rssi early so we can re-use for each aggregate */
2683cf4c5a53SSam Leffler 		rssi = cvtrssi(ds->RSSI);
2684cf4c5a53SSam Leffler 
2685cf4c5a53SSam Leffler 		pktlen = hdrlen + (len - off);
2686cf4c5a53SSam Leffler 		/*
2687cf4c5a53SSam Leffler 		 * NB: we know our frame is at least as large as
2688cf4c5a53SSam Leffler 		 * IEEE80211_MIN_LEN because there is a 4-address
2689cf4c5a53SSam Leffler 		 * frame at the front.  Hence there's no need to
2690cf4c5a53SSam Leffler 		 * vet the packet length.  If the frame in fact
2691cf4c5a53SSam Leffler 		 * is too small it should be discarded at the
2692cf4c5a53SSam Leffler 		 * net80211 layer.
2693cf4c5a53SSam Leffler 		 */
2694cf4c5a53SSam Leffler 
2695cf4c5a53SSam Leffler 		/*
2696cf4c5a53SSam Leffler 		 * Attach dma buffer to an mbuf.  We tried
2697cf4c5a53SSam Leffler 		 * doing this based on the packet size (i.e.
2698cf4c5a53SSam Leffler 		 * copying small packets) but it turns out to
2699cf4c5a53SSam Leffler 		 * be a net loss.  The tradeoff might be system
2700cf4c5a53SSam Leffler 		 * dependent (cache architecture is important).
2701cf4c5a53SSam Leffler 		 */
2702c6499eccSGleb Smirnoff 		MGETHDR(m, M_NOWAIT, MT_DATA);
2703cf4c5a53SSam Leffler 		if (m == NULL) {
2704cf4c5a53SSam Leffler 			DPRINTF(sc, MWL_DEBUG_ANY,
2705cf4c5a53SSam Leffler 			    "%s: no rx mbuf\n", __func__);
2706cf4c5a53SSam Leffler 			sc->sc_stats.mst_rx_nombuf++;
2707cf4c5a53SSam Leffler 			goto rx_next;
2708cf4c5a53SSam Leffler 		}
2709cf4c5a53SSam Leffler 		/*
2710cf4c5a53SSam Leffler 		 * Acquire the replacement dma buffer before
2711cf4c5a53SSam Leffler 		 * processing the frame.  If we're out of dma
2712cf4c5a53SSam Leffler 		 * buffers we disable rx interrupts and wait
2713cf4c5a53SSam Leffler 		 * for the free pool to reach mwl_rxdmalow buffers
2714cf4c5a53SSam Leffler 		 * before starting to do work again.  If the firmware
2715cf4c5a53SSam Leffler 		 * runs out of descriptors then it will toss frames
2716cf4c5a53SSam Leffler 		 * which is better than our doing it as that can
2717cf4c5a53SSam Leffler 		 * starve our processing.  It is also important that
2718cf4c5a53SSam Leffler 		 * we always process rx'd frames in case they are
2719cf4c5a53SSam Leffler 		 * A-MPDU as otherwise the host's view of the BA
2720cf4c5a53SSam Leffler 		 * window may get out of sync with the firmware.
2721cf4c5a53SSam Leffler 		 */
2722cf4c5a53SSam Leffler 		newdata = mwl_getrxdma(sc);
2723cf4c5a53SSam Leffler 		if (newdata == NULL) {
2724cf4c5a53SSam Leffler 			/* NB: stat+msg in mwl_getrxdma */
2725cf4c5a53SSam Leffler 			m_free(m);
2726cf4c5a53SSam Leffler 			/* disable RX interrupt and mark state */
2727cf4c5a53SSam Leffler 			mwl_hal_intrset(sc->sc_mh,
2728cf4c5a53SSam Leffler 			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
2729cf4c5a53SSam Leffler 			sc->sc_rxblocked = 1;
2730cf4c5a53SSam Leffler 			ieee80211_drain(ic);
2731cf4c5a53SSam Leffler 			/* XXX check rxblocked and immediately start again? */
2732cf4c5a53SSam Leffler 			goto rx_stop;
2733cf4c5a53SSam Leffler 		}
2734cf4c5a53SSam Leffler 		bf->bf_data = newdata;
2735cf4c5a53SSam Leffler 		/*
2736cf4c5a53SSam Leffler 		 * Attach the dma buffer to the mbuf;
2737cf4c5a53SSam Leffler 		 * mwl_rxbuf_init will re-setup the rx
2738cf4c5a53SSam Leffler 		 * descriptor using the replacement dma
2739cf4c5a53SSam Leffler 		 * buffer we just installed above.
2740cf4c5a53SSam Leffler 		 */
2741e8fd18f3SGleb Smirnoff 		m_extadd(m, data, MWL_AGGR_SIZE, mwl_ext_free, sc, NULL, 0,
2742e8fd18f3SGleb Smirnoff 		    EXT_NET_DRV);
2743cf4c5a53SSam Leffler 		m->m_data += off - hdrlen;
2744cf4c5a53SSam Leffler 		m->m_pkthdr.len = m->m_len = pktlen;
2745cf4c5a53SSam Leffler 		/* NB: dma buffer assumed read-only */
2746cf4c5a53SSam Leffler 
2747cf4c5a53SSam Leffler 		/*
2748cf4c5a53SSam Leffler 		 * Piece 802.11 header together.
2749cf4c5a53SSam Leffler 		 */
2750cf4c5a53SSam Leffler 		wh = mtod(m, struct ieee80211_qosframe *);
2751cf4c5a53SSam Leffler 		/* NB: don't need to do this sometimes but ... */
2752cf4c5a53SSam Leffler 		/* XXX special case so we can memcpy after m_devget? */
2753cf4c5a53SSam Leffler 		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
2754f3f08e16SAndriy Voskoboinyk 		if (IEEE80211_QOS_HAS_SEQ(wh))
2755f3f08e16SAndriy Voskoboinyk 			*(uint16_t *)ieee80211_getqos(wh) = ds->QosCtrl;
2756cf4c5a53SSam Leffler 		/*
2757cf4c5a53SSam Leffler 		 * The f/w strips WEP header but doesn't clear
2758cf4c5a53SSam Leffler 		 * the WEP bit; mark the packet with M_WEP so
2759cf4c5a53SSam Leffler 		 * net80211 will treat the data as decrypted.
2760cf4c5a53SSam Leffler 		 * While here also clear the PWR_MGT bit since
2761cf4c5a53SSam Leffler 		 * power save is handled by the firmware and
2762cf4c5a53SSam Leffler 		 * passing this up will potentially cause the
2763cf4c5a53SSam Leffler 		 * upper layer to put a station in power save
2764cf4c5a53SSam Leffler 		 * (except when configured with MWL_HOST_PS_SUPPORT).
2765cf4c5a53SSam Leffler 		 */
27665945b5f5SKevin Lo 		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
2767cf4c5a53SSam Leffler 			m->m_flags |= M_WEP;
2768cf4c5a53SSam Leffler #ifdef MWL_HOST_PS_SUPPORT
27695945b5f5SKevin Lo 		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
2770cf4c5a53SSam Leffler #else
27715945b5f5SKevin Lo 		wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
27725945b5f5SKevin Lo 		    IEEE80211_FC1_PWR_MGT);
2773cf4c5a53SSam Leffler #endif
2774cf4c5a53SSam Leffler 
2775cf4c5a53SSam Leffler 		if (ieee80211_radiotap_active(ic)) {
2776cf4c5a53SSam Leffler 			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;
2777cf4c5a53SSam Leffler 
2778cf4c5a53SSam Leffler 			tap->wr_flags = 0;
2779cf4c5a53SSam Leffler 			tap->wr_rate = ds->Rate;
2780cf4c5a53SSam Leffler 			tap->wr_antsignal = rssi + nf;
2781cf4c5a53SSam Leffler 			tap->wr_antnoise = nf;
2782cf4c5a53SSam Leffler 		}
2783cf4c5a53SSam Leffler 		if (IFF_DUMPPKTS_RECV(sc, wh)) {
2784cf4c5a53SSam Leffler 			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
2785cf4c5a53SSam Leffler 			    len, ds->Rate, rssi);
2786cf4c5a53SSam Leffler 		}
2787cf4c5a53SSam Leffler 		/* dispatch */
2788cf4c5a53SSam Leffler 		ni = ieee80211_find_rxnode(ic,
2789cf4c5a53SSam Leffler 		    (const struct ieee80211_frame_min *) wh);
2790cf4c5a53SSam Leffler 		if (ni != NULL) {
2791cf4c5a53SSam Leffler 			mn = MWL_NODE(ni);
2792cf4c5a53SSam Leffler #ifdef MWL_ANT_INFO_SUPPORT
2793cf4c5a53SSam Leffler 			mn->mn_ai.rssi_a = ds->ai.rssi_a;
2794cf4c5a53SSam Leffler 			mn->mn_ai.rssi_b = ds->ai.rssi_b;
2795cf4c5a53SSam Leffler 			mn->mn_ai.rssi_c = ds->ai.rssi_c;
2796cf4c5a53SSam Leffler 			mn->mn_ai.rsvd1 = rssi;
2797cf4c5a53SSam Leffler #endif
2798cf4c5a53SSam Leffler 			/* tag AMPDU aggregates for reorder processing */
2799cf4c5a53SSam Leffler 			if (ni->ni_flags & IEEE80211_NODE_HT)
2800cf4c5a53SSam Leffler 				m->m_flags |= M_AMPDU;
2801cf4c5a53SSam Leffler 			(void) ieee80211_input(ni, m, rssi, nf);
2802cf4c5a53SSam Leffler 			ieee80211_free_node(ni);
2803cf4c5a53SSam Leffler 		} else
2804cf4c5a53SSam Leffler 			(void) ieee80211_input_all(ic, m, rssi, nf);
2805cf4c5a53SSam Leffler rx_next:
2806cf4c5a53SSam Leffler 		/* NB: ignore ENOMEM so we process more descriptors */
2807cf4c5a53SSam Leffler 		(void) mwl_rxbuf_init(sc, bf);
2808cf4c5a53SSam Leffler 		bf = STAILQ_NEXT(bf, bf_list);
2809cf4c5a53SSam Leffler 	}
2810cf4c5a53SSam Leffler rx_stop:
2811cf4c5a53SSam Leffler 	sc->sc_rxnext = bf;
2812cf4c5a53SSam Leffler 
28137a79cebfSGleb Smirnoff 	if (mbufq_first(&sc->sc_snd) != NULL) {
2814cf4c5a53SSam Leffler 		/* NB: kick fw; the tx thread may have been preempted */
2815cf4c5a53SSam Leffler 		mwl_hal_txstart(sc->sc_mh, 0);
28167a79cebfSGleb Smirnoff 		mwl_start(sc);
2817cf4c5a53SSam Leffler 	}
2818cf4c5a53SSam Leffler }
2819cf4c5a53SSam Leffler 
2820cf4c5a53SSam Leffler static void
2821cf4c5a53SSam Leffler mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
2822cf4c5a53SSam Leffler {
2823cf4c5a53SSam Leffler 	struct mwl_txbuf *bf, *bn;
2824cf4c5a53SSam Leffler 	struct mwl_txdesc *ds;
2825cf4c5a53SSam Leffler 
2826cf4c5a53SSam Leffler 	MWL_TXQ_LOCK_INIT(sc, txq);
2827cf4c5a53SSam Leffler 	txq->qnum = qnum;
2828cf4c5a53SSam Leffler 	txq->txpri = 0;	/* XXX */
2829cf4c5a53SSam Leffler #if 0
2830cf4c5a53SSam Leffler 	/* NB: q setup by mwl_txdma_setup XXX */
2831cf4c5a53SSam Leffler 	STAILQ_INIT(&txq->free);
2832cf4c5a53SSam Leffler #endif
2833cf4c5a53SSam Leffler 	STAILQ_FOREACH(bf, &txq->free, bf_list) {
2834cf4c5a53SSam Leffler 		bf->bf_txq = txq;
2835cf4c5a53SSam Leffler 
2836cf4c5a53SSam Leffler 		ds = bf->bf_desc;
2837cf4c5a53SSam Leffler 		bn = STAILQ_NEXT(bf, bf_list);
2838cf4c5a53SSam Leffler 		if (bn == NULL)
2839cf4c5a53SSam Leffler 			bn = STAILQ_FIRST(&txq->free);
2840cf4c5a53SSam Leffler 		ds->pPhysNext = htole32(bn->bf_daddr);
2841cf4c5a53SSam Leffler 	}
2842cf4c5a53SSam Leffler 	STAILQ_INIT(&txq->active);
2843cf4c5a53SSam Leffler }
2844cf4c5a53SSam Leffler 
2845cf4c5a53SSam Leffler /*
2846cf4c5a53SSam Leffler  * Set up a hardware data transmit queue for the specified
2847cf4c5a53SSam Leffler  * access category.  We record the mapping from ac's
2848cf4c5a53SSam Leffler  * to h/w queues for use by mwl_tx_start.
2849cf4c5a53SSam Leffler  */
2850cf4c5a53SSam Leffler static int
2851cf4c5a53SSam Leffler mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2852cf4c5a53SSam Leffler {
2853cf4c5a53SSam Leffler 	struct mwl_txq *txq;
2854cf4c5a53SSam Leffler 
2855d6166defSAdrian Chadd 	if (ac >= nitems(sc->sc_ac2q)) {
2856cf4c5a53SSam Leffler 		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2857d6166defSAdrian Chadd 			ac, nitems(sc->sc_ac2q));
2858cf4c5a53SSam Leffler 		return 0;
2859cf4c5a53SSam Leffler 	}
2860cf4c5a53SSam Leffler 	if (mvtype >= MWL_NUM_TX_QUEUES) {
2861cf4c5a53SSam Leffler 		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2862cf4c5a53SSam Leffler 			mvtype, MWL_NUM_TX_QUEUES);
2863cf4c5a53SSam Leffler 		return 0;
2864cf4c5a53SSam Leffler 	}
2865cf4c5a53SSam Leffler 	txq = &sc->sc_txq[mvtype];
2866cf4c5a53SSam Leffler 	mwl_txq_init(sc, txq, mvtype);
2867cf4c5a53SSam Leffler 	sc->sc_ac2q[ac] = txq;
2868cf4c5a53SSam Leffler 	return 1;
2869cf4c5a53SSam Leffler }
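/*
 * Usage sketch (hypothetical constant names; the actual attach-time
 * calls and MWL_WME_AC_* values may differ): each WME access category
 * is bound to one of the MWL_NUM_TX_QUEUES firmware queues, e.g.
 *
 *	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK) ||
 *	    !mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
 *	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
 *	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO))
 *		... fail the attach and unwind ...
 */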
2870cf4c5a53SSam Leffler 
2871cf4c5a53SSam Leffler /*
2872cf4c5a53SSam Leffler  * Update WME parameters for a transmit queue.
2873cf4c5a53SSam Leffler  */
2874cf4c5a53SSam Leffler static int
2875cf4c5a53SSam Leffler mwl_txq_update(struct mwl_softc *sc, int ac)
2876cf4c5a53SSam Leffler {
2877cf4c5a53SSam Leffler #define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
28787a79cebfSGleb Smirnoff 	struct ieee80211com *ic = &sc->sc_ic;
28799fbe631aSAdrian Chadd 	struct chanAccParams chp;
2880cf4c5a53SSam Leffler 	struct mwl_txq *txq = sc->sc_ac2q[ac];
28819fbe631aSAdrian Chadd 	struct wmeParams *wmep;
2882cf4c5a53SSam Leffler 	struct mwl_hal *mh = sc->sc_mh;
2883cf4c5a53SSam Leffler 	int aifs, cwmin, cwmax, txoplim;
2884cf4c5a53SSam Leffler 
28859fbe631aSAdrian Chadd 	ieee80211_wme_ic_getparams(ic, &chp);
28869fbe631aSAdrian Chadd 	wmep = &chp.cap_wmeParams[ac];
28879fbe631aSAdrian Chadd 
2888cf4c5a53SSam Leffler 	aifs = wmep->wmep_aifsn;
2889cf4c5a53SSam Leffler 	/* XXX in sta mode need to pass log values for cwmin/max */
2890cf4c5a53SSam Leffler 	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2891cf4c5a53SSam Leffler 	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2892cf4c5a53SSam Leffler 	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
2893cf4c5a53SSam Leffler 
2894cf4c5a53SSam Leffler 	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
2895cf4c5a53SSam Leffler 		device_printf(sc->sc_dev, "unable to update hardware queue "
2896cf4c5a53SSam Leffler 			"parameters for %s traffic!\n",
2897cf4c5a53SSam Leffler 			ieee80211_wme_acnames[ac]);
2898cf4c5a53SSam Leffler 		return 0;
2899cf4c5a53SSam Leffler 	}
2900cf4c5a53SSam Leffler 	return 1;
2901cf4c5a53SSam Leffler #undef MWL_EXPONENT_TO_VALUE
2902cf4c5a53SSam Leffler }
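/*
 * Worked example for the exponent conversion above (parameter values
 * assumed, not taken from any particular WME table): an access category
 * advertising wmep_logcwmin = 4 and wmep_logcwmax = 10 yields
 * cwmin = (1<<4)-1 = 15 and cwmax = (1<<10)-1 = 1023 slots; these are
 * handed to mwl_hal_setedcaparams together with the AIFS count and the
 * TXOP limit (in units of 32us).
 */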
2903cf4c5a53SSam Leffler 
2904cf4c5a53SSam Leffler /*
2905cf4c5a53SSam Leffler  * Callback from the 802.11 layer to update WME parameters.
2906cf4c5a53SSam Leffler  */
2907cf4c5a53SSam Leffler static int
2908cf4c5a53SSam Leffler mwl_wme_update(struct ieee80211com *ic)
2909cf4c5a53SSam Leffler {
29107a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
2911cf4c5a53SSam Leffler 
2912cf4c5a53SSam Leffler 	return !mwl_txq_update(sc, WME_AC_BE) ||
2913cf4c5a53SSam Leffler 	    !mwl_txq_update(sc, WME_AC_BK) ||
2914cf4c5a53SSam Leffler 	    !mwl_txq_update(sc, WME_AC_VI) ||
2915cf4c5a53SSam Leffler 	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
2916cf4c5a53SSam Leffler }
2917cf4c5a53SSam Leffler 
2918cf4c5a53SSam Leffler /*
2919cf4c5a53SSam Leffler  * Reclaim resources for a setup queue.
2920cf4c5a53SSam Leffler  */
2921cf4c5a53SSam Leffler static void
2922cf4c5a53SSam Leffler mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
2923cf4c5a53SSam Leffler {
2924cf4c5a53SSam Leffler 	/* XXX hal work? */
2925cf4c5a53SSam Leffler 	MWL_TXQ_LOCK_DESTROY(txq);
2926cf4c5a53SSam Leffler }
2927cf4c5a53SSam Leffler 
2928cf4c5a53SSam Leffler /*
2929cf4c5a53SSam Leffler  * Reclaim all tx queue resources.
2930cf4c5a53SSam Leffler  */
2931cf4c5a53SSam Leffler static void
2932cf4c5a53SSam Leffler mwl_tx_cleanup(struct mwl_softc *sc)
2933cf4c5a53SSam Leffler {
2934cf4c5a53SSam Leffler 	int i;
2935cf4c5a53SSam Leffler 
2936cf4c5a53SSam Leffler 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2937cf4c5a53SSam Leffler 		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
2938cf4c5a53SSam Leffler }
2939cf4c5a53SSam Leffler 
2940cf4c5a53SSam Leffler static int
2941cf4c5a53SSam Leffler mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
2942cf4c5a53SSam Leffler {
2943cf4c5a53SSam Leffler 	struct mbuf *m;
2944cf4c5a53SSam Leffler 	int error;
2945cf4c5a53SSam Leffler 
2946cf4c5a53SSam Leffler 	/*
2947cf4c5a53SSam Leffler 	 * Load the DMA map so any coalescing is done.  This
2948cf4c5a53SSam Leffler 	 * also calculates the number of descriptors we need.
2949cf4c5a53SSam Leffler 	 */
2950cf4c5a53SSam Leffler 	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
2951cf4c5a53SSam Leffler 				     bf->bf_segs, &bf->bf_nseg,
2952cf4c5a53SSam Leffler 				     BUS_DMA_NOWAIT);
2953cf4c5a53SSam Leffler 	if (error == EFBIG) {
2954cf4c5a53SSam Leffler 		/* XXX packet requires too many descriptors */
2955cf4c5a53SSam Leffler 		bf->bf_nseg = MWL_TXDESC+1;
2956cf4c5a53SSam Leffler 	} else if (error != 0) {
2957cf4c5a53SSam Leffler 		sc->sc_stats.mst_tx_busdma++;
2958cf4c5a53SSam Leffler 		m_freem(m0);
2959cf4c5a53SSam Leffler 		return error;
2960cf4c5a53SSam Leffler 	}
2961cf4c5a53SSam Leffler 	/*
2962cf4c5a53SSam Leffler 	 * Discard null packets and check for packets that
2963cf4c5a53SSam Leffler 	 * require too many TX descriptors.  We try to convert
2964cf4c5a53SSam Leffler 	 * the latter to a cluster.
2965cf4c5a53SSam Leffler 	 */
2966cf4c5a53SSam Leffler 	if (error == EFBIG) {		/* too many desc's, linearize */
2967cf4c5a53SSam Leffler 		sc->sc_stats.mst_tx_linear++;
2968cf4c5a53SSam Leffler #if MWL_TXDESC > 1
2969c6499eccSGleb Smirnoff 		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
2970cf4c5a53SSam Leffler #else
2971c6499eccSGleb Smirnoff 		m = m_defrag(m0, M_NOWAIT);
2972cf4c5a53SSam Leffler #endif
2973cf4c5a53SSam Leffler 		if (m == NULL) {
2974cf4c5a53SSam Leffler 			m_freem(m0);
2975cf4c5a53SSam Leffler 			sc->sc_stats.mst_tx_nombuf++;
2976cf4c5a53SSam Leffler 			return ENOMEM;
2977cf4c5a53SSam Leffler 		}
2978cf4c5a53SSam Leffler 		m0 = m;
2979cf4c5a53SSam Leffler 		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
2980cf4c5a53SSam Leffler 					     bf->bf_segs, &bf->bf_nseg,
2981cf4c5a53SSam Leffler 					     BUS_DMA_NOWAIT);
2982cf4c5a53SSam Leffler 		if (error != 0) {
2983cf4c5a53SSam Leffler 			sc->sc_stats.mst_tx_busdma++;
2984cf4c5a53SSam Leffler 			m_freem(m0);
2985cf4c5a53SSam Leffler 			return error;
2986cf4c5a53SSam Leffler 		}
2987cf4c5a53SSam Leffler 		KASSERT(bf->bf_nseg <= MWL_TXDESC,
2988cf4c5a53SSam Leffler 		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
2989cf4c5a53SSam Leffler 	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
2990cf4c5a53SSam Leffler 		sc->sc_stats.mst_tx_nodata++;
2991cf4c5a53SSam Leffler 		m_freem(m0);
2992cf4c5a53SSam Leffler 		return EIO;
2993cf4c5a53SSam Leffler 	}
2994cf4c5a53SSam Leffler 	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
2995cf4c5a53SSam Leffler 		__func__, m0, m0->m_pkthdr.len);
2996cf4c5a53SSam Leffler 	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
2997cf4c5a53SSam Leffler 	bf->bf_m = m0;
2998cf4c5a53SSam Leffler 
2999cf4c5a53SSam Leffler 	return 0;
3000cf4c5a53SSam Leffler }
3001cf4c5a53SSam Leffler 
3002cf4c5a53SSam Leffler static __inline int
3003cf4c5a53SSam Leffler mwl_cvtlegacyrate(int rate)
3004cf4c5a53SSam Leffler {
3005cf4c5a53SSam Leffler 	switch (rate) {
3006cf4c5a53SSam Leffler 	case 2:	 return 0;
3007cf4c5a53SSam Leffler 	case 4:	 return 1;
3008cf4c5a53SSam Leffler 	case 11: return 2;
3009cf4c5a53SSam Leffler 	case 22: return 3;
3010cf4c5a53SSam Leffler 	case 44: return 4;
3011cf4c5a53SSam Leffler 	case 12: return 5;
3012cf4c5a53SSam Leffler 	case 18: return 6;
3013cf4c5a53SSam Leffler 	case 24: return 7;
3014cf4c5a53SSam Leffler 	case 36: return 8;
3015cf4c5a53SSam Leffler 	case 48: return 9;
3016cf4c5a53SSam Leffler 	case 72: return 10;
3017cf4c5a53SSam Leffler 	case 96: return 11;
3018cf4c5a53SSam Leffler 	case 108:return 12;
3019cf4c5a53SSam Leffler 	}
3020cf4c5a53SSam Leffler 	return 0;
3021cf4c5a53SSam Leffler }
3022cf4c5a53SSam Leffler 
3023cf4c5a53SSam Leffler /*
3024cf4c5a53SSam Leffler  * Calculate fixed tx rate information per client state;
3025cf4c5a53SSam Leffler  * this value is suitable for writing to the Format field
3026cf4c5a53SSam Leffler  * of a tx descriptor.
3027cf4c5a53SSam Leffler  */
3028cf4c5a53SSam Leffler static uint16_t
3029cf4c5a53SSam Leffler mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
3030cf4c5a53SSam Leffler {
3031cf4c5a53SSam Leffler 	uint16_t fmt;
3032cf4c5a53SSam Leffler 
3033fe5ebb23SBjoern A. Zeeb 	fmt = _IEEE80211_SHIFTMASK(3, EAGLE_TXD_ANTENNA)
3034cf4c5a53SSam Leffler 	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
3035cf4c5a53SSam Leffler 		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
30367850fa71SSam Leffler 	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
3037cf4c5a53SSam Leffler 		fmt |= EAGLE_TXD_FORMAT_HT
3038cf4c5a53SSam Leffler 		    /* NB: 0x80 implicitly stripped from ucastrate */
3039fe5ebb23SBjoern A. Zeeb 		    | _IEEE80211_SHIFTMASK(rate, EAGLE_TXD_RATE);
3040cf4c5a53SSam Leffler 		/* XXX short/long GI may be wrong; re-check */
3041cf4c5a53SSam Leffler 		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
3042cf4c5a53SSam Leffler 			fmt |= EAGLE_TXD_CHW_40
3043cf4c5a53SSam Leffler 			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
3044cf4c5a53SSam Leffler 			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3045cf4c5a53SSam Leffler 		} else {
3046cf4c5a53SSam Leffler 			fmt |= EAGLE_TXD_CHW_20
3047cf4c5a53SSam Leffler 			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
3048cf4c5a53SSam Leffler 			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3049cf4c5a53SSam Leffler 		}
3050cf4c5a53SSam Leffler 	} else {			/* legacy rate */
3051cf4c5a53SSam Leffler 		fmt |= EAGLE_TXD_FORMAT_LEGACY
3052fe5ebb23SBjoern A. Zeeb 		    | _IEEE80211_SHIFTMASK(mwl_cvtlegacyrate(rate),
3053fe5ebb23SBjoern A. Zeeb 			EAGLE_TXD_RATE)
3054cf4c5a53SSam Leffler 		    | EAGLE_TXD_CHW_20
3055cf4c5a53SSam Leffler 		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
3056cf4c5a53SSam Leffler 		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
3057cf4c5a53SSam Leffler 			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
3058cf4c5a53SSam Leffler 	}
3059cf4c5a53SSam Leffler 	return fmt;
3060cf4c5a53SSam Leffler }
3061cf4c5a53SSam Leffler 
3062cf4c5a53SSam Leffler static int
3063cf4c5a53SSam Leffler mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
3064cf4c5a53SSam Leffler     struct mbuf *m0)
3065cf4c5a53SSam Leffler {
30667a79cebfSGleb Smirnoff 	struct ieee80211com *ic = &sc->sc_ic;
3067cf4c5a53SSam Leffler 	struct ieee80211vap *vap = ni->ni_vap;
3068cf4c5a53SSam Leffler 	int error, iswep, ismcast;
3069cec04a5dSMateusz Guzik 	int hdrlen, pktlen;
3070cf4c5a53SSam Leffler 	struct mwl_txdesc *ds;
3071cf4c5a53SSam Leffler 	struct mwl_txq *txq;
3072cf4c5a53SSam Leffler 	struct ieee80211_frame *wh;
3073cf4c5a53SSam Leffler 	struct mwltxrec *tr;
3074cf4c5a53SSam Leffler 	struct mwl_node *mn;
3075cf4c5a53SSam Leffler 	uint16_t qos;
3076cf4c5a53SSam Leffler #if MWL_TXDESC > 1
3077cf4c5a53SSam Leffler 	int i;
3078cf4c5a53SSam Leffler #endif
3079cf4c5a53SSam Leffler 
3080cf4c5a53SSam Leffler 	wh = mtod(m0, struct ieee80211_frame *);
30815945b5f5SKevin Lo 	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
3082cf4c5a53SSam Leffler 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
3083cf4c5a53SSam Leffler 	hdrlen = ieee80211_anyhdrsize(wh);
3084cf4c5a53SSam Leffler 	pktlen = m0->m_pkthdr.len;
3085cf4c5a53SSam Leffler 	if (IEEE80211_QOS_HAS_SEQ(wh)) {
3086f3f08e16SAndriy Voskoboinyk 		qos = *(uint16_t *)ieee80211_getqos(wh);
3087cf4c5a53SSam Leffler 	} else
3088cf4c5a53SSam Leffler 		qos = 0;
3089cf4c5a53SSam Leffler 
3090cf4c5a53SSam Leffler 	if (iswep) {
3091cf4c5a53SSam Leffler 		const struct ieee80211_cipher *cip;
3092cf4c5a53SSam Leffler 		struct ieee80211_key *k;
3093cf4c5a53SSam Leffler 
3094cf4c5a53SSam Leffler 		/*
3095cf4c5a53SSam Leffler 		 * Construct the 802.11 header+trailer for an encrypted
3096cf4c5a53SSam Leffler 		 * frame. The only reason this can fail is because of an
3097cf4c5a53SSam Leffler 		 * unknown or unsupported cipher/key type.
3098cf4c5a53SSam Leffler 		 *
3099cf4c5a53SSam Leffler 		 * NB: we do this even though the firmware will ignore
3100cf4c5a53SSam Leffler 		 *     what we've done for WEP and TKIP as we need the
3101cf4c5a53SSam Leffler 		 *     ExtIV filled in for CCMP and this also adjusts
3102cf4c5a53SSam Leffler 		 *     the headers which simplifies our work below.
3103cf4c5a53SSam Leffler 		 */
3104cf4c5a53SSam Leffler 		k = ieee80211_crypto_encap(ni, m0);
3105cf4c5a53SSam Leffler 		if (k == NULL) {
3106cf4c5a53SSam Leffler 			/*
3107cf4c5a53SSam Leffler 			 * This can happen when the key is yanked after the
3108cf4c5a53SSam Leffler 			 * frame was queued.  Just discard the frame; the
3109cf4c5a53SSam Leffler 			 * 802.11 layer counts failures and provides
3110cf4c5a53SSam Leffler 			 * debugging/diagnostics.
3111cf4c5a53SSam Leffler 			 */
3112cf4c5a53SSam Leffler 			m_freem(m0);
3113cf4c5a53SSam Leffler 			return EIO;
3114cf4c5a53SSam Leffler 		}
3115cf4c5a53SSam Leffler 		/*
3116cf4c5a53SSam Leffler 		 * Adjust the packet length for the crypto additions
3117cf4c5a53SSam Leffler 		 * done during encap and any other bits that the f/w
3118cf4c5a53SSam Leffler 		 * will add later on.
3119cf4c5a53SSam Leffler 		 */
3120cf4c5a53SSam Leffler 		cip = k->wk_cipher;
3121cf4c5a53SSam Leffler 		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;
3122cf4c5a53SSam Leffler 
3123cf4c5a53SSam Leffler 		/* packet header may have moved, reset our local pointer */
3124cf4c5a53SSam Leffler 		wh = mtod(m0, struct ieee80211_frame *);
3125cf4c5a53SSam Leffler 	}
3126cf4c5a53SSam Leffler 
3127cf4c5a53SSam Leffler 	if (ieee80211_radiotap_active_vap(vap)) {
3128cf4c5a53SSam Leffler 		sc->sc_tx_th.wt_flags = 0;	/* XXX */
3129cf4c5a53SSam Leffler 		if (iswep)
3130cf4c5a53SSam Leffler 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3131cf4c5a53SSam Leffler #if 0
3132cf4c5a53SSam Leffler 		sc->sc_tx_th.wt_rate = ds->DataRate;
3133cf4c5a53SSam Leffler #endif
3134cf4c5a53SSam Leffler 		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
3135cf4c5a53SSam Leffler 		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
3136cf4c5a53SSam Leffler 
3137cf4c5a53SSam Leffler 		ieee80211_radiotap_tx(vap, m0);
3138cf4c5a53SSam Leffler 	}
3139cf4c5a53SSam Leffler 	/*
3140cf4c5a53SSam Leffler 	 * Copy up/down the 802.11 header; the firmware requires
3141cf4c5a53SSam Leffler 	 * we present a 2-byte payload length followed by a
3142cf4c5a53SSam Leffler 	 * 4-address header (w/o QoS), followed (optionally) by
3143cf4c5a53SSam Leffler 	 * any WEP/ExtIV header (but only filled in for CCMP).
3144cf4c5a53SSam Leffler 	 * We are assured the mbuf has sufficient headroom to
3145cf4c5a53SSam Leffler 	 * prepend in-place by the setup of ic_headroom in
3146cf4c5a53SSam Leffler 	 * mwl_attach.
3147cf4c5a53SSam Leffler 	 */
3148cf4c5a53SSam Leffler 	if (hdrlen < sizeof(struct mwltxrec)) {
3149cf4c5a53SSam Leffler 		const int space = sizeof(struct mwltxrec) - hdrlen;
3150cf4c5a53SSam Leffler 		if (M_LEADINGSPACE(m0) < space) {
3151cf4c5a53SSam Leffler 			/* NB: should never happen */
3152cf4c5a53SSam Leffler 			device_printf(sc->sc_dev,
3153cf4c5a53SSam Leffler 			    "not enough headroom, need %d found %zd, "
3154cf4c5a53SSam Leffler 			    "m_flags 0x%x m_len %d\n",
3155cf4c5a53SSam Leffler 			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
3156cf4c5a53SSam Leffler 			ieee80211_dump_pkt(ic,
3157cf4c5a53SSam Leffler 			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
3158cf4c5a53SSam Leffler 			m_freem(m0);
3159cf4c5a53SSam Leffler 			sc->sc_stats.mst_tx_noheadroom++;
3160cf4c5a53SSam Leffler 			return EIO;
3161cf4c5a53SSam Leffler 		}
3162cf4c5a53SSam Leffler 		M_PREPEND(m0, space, M_NOWAIT);
3163cf4c5a53SSam Leffler 	}
3164cf4c5a53SSam Leffler 	tr = mtod(m0, struct mwltxrec *);
3165cf4c5a53SSam Leffler 	if (wh != (struct ieee80211_frame *) &tr->wh)
3166cf4c5a53SSam Leffler 		ovbcopy(wh, &tr->wh, hdrlen);
3167cf4c5a53SSam Leffler 	/*
3168cf4c5a53SSam Leffler 	 * Note: the "firmware length" is actually the length
3169cf4c5a53SSam Leffler 	 * of the fully formed "802.11 payload".  That is, it's
3170cf4c5a53SSam Leffler 	 * everything except for the 802.11 header.  In particular
3171cf4c5a53SSam Leffler 	 * this includes all crypto material including the MIC!
3172cf4c5a53SSam Leffler 	 */
3173cf4c5a53SSam Leffler 	tr->fwlen = htole16(pktlen - hdrlen);
3174cf4c5a53SSam Leffler 
3175cf4c5a53SSam Leffler 	/*
3176cf4c5a53SSam Leffler 	 * Load the DMA map so any coalescing is done.  This
3177cf4c5a53SSam Leffler 	 * also calculates the number of descriptors we need.
3178cf4c5a53SSam Leffler 	 */
3179cf4c5a53SSam Leffler 	error = mwl_tx_dmasetup(sc, bf, m0);
3180cf4c5a53SSam Leffler 	if (error != 0) {
3181cf4c5a53SSam Leffler 		/* NB: stat collected in mwl_tx_dmasetup */
3182cf4c5a53SSam Leffler 		DPRINTF(sc, MWL_DEBUG_XMIT,
3183cf4c5a53SSam Leffler 		    "%s: unable to setup dma\n", __func__);
3184cf4c5a53SSam Leffler 		return error;
3185cf4c5a53SSam Leffler 	}
3186cf4c5a53SSam Leffler 	bf->bf_node = ni;			/* NB: held reference */
3187cf4c5a53SSam Leffler 	m0 = bf->bf_m;				/* NB: may have changed */
3188cf4c5a53SSam Leffler 	tr = mtod(m0, struct mwltxrec *);
3189cf4c5a53SSam Leffler 	wh = (struct ieee80211_frame *)&tr->wh;
3190cf4c5a53SSam Leffler 
3191cf4c5a53SSam Leffler 	/*
3192cf4c5a53SSam Leffler 	 * Formulate tx descriptor.
3193cf4c5a53SSam Leffler 	 */
3194cf4c5a53SSam Leffler 	ds = bf->bf_desc;
3195cf4c5a53SSam Leffler 	txq = bf->bf_txq;
3196cf4c5a53SSam Leffler 
3197cf4c5a53SSam Leffler 	ds->QosCtrl = qos;			/* NB: already little-endian */
3198cf4c5a53SSam Leffler #if MWL_TXDESC == 1
3199cf4c5a53SSam Leffler 	/*
3200cf4c5a53SSam Leffler 	 * NB: multiframes should be zero because the descriptors
3201cf4c5a53SSam Leffler 	 *     are initialized to zero.  This should handle the case
3202cf4c5a53SSam Leffler 	 *     where the driver is built with MWL_TXDESC=1 but we are
3203cf4c5a53SSam Leffler 	 *     using firmware with multi-segment support.
3204cf4c5a53SSam Leffler 	 */
3205cf4c5a53SSam Leffler 	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
3206cf4c5a53SSam Leffler 	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
3207cf4c5a53SSam Leffler #else
3208cf4c5a53SSam Leffler 	ds->multiframes = htole32(bf->bf_nseg);
3209cf4c5a53SSam Leffler 	ds->PktLen = htole16(m0->m_pkthdr.len);
3210cf4c5a53SSam Leffler 	for (i = 0; i < bf->bf_nseg; i++) {
3211cf4c5a53SSam Leffler 		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
3212cf4c5a53SSam Leffler 		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
3213cf4c5a53SSam Leffler 	}
3214cf4c5a53SSam Leffler #endif
3215cf4c5a53SSam Leffler 	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
3216cf4c5a53SSam Leffler 	ds->Format = 0;
3217cf4c5a53SSam Leffler 	ds->pad = 0;
32187850fa71SSam Leffler 	ds->ack_wcb_addr = 0;
3219cf4c5a53SSam Leffler 
3220cf4c5a53SSam Leffler 	mn = MWL_NODE(ni);
3221cf4c5a53SSam Leffler 	/*
3222cf4c5a53SSam Leffler 	 * Select transmit rate.
3223cf4c5a53SSam Leffler 	 */
3224cf4c5a53SSam Leffler 	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
3225cf4c5a53SSam Leffler 	case IEEE80211_FC0_TYPE_MGT:
3226cf4c5a53SSam Leffler 		sc->sc_stats.mst_tx_mgmt++;
3227cf4c5a53SSam Leffler 		/* fall thru... */
3228cf4c5a53SSam Leffler 	case IEEE80211_FC0_TYPE_CTL:
3229cf4c5a53SSam Leffler 		/* NB: assign to BE q to avoid bursting */
3230cf4c5a53SSam Leffler 		ds->TxPriority = MWL_WME_AC_BE;
3231cf4c5a53SSam Leffler 		break;
3232cf4c5a53SSam Leffler 	case IEEE80211_FC0_TYPE_DATA:
3233cf4c5a53SSam Leffler 		if (!ismcast) {
3234cf4c5a53SSam Leffler 			const struct ieee80211_txparam *tp = ni->ni_txparms;
3235cf4c5a53SSam Leffler 			/*
3236cf4c5a53SSam Leffler 			 * EAPOL frames get forced to a fixed rate and w/o
3237cf4c5a53SSam Leffler 			 * aggregation; otherwise check for any fixed rate
3238cf4c5a53SSam Leffler 			 * for the client (may depend on association state).
3239cf4c5a53SSam Leffler 			 */
3240cf4c5a53SSam Leffler 			if (m0->m_flags & M_EAPOL) {
3241cf4c5a53SSam Leffler 				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
3242cf4c5a53SSam Leffler 				ds->Format = mvp->mv_eapolformat;
3243cf4c5a53SSam Leffler 				ds->pad = htole16(
3244cf4c5a53SSam Leffler 				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
32457850fa71SSam Leffler 			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3246cf4c5a53SSam Leffler 				/* XXX pre-calculate per node */
3247cf4c5a53SSam Leffler 				ds->Format = htole16(
3248cf4c5a53SSam Leffler 				    mwl_calcformat(tp->ucastrate, ni));
3249cf4c5a53SSam Leffler 				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
3250cf4c5a53SSam Leffler 			}
3251cf4c5a53SSam Leffler 			/* NB: EAPOL frames will never have qos set */
3252cf4c5a53SSam Leffler 			if (qos == 0)
3253cf4c5a53SSam Leffler 				ds->TxPriority = txq->qnum;
3254cf4c5a53SSam Leffler #if MWL_MAXBA > 3
3255cf4c5a53SSam Leffler 			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
3256cf4c5a53SSam Leffler 				ds->TxPriority = mn->mn_ba[3].txq;
3257cf4c5a53SSam Leffler #endif
3258cf4c5a53SSam Leffler #if MWL_MAXBA > 2
3259cf4c5a53SSam Leffler 			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
3260cf4c5a53SSam Leffler 				ds->TxPriority = mn->mn_ba[2].txq;
3261cf4c5a53SSam Leffler #endif
3262cf4c5a53SSam Leffler #if MWL_MAXBA > 1
3263cf4c5a53SSam Leffler 			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
3264cf4c5a53SSam Leffler 				ds->TxPriority = mn->mn_ba[1].txq;
3265cf4c5a53SSam Leffler #endif
3266cf4c5a53SSam Leffler #if MWL_MAXBA > 0
3267cf4c5a53SSam Leffler 			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
3268cf4c5a53SSam Leffler 				ds->TxPriority = mn->mn_ba[0].txq;
3269cf4c5a53SSam Leffler #endif
3270cf4c5a53SSam Leffler 			else
3271cf4c5a53SSam Leffler 				ds->TxPriority = txq->qnum;
3272cf4c5a53SSam Leffler 		} else
3273cf4c5a53SSam Leffler 			ds->TxPriority = txq->qnum;
3274cf4c5a53SSam Leffler 		break;
3275cf4c5a53SSam Leffler 	default:
32767a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
3277cf4c5a53SSam Leffler 			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
3278cf4c5a53SSam Leffler 		sc->sc_stats.mst_tx_badframetype++;
3279cf4c5a53SSam Leffler 		m_freem(m0);
3280cf4c5a53SSam Leffler 		return EIO;
3281cf4c5a53SSam Leffler 	}
3282cf4c5a53SSam Leffler 
3283cf4c5a53SSam Leffler 	if (IFF_DUMPPKTS_XMIT(sc))
3284cf4c5a53SSam Leffler 		ieee80211_dump_pkt(ic,
3285cf4c5a53SSam Leffler 		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
3286cf4c5a53SSam Leffler 		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);
3287cf4c5a53SSam Leffler 
3288cf4c5a53SSam Leffler 	MWL_TXQ_LOCK(txq);
3289cf4c5a53SSam Leffler 	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
3290cf4c5a53SSam Leffler 	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
3291cf4c5a53SSam Leffler 	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3292cf4c5a53SSam Leffler 
32937cf545d0SJohn Baldwin 	sc->sc_tx_timer = 5;
3294cf4c5a53SSam Leffler 	MWL_TXQ_UNLOCK(txq);
3295cf4c5a53SSam Leffler 
3296cf4c5a53SSam Leffler 	return 0;
3297cf4c5a53SSam Leffler }
3298cf4c5a53SSam Leffler 
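/*
 * Convert a legacy rate index, as reported by the firmware in the tx
 * descriptor, to the corresponding net80211 rate code in 0.5 Mb/s
 * units (e.g. index 2 -> 11, i.e. 5.5 Mb/s); out-of-range indices
 * map to 0.
 */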
3299cf4c5a53SSam Leffler static __inline int
3300cf4c5a53SSam Leffler mwl_cvtlegacyrix(int rix)
3301cf4c5a53SSam Leffler {
3302cf4c5a53SSam Leffler 	static const int ieeerates[] =
3303cf4c5a53SSam Leffler 	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
3304d6166defSAdrian Chadd 	return (rix < nitems(ieeerates) ? ieeerates[rix] : 0);
3305cf4c5a53SSam Leffler }
3306cf4c5a53SSam Leffler 
3307cf4c5a53SSam Leffler /*
3308cf4c5a53SSam Leffler  * Process completed xmit descriptors from the specified queue.
3309cf4c5a53SSam Leffler  */
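/*
 * NB: buffers are reaped from the head of the active list until one
 *     still marked FW_OWNED is found; completed frames update the
 *     per-antenna, retry, and rate statistics before the DMA map is
 *     unloaded and the buffer is returned to the free list.
 */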
3310cf4c5a53SSam Leffler static int
3311cf4c5a53SSam Leffler mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3312cf4c5a53SSam Leffler {
3313cf4c5a53SSam Leffler #define	EAGLE_TXD_STATUS_MCAST \
3314cf4c5a53SSam Leffler 	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
33157a79cebfSGleb Smirnoff 	struct ieee80211com *ic = &sc->sc_ic;
3316cf4c5a53SSam Leffler 	struct mwl_txbuf *bf;
3317cf4c5a53SSam Leffler 	struct mwl_txdesc *ds;
3318cf4c5a53SSam Leffler 	struct ieee80211_node *ni;
3319cf4c5a53SSam Leffler 	int nreaped;
3320cf4c5a53SSam Leffler 	uint32_t status;
3321cf4c5a53SSam Leffler 
3322cf4c5a53SSam Leffler 	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
3323cf4c5a53SSam Leffler 	for (nreaped = 0;; nreaped++) {
3324cf4c5a53SSam Leffler 		MWL_TXQ_LOCK(txq);
3325cf4c5a53SSam Leffler 		bf = STAILQ_FIRST(&txq->active);
3326cf4c5a53SSam Leffler 		if (bf == NULL) {
3327cf4c5a53SSam Leffler 			MWL_TXQ_UNLOCK(txq);
3328cf4c5a53SSam Leffler 			break;
3329cf4c5a53SSam Leffler 		}
3330cf4c5a53SSam Leffler 		ds = bf->bf_desc;
3331cf4c5a53SSam Leffler 		MWL_TXDESC_SYNC(txq, ds,
3332cf4c5a53SSam Leffler 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3333cf4c5a53SSam Leffler 		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3334cf4c5a53SSam Leffler 			MWL_TXQ_UNLOCK(txq);
3335cf4c5a53SSam Leffler 			break;
3336cf4c5a53SSam Leffler 		}
3337cf4c5a53SSam Leffler 		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3338cf4c5a53SSam Leffler 		MWL_TXQ_UNLOCK(txq);
3339cf4c5a53SSam Leffler 
3340cf4c5a53SSam Leffler #ifdef MWL_DEBUG
3341cf4c5a53SSam Leffler 		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3342cf4c5a53SSam Leffler 			mwl_printtxbuf(bf, txq->qnum, nreaped);
3343cf4c5a53SSam Leffler #endif
3344cf4c5a53SSam Leffler 		ni = bf->bf_node;
3345cf4c5a53SSam Leffler 		if (ni != NULL) {
3346cf4c5a53SSam Leffler 			status = le32toh(ds->Status);
3347cf4c5a53SSam Leffler 			if (status & EAGLE_TXD_STATUS_OK) {
3348cf4c5a53SSam Leffler 				uint16_t Format = le16toh(ds->Format);
3349fe5ebb23SBjoern A. Zeeb 				uint8_t txant = _IEEE80211_MASKSHIFT(Format,
3350fe5ebb23SBjoern A. Zeeb 				    EAGLE_TXD_ANTENNA);
3351cf4c5a53SSam Leffler 
3352cf4c5a53SSam Leffler 				sc->sc_stats.mst_ant_tx[txant]++;
3353cf4c5a53SSam Leffler 				if (status & EAGLE_TXD_STATUS_OK_RETRY)
3354cf4c5a53SSam Leffler 					sc->sc_stats.mst_tx_retries++;
3355cf4c5a53SSam Leffler 				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3356cf4c5a53SSam Leffler 					sc->sc_stats.mst_tx_mretries++;
3357cf4c5a53SSam Leffler 				if (txq->qnum >= MWL_WME_AC_VO)
3358cf4c5a53SSam Leffler 					ic->ic_wme.wme_hipri_traffic++;
3359fe5ebb23SBjoern A. Zeeb 				ni->ni_txrate = _IEEE80211_MASKSHIFT(Format,
3360fe5ebb23SBjoern A. Zeeb 				    EAGLE_TXD_RATE);
3361cf4c5a53SSam Leffler 				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3362cf4c5a53SSam Leffler 					ni->ni_txrate = mwl_cvtlegacyrix(
3363cf4c5a53SSam Leffler 					    ni->ni_txrate);
3364cf4c5a53SSam Leffler 				} else
3365cf4c5a53SSam Leffler 					ni->ni_txrate |= IEEE80211_RATE_MCS;
3366cf4c5a53SSam Leffler 				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
3367cf4c5a53SSam Leffler 			} else {
3368cf4c5a53SSam Leffler 				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3369cf4c5a53SSam Leffler 					sc->sc_stats.mst_tx_linkerror++;
3370cf4c5a53SSam Leffler 				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3371cf4c5a53SSam Leffler 					sc->sc_stats.mst_tx_xretries++;
3372cf4c5a53SSam Leffler 				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3373cf4c5a53SSam Leffler 					sc->sc_stats.mst_tx_aging++;
3374cf4c5a53SSam Leffler 				if (bf->bf_m->m_flags & M_FF)
3375cf4c5a53SSam Leffler 					sc->sc_stats.mst_ff_txerr++;
3376cf4c5a53SSam Leffler 			}
33777a79cebfSGleb Smirnoff 			if (bf->bf_m->m_flags & M_TXCB)
3378cf4c5a53SSam Leffler 				/* XXX strip fw len in case header inspected */
3379cf4c5a53SSam Leffler 				m_adj(bf->bf_m, sizeof(uint16_t));
33807a79cebfSGleb Smirnoff 			ieee80211_tx_complete(ni, bf->bf_m,
3381cf4c5a53SSam Leffler 			    (status & EAGLE_TXD_STATUS_OK) == 0);
33827a79cebfSGleb Smirnoff 		} else
33837a79cebfSGleb Smirnoff 			m_freem(bf->bf_m);
3384cf4c5a53SSam Leffler 		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3385cf4c5a53SSam Leffler 
3386cf4c5a53SSam Leffler 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3387cf4c5a53SSam Leffler 		    BUS_DMASYNC_POSTWRITE);
3388cf4c5a53SSam Leffler 		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3389cf4c5a53SSam Leffler 
3390cf4c5a53SSam Leffler 		mwl_puttxbuf_tail(txq, bf);
3391cf4c5a53SSam Leffler 	}
3392cf4c5a53SSam Leffler 	return nreaped;
3393cf4c5a53SSam Leffler #undef EAGLE_TXD_STATUS_MCAST
3394cf4c5a53SSam Leffler }
3395cf4c5a53SSam Leffler 
3396cf4c5a53SSam Leffler /*
3397cf4c5a53SSam Leffler  * Deferred processing of transmit interrupt; special-cased
3398cf4c5a53SSam Leffler  * for four hardware queues, 0-3.
3399cf4c5a53SSam Leffler  */
3400cf4c5a53SSam Leffler static void
3401cf4c5a53SSam Leffler mwl_tx_proc(void *arg, int npending)
3402cf4c5a53SSam Leffler {
3403cf4c5a53SSam Leffler 	struct mwl_softc *sc = arg;
3404cf4c5a53SSam Leffler 	int nreaped;
3405cf4c5a53SSam Leffler 
3406cf4c5a53SSam Leffler 	/*
3407cf4c5a53SSam Leffler 	 * Process each active queue.
3408cf4c5a53SSam Leffler 	 */
3409cf4c5a53SSam Leffler 	nreaped = 0;
3410cf4c5a53SSam Leffler 	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3411cf4c5a53SSam Leffler 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3412cf4c5a53SSam Leffler 	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3413cf4c5a53SSam Leffler 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3414cf4c5a53SSam Leffler 	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3415cf4c5a53SSam Leffler 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3416cf4c5a53SSam Leffler 	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3417cf4c5a53SSam Leffler 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3418cf4c5a53SSam Leffler 
3419cf4c5a53SSam Leffler 	if (nreaped != 0) {
34207cf545d0SJohn Baldwin 		sc->sc_tx_timer = 0;
34217a79cebfSGleb Smirnoff 		if (mbufq_first(&sc->sc_snd) != NULL) {
3422cf4c5a53SSam Leffler 			/* NB: kick fw; the tx thread may have been preempted */
3423cf4c5a53SSam Leffler 			mwl_hal_txstart(sc->sc_mh, 0);
34247a79cebfSGleb Smirnoff 			mwl_start(sc);
3425cf4c5a53SSam Leffler 		}
3426cf4c5a53SSam Leffler 	}
3427cf4c5a53SSam Leffler }
3428cf4c5a53SSam Leffler 
3429cf4c5a53SSam Leffler static void
3430cf4c5a53SSam Leffler mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
3431cf4c5a53SSam Leffler {
3432cf4c5a53SSam Leffler 	struct ieee80211_node *ni;
3433cf4c5a53SSam Leffler 	struct mwl_txbuf *bf;
343452c80d49SDimitry Andric 	u_int ix __unused;
3435cf4c5a53SSam Leffler 
3436cf4c5a53SSam Leffler 	/*
3437cf4c5a53SSam Leffler 	 * NB: this assumes output has been stopped and
3438cf4c5a53SSam Leffler 	 *     we do not need to block mwl_tx_proc
3439cf4c5a53SSam Leffler 	 */
3440cf4c5a53SSam Leffler 	for (ix = 0;; ix++) {
3441cf4c5a53SSam Leffler 		MWL_TXQ_LOCK(txq);
3442cf4c5a53SSam Leffler 		bf = STAILQ_FIRST(&txq->active);
3443cf4c5a53SSam Leffler 		if (bf == NULL) {
3444cf4c5a53SSam Leffler 			MWL_TXQ_UNLOCK(txq);
3445cf4c5a53SSam Leffler 			break;
3446cf4c5a53SSam Leffler 		}
3447cf4c5a53SSam Leffler 		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3448cf4c5a53SSam Leffler 		MWL_TXQ_UNLOCK(txq);
3449cf4c5a53SSam Leffler #ifdef MWL_DEBUG
3450cf4c5a53SSam Leffler 		if (sc->sc_debug & MWL_DEBUG_RESET) {
34517a79cebfSGleb Smirnoff 			struct ieee80211com *ic = &sc->sc_ic;
3452cf4c5a53SSam Leffler 			const struct mwltxrec *tr =
3453cf4c5a53SSam Leffler 			    mtod(bf->bf_m, const struct mwltxrec *);
3454cf4c5a53SSam Leffler 			mwl_printtxbuf(bf, txq->qnum, ix);
3455cf4c5a53SSam Leffler 			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
3456cf4c5a53SSam Leffler 				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
3457cf4c5a53SSam Leffler 		}
3458cf4c5a53SSam Leffler #endif /* MWL_DEBUG */
3459cf4c5a53SSam Leffler 		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3460cf4c5a53SSam Leffler 		ni = bf->bf_node;
3461cf4c5a53SSam Leffler 		if (ni != NULL) {
3462cf4c5a53SSam Leffler 			/*
3463cf4c5a53SSam Leffler 			 * Reclaim node reference.
3464cf4c5a53SSam Leffler 			 */
3465cf4c5a53SSam Leffler 			ieee80211_free_node(ni);
3466cf4c5a53SSam Leffler 		}
3467cf4c5a53SSam Leffler 		m_freem(bf->bf_m);
3468cf4c5a53SSam Leffler 
3469cf4c5a53SSam Leffler 		mwl_puttxbuf_tail(txq, bf);
3470cf4c5a53SSam Leffler 	}
3471cf4c5a53SSam Leffler }
3472cf4c5a53SSam Leffler 
3473cf4c5a53SSam Leffler /*
3474cf4c5a53SSam Leffler  * Drain the transmit queues and reclaim resources.
3475cf4c5a53SSam Leffler  */
3476cf4c5a53SSam Leffler static void
3477cf4c5a53SSam Leffler mwl_draintxq(struct mwl_softc *sc)
3478cf4c5a53SSam Leffler {
3479cf4c5a53SSam Leffler 	int i;
3480cf4c5a53SSam Leffler 
3481cf4c5a53SSam Leffler 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3482cf4c5a53SSam Leffler 		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
34837cf545d0SJohn Baldwin 	sc->sc_tx_timer = 0;
3484cf4c5a53SSam Leffler }
3485cf4c5a53SSam Leffler 
3486cf4c5a53SSam Leffler #ifdef MWL_DIAGAPI
3487cf4c5a53SSam Leffler /*
3488cf4c5a53SSam Leffler  * Reset the transmit queues to a pristine state after a fw download.
3489cf4c5a53SSam Leffler  */
3490cf4c5a53SSam Leffler static void
3491cf4c5a53SSam Leffler mwl_resettxq(struct mwl_softc *sc)
3492cf4c5a53SSam Leffler {
3493cf4c5a53SSam Leffler 	int i;
3494cf4c5a53SSam Leffler 
3495cf4c5a53SSam Leffler 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3496cf4c5a53SSam Leffler 		mwl_txq_reset(sc, &sc->sc_txq[i]);
3497cf4c5a53SSam Leffler }
3498cf4c5a53SSam Leffler #endif /* MWL_DIAGAPI */
3499cf4c5a53SSam Leffler 
3500cf4c5a53SSam Leffler /*
3501cf4c5a53SSam Leffler  * Clear the transmit queues of any frames submitted for the
3502cf4c5a53SSam Leffler  * specified vap.  This is done when the vap is deleted so we
3503cf4c5a53SSam Leffler  * don't potentially reference the vap after it is gone.
3504cf4c5a53SSam Leffler  * Note we cannot remove the frames; we only reclaim the node
3505cf4c5a53SSam Leffler  * reference.
3506cf4c5a53SSam Leffler  */
3507cf4c5a53SSam Leffler static void
3508cf4c5a53SSam Leffler mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3509cf4c5a53SSam Leffler {
3510cf4c5a53SSam Leffler 	struct mwl_txq *txq;
3511cf4c5a53SSam Leffler 	struct mwl_txbuf *bf;
3512cf4c5a53SSam Leffler 	int i;
3513cf4c5a53SSam Leffler 
3514cf4c5a53SSam Leffler 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3515cf4c5a53SSam Leffler 		txq = &sc->sc_txq[i];
3516cf4c5a53SSam Leffler 		MWL_TXQ_LOCK(txq);
3517cf4c5a53SSam Leffler 		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3518cf4c5a53SSam Leffler 			struct ieee80211_node *ni = bf->bf_node;
3519cf4c5a53SSam Leffler 			if (ni != NULL && ni->ni_vap == vap) {
3520cf4c5a53SSam Leffler 				bf->bf_node = NULL;
3521cf4c5a53SSam Leffler 				ieee80211_free_node(ni);
3522cf4c5a53SSam Leffler 			}
3523cf4c5a53SSam Leffler 		}
3524cf4c5a53SSam Leffler 		MWL_TXQ_UNLOCK(txq);
3525cf4c5a53SSam Leffler 	}
3526cf4c5a53SSam Leffler }
3527cf4c5a53SSam Leffler 
352876340123SSam Leffler static int
352976340123SSam Leffler mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
353076340123SSam Leffler 	const uint8_t *frm, const uint8_t *efrm)
3531cf4c5a53SSam Leffler {
35327a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3533cf4c5a53SSam Leffler 	const struct ieee80211_action *ia;
3534cf4c5a53SSam Leffler 
3535cf4c5a53SSam Leffler 	ia = (const struct ieee80211_action *) frm;
3536cf4c5a53SSam Leffler 	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3537cf4c5a53SSam Leffler 	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3538cf4c5a53SSam Leffler 		const struct ieee80211_action_ht_mimopowersave *mps =
3539cf4c5a53SSam Leffler 		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3540cf4c5a53SSam Leffler 
3541cf4c5a53SSam Leffler 		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3542cf4c5a53SSam Leffler 		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3543fe5ebb23SBjoern A. Zeeb 		    _IEEE80211_MASKSHIFT(mps->am_control,
3544fe5ebb23SBjoern A. Zeeb 			IEEE80211_A_HT_MIMOPWRSAVE_MODE));
354576340123SSam Leffler 		return 0;
3546cf4c5a53SSam Leffler 	} else
354776340123SSam Leffler 		return sc->sc_recv_action(ni, wh, frm, efrm);
3548cf4c5a53SSam Leffler }
3549cf4c5a53SSam Leffler 
3550cf4c5a53SSam Leffler static int
3551cf4c5a53SSam Leffler mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
3552cf4c5a53SSam Leffler 	int dialogtoken, int baparamset, int batimeout)
3553cf4c5a53SSam Leffler {
35547a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ni->ni_ic->ic_softc;
35557850fa71SSam Leffler 	struct ieee80211vap *vap = ni->ni_vap;
3556cf4c5a53SSam Leffler 	struct mwl_node *mn = MWL_NODE(ni);
3557cf4c5a53SSam Leffler 	struct mwl_bastate *bas;
3558cf4c5a53SSam Leffler 
3559cf4c5a53SSam Leffler 	bas = tap->txa_private;
3560cf4c5a53SSam Leffler 	if (bas == NULL) {
3561cf4c5a53SSam Leffler 		const MWL_HAL_BASTREAM *sp;
3562cf4c5a53SSam Leffler 		/*
3563cf4c5a53SSam Leffler 		 * Check for a free BA stream slot.
3564cf4c5a53SSam Leffler 		 */
3565cf4c5a53SSam Leffler #if MWL_MAXBA > 3
3566cf4c5a53SSam Leffler 		if (mn->mn_ba[3].bastream == NULL)
3567cf4c5a53SSam Leffler 			bas = &mn->mn_ba[3];
3568cf4c5a53SSam Leffler 		else
3569cf4c5a53SSam Leffler #endif
3570cf4c5a53SSam Leffler #if MWL_MAXBA > 2
3571cf4c5a53SSam Leffler 		if (mn->mn_ba[2].bastream == NULL)
3572cf4c5a53SSam Leffler 			bas = &mn->mn_ba[2];
3573cf4c5a53SSam Leffler 		else
3574cf4c5a53SSam Leffler #endif
3575cf4c5a53SSam Leffler #if MWL_MAXBA > 1
3576cf4c5a53SSam Leffler 		if (mn->mn_ba[1].bastream == NULL)
3577cf4c5a53SSam Leffler 			bas = &mn->mn_ba[1];
3578cf4c5a53SSam Leffler 		else
3579cf4c5a53SSam Leffler #endif
3580cf4c5a53SSam Leffler #if MWL_MAXBA > 0
3581cf4c5a53SSam Leffler 		if (mn->mn_ba[0].bastream == NULL)
3582cf4c5a53SSam Leffler 			bas = &mn->mn_ba[0];
3583cf4c5a53SSam Leffler 		else
3584cf4c5a53SSam Leffler #endif
3585cf4c5a53SSam Leffler 		{
3586cf4c5a53SSam Leffler 			/* sta already has max BA streams */
3587cf4c5a53SSam Leffler 			/* XXX assign BA stream to highest priority tid */
3588cf4c5a53SSam Leffler 			DPRINTF(sc, MWL_DEBUG_AMPDU,
3589cf4c5a53SSam Leffler 			    "%s: already has max bastreams\n", __func__);
3590cf4c5a53SSam Leffler 			sc->sc_stats.mst_ampdu_reject++;
3591cf4c5a53SSam Leffler 			return 0;
3592cf4c5a53SSam Leffler 		}
3593cf4c5a53SSam Leffler 		/* NB: no held reference to ni */
35947850fa71SSam Leffler 		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
35957850fa71SSam Leffler 		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
35962aa563dfSAdrian Chadd 		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
35977850fa71SSam Leffler 		    ni, tap);
3598cf4c5a53SSam Leffler 		if (sp == NULL) {
3599cf4c5a53SSam Leffler 			/*
3600cf4c5a53SSam Leffler 			 * No available stream, return 0 so no
3601cf4c5a53SSam Leffler 			 * a-mpdu aggregation will be done.
3602cf4c5a53SSam Leffler 			 */
3603cf4c5a53SSam Leffler 			DPRINTF(sc, MWL_DEBUG_AMPDU,
3604cf4c5a53SSam Leffler 			    "%s: no bastream available\n", __func__);
3605cf4c5a53SSam Leffler 			sc->sc_stats.mst_ampdu_nostream++;
3606cf4c5a53SSam Leffler 			return 0;
3607cf4c5a53SSam Leffler 		}
3608cf4c5a53SSam Leffler 		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
3609cf4c5a53SSam Leffler 		    __func__, sp);
3610cf4c5a53SSam Leffler 		/* NB: qos is left zero so we won't match in mwl_tx_start */
3611cf4c5a53SSam Leffler 		bas->bastream = sp;
3612cf4c5a53SSam Leffler 		tap->txa_private = bas;
3613cf4c5a53SSam Leffler 	}
3614cf4c5a53SSam Leffler 	/* fetch current seq# from the firmware; if available */
3615cf4c5a53SSam Leffler 	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
36167850fa71SSam Leffler 	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
3617cf4c5a53SSam Leffler 	    &tap->txa_start) != 0)
3618cf4c5a53SSam Leffler 		tap->txa_start = 0;
3619cf4c5a53SSam Leffler 	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
3620cf4c5a53SSam Leffler }
3621cf4c5a53SSam Leffler 
3622cf4c5a53SSam Leffler static int
3623cf4c5a53SSam Leffler mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
3624cf4c5a53SSam Leffler 	int code, int baparamset, int batimeout)
3625cf4c5a53SSam Leffler {
36267a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3627cf4c5a53SSam Leffler 	struct mwl_bastate *bas;
3628cf4c5a53SSam Leffler 
3629cf4c5a53SSam Leffler 	bas = tap->txa_private;
3630cf4c5a53SSam Leffler 	if (bas == NULL) {
3631cf4c5a53SSam Leffler 		/* XXX should not happen */
3632cf4c5a53SSam Leffler 		DPRINTF(sc, MWL_DEBUG_AMPDU,
36332aa563dfSAdrian Chadd 		    "%s: no BA stream allocated, TID %d\n",
36342aa563dfSAdrian Chadd 		    __func__, tap->txa_tid);
3635cf4c5a53SSam Leffler 		sc->sc_stats.mst_addba_nostream++;
3636cf4c5a53SSam Leffler 		return 0;
3637cf4c5a53SSam Leffler 	}
3638cf4c5a53SSam Leffler 	if (code == IEEE80211_STATUS_SUCCESS) {
36397850fa71SSam Leffler 		struct ieee80211vap *vap = ni->ni_vap;
3640cf4c5a53SSam Leffler 		int bufsiz, error;
3641cf4c5a53SSam Leffler 
3642cf4c5a53SSam Leffler 		/*
3643cf4c5a53SSam Leffler 		 * Tell the firmware to setup the BA stream;
3644cf4c5a53SSam Leffler 		 * we know resources are available because we
3645cf4c5a53SSam Leffler 		 * pre-allocated one before forming the request.
3646cf4c5a53SSam Leffler 		 */
3647fe5ebb23SBjoern A. Zeeb 		bufsiz = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_BUFSIZ);
3648cf4c5a53SSam Leffler 		if (bufsiz == 0)
3649cf4c5a53SSam Leffler 			bufsiz = IEEE80211_AGGR_BAWMAX;
36507850fa71SSam Leffler 		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
36517850fa71SSam Leffler 		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
3652cf4c5a53SSam Leffler 		if (error != 0) {
3653cf4c5a53SSam Leffler 			/*
3654cf4c5a53SSam Leffler 			 * Setup failed, return immediately so no a-mpdu
3655cf4c5a53SSam Leffler 			 * aggregation will be done.
3656cf4c5a53SSam Leffler 			 */
3657cf4c5a53SSam Leffler 			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3658cf4c5a53SSam Leffler 			mwl_bastream_free(bas);
3659cf4c5a53SSam Leffler 			tap->txa_private = NULL;
3660cf4c5a53SSam Leffler 
3661cf4c5a53SSam Leffler 			DPRINTF(sc, MWL_DEBUG_AMPDU,
36622aa563dfSAdrian Chadd 			    "%s: create failed, error %d, bufsiz %d TID %d "
3663cf4c5a53SSam Leffler 			    "htparam 0x%x\n", __func__, error, bufsiz,
36642aa563dfSAdrian Chadd 			    tap->txa_tid, ni->ni_htparam);
3665cf4c5a53SSam Leffler 			sc->sc_stats.mst_bacreate_failed++;
3666cf4c5a53SSam Leffler 			return 0;
3667cf4c5a53SSam Leffler 		}
3668cf4c5a53SSam Leffler 		/* NB: cache txq to avoid ptr indirect */
36692aa563dfSAdrian Chadd 		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
3670cf4c5a53SSam Leffler 		DPRINTF(sc, MWL_DEBUG_AMPDU,
36712aa563dfSAdrian Chadd 		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
3672cf4c5a53SSam Leffler 		    "htparam 0x%x\n", __func__, bas->bastream,
36732aa563dfSAdrian Chadd 		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
3674cf4c5a53SSam Leffler 	} else {
3675cf4c5a53SSam Leffler 		/*
3676cf4c5a53SSam Leffler 		 * Other side NAK'd us; return the resources.
3677cf4c5a53SSam Leffler 		 */
3678cf4c5a53SSam Leffler 		DPRINTF(sc, MWL_DEBUG_AMPDU,
3679cf4c5a53SSam Leffler 		    "%s: request failed with code %d, destroy bastream %p\n",
3680cf4c5a53SSam Leffler 		    __func__, code, bas->bastream);
3681cf4c5a53SSam Leffler 		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3682cf4c5a53SSam Leffler 		mwl_bastream_free(bas);
3683cf4c5a53SSam Leffler 		tap->txa_private = NULL;
3684cf4c5a53SSam Leffler 	}
3685cf4c5a53SSam Leffler 	/* NB: firmware sends BAR so we don't need to */
3686cf4c5a53SSam Leffler 	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
3687cf4c5a53SSam Leffler }
3688cf4c5a53SSam Leffler 
3689cf4c5a53SSam Leffler static void
3690cf4c5a53SSam Leffler mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3691cf4c5a53SSam Leffler {
36927a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3693cf4c5a53SSam Leffler 	struct mwl_bastate *bas;
3694cf4c5a53SSam Leffler 
3695cf4c5a53SSam Leffler 	bas = tap->txa_private;
3696cf4c5a53SSam Leffler 	if (bas != NULL) {
3697cf4c5a53SSam Leffler 		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3698cf4c5a53SSam Leffler 		    __func__, bas->bastream);
3699cf4c5a53SSam Leffler 		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3700cf4c5a53SSam Leffler 		mwl_bastream_free(bas);
3701cf4c5a53SSam Leffler 		tap->txa_private = NULL;
3702cf4c5a53SSam Leffler 	}
3703cf4c5a53SSam Leffler 	sc->sc_addba_stop(ni, tap);
3704cf4c5a53SSam Leffler }
3705cf4c5a53SSam Leffler 
3706cf4c5a53SSam Leffler /*
3707cf4c5a53SSam Leffler  * Setup the rx data structures.  This should only be
3708cf4c5a53SSam Leffler  * done once or we may get out of sync with the firmware.
3709cf4c5a53SSam Leffler  */
3710cf4c5a53SSam Leffler static int
3711cf4c5a53SSam Leffler mwl_startrecv(struct mwl_softc *sc)
3712cf4c5a53SSam Leffler {
3713cf4c5a53SSam Leffler 	if (!sc->sc_recvsetup) {
3714cf4c5a53SSam Leffler 		struct mwl_rxbuf *bf, *prev;
3715cf4c5a53SSam Leffler 		struct mwl_rxdesc *ds;
3716cf4c5a53SSam Leffler 
3717cf4c5a53SSam Leffler 		prev = NULL;
3718cf4c5a53SSam Leffler 		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
3719cf4c5a53SSam Leffler 			int error = mwl_rxbuf_init(sc, bf);
3720cf4c5a53SSam Leffler 			if (error != 0) {
3721cf4c5a53SSam Leffler 				DPRINTF(sc, MWL_DEBUG_RECV,
3722cf4c5a53SSam Leffler 					"%s: mwl_rxbuf_init failed %d\n",
3723cf4c5a53SSam Leffler 					__func__, error);
3724cf4c5a53SSam Leffler 				return error;
3725cf4c5a53SSam Leffler 			}
3726cf4c5a53SSam Leffler 			if (prev != NULL) {
3727cf4c5a53SSam Leffler 				ds = prev->bf_desc;
3728cf4c5a53SSam Leffler 				ds->pPhysNext = htole32(bf->bf_daddr);
3729cf4c5a53SSam Leffler 			}
3730cf4c5a53SSam Leffler 			prev = bf;
3731cf4c5a53SSam Leffler 		}
3732cf4c5a53SSam Leffler 		if (prev != NULL) {
3733cf4c5a53SSam Leffler 			ds = prev->bf_desc;
3734cf4c5a53SSam Leffler 			ds->pPhysNext =
3735cf4c5a53SSam Leffler 			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
3736cf4c5a53SSam Leffler 		}
3737cf4c5a53SSam Leffler 		sc->sc_recvsetup = 1;
3738cf4c5a53SSam Leffler 	}
3739cf4c5a53SSam Leffler 	mwl_mode_init(sc);		/* set filters, etc. */
3740cf4c5a53SSam Leffler 	return 0;
3741cf4c5a53SSam Leffler }
3742cf4c5a53SSam Leffler 
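/*
 * Map the vap configuration and operating channel to a firmware AP
 * mode.  For example, an HT channel in the 5GHz band with the
 * pure-11n flag clear yields AP_MODE_AandN, while an HT channel at
 * 2.4GHz falls through to AP_MODE_GandN or AP_MODE_BandGandN
 * depending on whether pure-11g operation was requested.
 */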
3743cf4c5a53SSam Leffler static MWL_HAL_APMODE
3744cf4c5a53SSam Leffler mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3745cf4c5a53SSam Leffler {
3746cf4c5a53SSam Leffler 	MWL_HAL_APMODE mode;
3747cf4c5a53SSam Leffler 
3748cf4c5a53SSam Leffler 	if (IEEE80211_IS_CHAN_HT(chan)) {
374973c1905dSSam Leffler 		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3750cf4c5a53SSam Leffler 			mode = AP_MODE_N_ONLY;
3751cf4c5a53SSam Leffler 		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3752cf4c5a53SSam Leffler 			mode = AP_MODE_AandN;
3753cf4c5a53SSam Leffler 		else if (vap->iv_flags & IEEE80211_F_PUREG)
3754cf4c5a53SSam Leffler 			mode = AP_MODE_GandN;
3755cf4c5a53SSam Leffler 		else
3756cf4c5a53SSam Leffler 			mode = AP_MODE_BandGandN;
3757cf4c5a53SSam Leffler 	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3758cf4c5a53SSam Leffler 		if (vap->iv_flags & IEEE80211_F_PUREG)
3759cf4c5a53SSam Leffler 			mode = AP_MODE_G_ONLY;
3760cf4c5a53SSam Leffler 		else
3761cf4c5a53SSam Leffler 			mode = AP_MODE_MIXED;
3762cf4c5a53SSam Leffler 	} else if (IEEE80211_IS_CHAN_B(chan))
3763cf4c5a53SSam Leffler 		mode = AP_MODE_B_ONLY;
3764cf4c5a53SSam Leffler 	else if (IEEE80211_IS_CHAN_A(chan))
3765cf4c5a53SSam Leffler 		mode = AP_MODE_A_ONLY;
3766cf4c5a53SSam Leffler 	else
3767cf4c5a53SSam Leffler 		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3768cf4c5a53SSam Leffler 	return mode;
3769cf4c5a53SSam Leffler }
3770cf4c5a53SSam Leffler 
3771cf4c5a53SSam Leffler static int
3772cf4c5a53SSam Leffler mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3773cf4c5a53SSam Leffler {
3774cf4c5a53SSam Leffler 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3775cf4c5a53SSam Leffler 	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3776cf4c5a53SSam Leffler }
3777cf4c5a53SSam Leffler 
3778cf4c5a53SSam Leffler /*
3779cf4c5a53SSam Leffler  * Set/change channels.
3780cf4c5a53SSam Leffler  */
3781cf4c5a53SSam Leffler static int
3782cf4c5a53SSam Leffler mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
3783cf4c5a53SSam Leffler {
3784cf4c5a53SSam Leffler 	struct mwl_hal *mh = sc->sc_mh;
37857a79cebfSGleb Smirnoff 	struct ieee80211com *ic = &sc->sc_ic;
3786cf4c5a53SSam Leffler 	MWL_HAL_CHANNEL hchan;
3787cf4c5a53SSam Leffler 	int maxtxpow;
3788cf4c5a53SSam Leffler 
3789cf4c5a53SSam Leffler 	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
3790cf4c5a53SSam Leffler 	    __func__, chan->ic_freq, chan->ic_flags);
3791cf4c5a53SSam Leffler 
3792cf4c5a53SSam Leffler 	/*
3793cf4c5a53SSam Leffler 	 * Convert to a HAL channel description with
3794cf4c5a53SSam Leffler 	 * the flags constrained to reflect the current
3795cf4c5a53SSam Leffler 	 * operating mode.
3796cf4c5a53SSam Leffler 	 */
3797cf4c5a53SSam Leffler 	mwl_mapchan(&hchan, chan);
3798cf4c5a53SSam Leffler 	mwl_hal_intrset(mh, 0);		/* disable interrupts */
3799cf4c5a53SSam Leffler #if 0
3800cf4c5a53SSam Leffler 	mwl_draintxq(sc);		/* clear pending tx frames */
3801cf4c5a53SSam Leffler #endif
3802cf4c5a53SSam Leffler 	mwl_hal_setchannel(mh, &hchan);
3803cf4c5a53SSam Leffler 	/*
3804cf4c5a53SSam Leffler 	 * Tx power is cap'd by the regulatory setting and
3805cf4c5a53SSam Leffler 	 * possibly a user-set limit.  We pass the min of
3806cf4c5a53SSam Leffler 	 * these to the hal to apply them to the cal data
3807cf4c5a53SSam Leffler 	 * for this channel.
3808cf4c5a53SSam Leffler 	 * XXX min bound?
3809cf4c5a53SSam Leffler 	 */
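	/*
	 * NB: ic_maxregpower is in dBm while ic_txpowlimit is kept in
	 *     half-dBm units, hence the doubling here and the division
	 *     by two when handing the result to the hal.
	 */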
3810cf4c5a53SSam Leffler 	maxtxpow = 2*chan->ic_maxregpower;
3811cf4c5a53SSam Leffler 	if (maxtxpow > ic->ic_txpowlimit)
3812cf4c5a53SSam Leffler 		maxtxpow = ic->ic_txpowlimit;
3813cf4c5a53SSam Leffler 	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
3814cf4c5a53SSam Leffler 	/* NB: potentially change mcast/mgt rates */
3815cf4c5a53SSam Leffler 	mwl_setcurchanrates(sc);
3816cf4c5a53SSam Leffler 
3817cf4c5a53SSam Leffler 	/*
3818cf4c5a53SSam Leffler 	 * Update internal state.
3819cf4c5a53SSam Leffler 	 */
3820cf4c5a53SSam Leffler 	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
3821cf4c5a53SSam Leffler 	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
3822cf4c5a53SSam Leffler 	if (IEEE80211_IS_CHAN_A(chan)) {
3823cf4c5a53SSam Leffler 		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
3824cf4c5a53SSam Leffler 		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
3825cf4c5a53SSam Leffler 	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3826cf4c5a53SSam Leffler 		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
3827cf4c5a53SSam Leffler 		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
3828cf4c5a53SSam Leffler 	} else {
3829cf4c5a53SSam Leffler 		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
3830cf4c5a53SSam Leffler 		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
3831cf4c5a53SSam Leffler 	}
3832cf4c5a53SSam Leffler 	sc->sc_curchan = hchan;
3833cf4c5a53SSam Leffler 	mwl_hal_intrset(mh, sc->sc_imask);
3834cf4c5a53SSam Leffler 
3835cf4c5a53SSam Leffler 	return 0;
3836cf4c5a53SSam Leffler }
3837cf4c5a53SSam Leffler 
3838cf4c5a53SSam Leffler static void
3839cf4c5a53SSam Leffler mwl_scan_start(struct ieee80211com *ic)
3840cf4c5a53SSam Leffler {
38417a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
3842cf4c5a53SSam Leffler 
3843cf4c5a53SSam Leffler 	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
3844cf4c5a53SSam Leffler }
3845cf4c5a53SSam Leffler 
3846cf4c5a53SSam Leffler static void
3847cf4c5a53SSam Leffler mwl_scan_end(struct ieee80211com *ic)
3848cf4c5a53SSam Leffler {
38497a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
3850cf4c5a53SSam Leffler 
3851cf4c5a53SSam Leffler 	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
3852cf4c5a53SSam Leffler }
3853cf4c5a53SSam Leffler 
3854cf4c5a53SSam Leffler static void
3855cf4c5a53SSam Leffler mwl_set_channel(struct ieee80211com *ic)
3856cf4c5a53SSam Leffler {
38577a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
3858cf4c5a53SSam Leffler 
3859cf4c5a53SSam Leffler 	(void) mwl_chan_set(sc, ic->ic_curchan);
3860cf4c5a53SSam Leffler }
3861cf4c5a53SSam Leffler 
3862cf4c5a53SSam Leffler /*
3863cf4c5a53SSam Leffler  * Handle a channel switch request.  We inform the firmware
3864cf4c5a53SSam Leffler  * and mark the global state to suppress various actions.
3865cf4c5a53SSam Leffler  * NB: we issue only one request to the fw; we may be called
3866cf4c5a53SSam Leffler  * multiple times if there are multiple vap's.
3867cf4c5a53SSam Leffler  */
3868cf4c5a53SSam Leffler static void
3869cf4c5a53SSam Leffler mwl_startcsa(struct ieee80211vap *vap)
3870cf4c5a53SSam Leffler {
3871cf4c5a53SSam Leffler 	struct ieee80211com *ic = vap->iv_ic;
38727a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
3873cf4c5a53SSam Leffler 	MWL_HAL_CHANNEL hchan;
3874cf4c5a53SSam Leffler 
3875cf4c5a53SSam Leffler 	if (sc->sc_csapending)
3876cf4c5a53SSam Leffler 		return;
3877cf4c5a53SSam Leffler 
3878cf4c5a53SSam Leffler 	mwl_mapchan(&hchan, ic->ic_csa_newchan);
3879cf4c5a53SSam Leffler 	/* 1 => quiet channel */
3880cf4c5a53SSam Leffler 	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
3881cf4c5a53SSam Leffler 	sc->sc_csapending = 1;
3882cf4c5a53SSam Leffler }
3883cf4c5a53SSam Leffler 
3884cf4c5a53SSam Leffler /*
3885cf4c5a53SSam Leffler  * Plumb any static WEP key for the station.  This is
3886cf4c5a53SSam Leffler  * necessary as we must propagate the key from the
3887cf4c5a53SSam Leffler  * global key table of the vap to each sta db entry.
3888cf4c5a53SSam Leffler  */
3889cf4c5a53SSam Leffler static void
3890cf4c5a53SSam Leffler mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3891cf4c5a53SSam Leffler {
3892cf4c5a53SSam Leffler 	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
3893cf4c5a53SSam Leffler 		IEEE80211_F_PRIVACY &&
3894cf4c5a53SSam Leffler 	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
3895cf4c5a53SSam Leffler 	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
3896bc813c40SAdrian Chadd 		(void) _mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey],
3897bc813c40SAdrian Chadd 				    mac);
3898cf4c5a53SSam Leffler }
3899cf4c5a53SSam Leffler 
3900cf4c5a53SSam Leffler static int
3901cf4c5a53SSam Leffler mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
3902cf4c5a53SSam Leffler {
3903cf4c5a53SSam Leffler #define	WME(ie) ((const struct ieee80211_wme_info *) ie)
3904cf4c5a53SSam Leffler 	struct ieee80211vap *vap = ni->ni_vap;
3905cf4c5a53SSam Leffler 	struct mwl_hal_vap *hvap;
3906cf4c5a53SSam Leffler 	int error;
3907cf4c5a53SSam Leffler 
3908cf4c5a53SSam Leffler 	if (vap->iv_opmode == IEEE80211_M_WDS) {
3909cf4c5a53SSam Leffler 		/*
3910cf4c5a53SSam Leffler 		 * WDS vap's do not have a f/w vap; instead they piggyback
3911cf4c5a53SSam Leffler 		 * on an AP vap and we must install the sta db entry and
3912cf4c5a53SSam Leffler 		 * crypto state using that AP's handle (the WDS vap has none).
3913cf4c5a53SSam Leffler 		 */
3914cf4c5a53SSam Leffler 		hvap = MWL_VAP(vap)->mv_ap_hvap;
3915cf4c5a53SSam Leffler 	} else
3916cf4c5a53SSam Leffler 		hvap = MWL_VAP(vap)->mv_hvap;
3917cf4c5a53SSam Leffler 	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
3918cf4c5a53SSam Leffler 	    aid, staid, pi,
3919cf4c5a53SSam Leffler 	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
3920cf4c5a53SSam Leffler 	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
3921cf4c5a53SSam Leffler 	if (error == 0) {
3922cf4c5a53SSam Leffler 		/*
3923cf4c5a53SSam Leffler 		 * Setup security for this station.  For sta mode this is
3924cf4c5a53SSam Leffler 		 * needed even though we do the same thing on transition to
3925cf4c5a53SSam Leffler 		 * AUTH state because the call to mwl_hal_newstation
3926cf4c5a53SSam Leffler 		 * clobbers the crypto state we set up.
3927cf4c5a53SSam Leffler 		 */
3928cf4c5a53SSam Leffler 		mwl_setanywepkey(vap, ni->ni_macaddr);
3929cf4c5a53SSam Leffler 	}
3930cf4c5a53SSam Leffler 	return error;
3931cf4c5a53SSam Leffler #undef WME
3932cf4c5a53SSam Leffler }
3933cf4c5a53SSam Leffler 
3934cf4c5a53SSam Leffler static void
3935cf4c5a53SSam Leffler mwl_setglobalkeys(struct ieee80211vap *vap)
3936cf4c5a53SSam Leffler {
3937cf4c5a53SSam Leffler 	struct ieee80211_key *wk;
3938cf4c5a53SSam Leffler 
3939cf4c5a53SSam Leffler 	wk = &vap->iv_nw_keys[0];
3940cf4c5a53SSam Leffler 	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
3941cf4c5a53SSam Leffler 		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
3942bc813c40SAdrian Chadd 			(void) _mwl_key_set(vap, wk, vap->iv_myaddr);
3943cf4c5a53SSam Leffler }
3944cf4c5a53SSam Leffler 
3945cf4c5a53SSam Leffler /*
39467850fa71SSam Leffler  * Convert a legacy rate set to a firmware bitmask.
39477850fa71SSam Leffler  */
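/*
 * For example, the 11b rate set {1, 2, 5.5, 11 Mb/s} (rate codes
 * 2, 4, 11 and 22) maps to the bitmask 0x00f.
 */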
39487850fa71SSam Leffler static uint32_t
39497850fa71SSam Leffler get_rate_bitmap(const struct ieee80211_rateset *rs)
39507850fa71SSam Leffler {
39517850fa71SSam Leffler 	uint32_t rates;
39527850fa71SSam Leffler 	int i;
39537850fa71SSam Leffler 
39547850fa71SSam Leffler 	rates = 0;
39557850fa71SSam Leffler 	for (i = 0; i < rs->rs_nrates; i++)
39567850fa71SSam Leffler 		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
39577850fa71SSam Leffler 		case 2:	  rates |= 0x001; break;
39587850fa71SSam Leffler 		case 4:	  rates |= 0x002; break;
39597850fa71SSam Leffler 		case 11:  rates |= 0x004; break;
39607850fa71SSam Leffler 		case 22:  rates |= 0x008; break;
39617850fa71SSam Leffler 		case 44:  rates |= 0x010; break;
39627850fa71SSam Leffler 		case 12:  rates |= 0x020; break;
39637850fa71SSam Leffler 		case 18:  rates |= 0x040; break;
39647850fa71SSam Leffler 		case 24:  rates |= 0x080; break;
39657850fa71SSam Leffler 		case 36:  rates |= 0x100; break;
39667850fa71SSam Leffler 		case 48:  rates |= 0x200; break;
39677850fa71SSam Leffler 		case 72:  rates |= 0x400; break;
39687850fa71SSam Leffler 		case 96:  rates |= 0x800; break;
39697850fa71SSam Leffler 		case 108: rates |= 0x1000; break;
39707850fa71SSam Leffler 		}
39717850fa71SSam Leffler 	return rates;
39727850fa71SSam Leffler }
39737850fa71SSam Leffler 
39747850fa71SSam Leffler /*
39757850fa71SSam Leffler  * Construct an HT firmware bitmask from an HT rate set.
39767850fa71SSam Leffler  */
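/*
 * Bit i of the result corresponds to MCS i (MCS 0-15 only); e.g. a
 * single-stream station advertising MCS 0-7 yields 0x00ff.
 */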
39777850fa71SSam Leffler static uint32_t
39787850fa71SSam Leffler get_htrate_bitmap(const struct ieee80211_htrateset *rs)
39797850fa71SSam Leffler {
39807850fa71SSam Leffler 	uint32_t rates;
39817850fa71SSam Leffler 	int i;
39827850fa71SSam Leffler 
39837850fa71SSam Leffler 	rates = 0;
39847850fa71SSam Leffler 	for (i = 0; i < rs->rs_nrates; i++) {
39857850fa71SSam Leffler 		if (rs->rs_rates[i] < 16)
39867850fa71SSam Leffler 			rates |= 1<<rs->rs_rates[i];
39877850fa71SSam Leffler 	}
39887850fa71SSam Leffler 	return rates;
39897850fa71SSam Leffler }
39907850fa71SSam Leffler 
39917850fa71SSam Leffler /*
39927850fa71SSam Leffler  * Craft station database entry for station.
39937850fa71SSam Leffler  * NB: use host byte order here, the hal handles byte swapping.
39947850fa71SSam Leffler  */
39957850fa71SSam Leffler static MWL_HAL_PEERINFO *
39967850fa71SSam Leffler mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
39977850fa71SSam Leffler {
39987850fa71SSam Leffler 	const struct ieee80211vap *vap = ni->ni_vap;
39997850fa71SSam Leffler 
40007850fa71SSam Leffler 	memset(pi, 0, sizeof(*pi));
40017850fa71SSam Leffler 	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
40027850fa71SSam Leffler 	pi->CapInfo = ni->ni_capinfo;
40037850fa71SSam Leffler 	if (ni->ni_flags & IEEE80211_NODE_HT) {
40047850fa71SSam Leffler 		/* HT capabilities, etc */
40057850fa71SSam Leffler 		pi->HTCapabilitiesInfo = ni->ni_htcap;
40067850fa71SSam Leffler 		/* XXX pi.HTCapabilitiesInfo */
40077850fa71SSam Leffler 	        pi->MacHTParamInfo = ni->ni_htparam;
40087850fa71SSam Leffler 		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
40097850fa71SSam Leffler 		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
40107850fa71SSam Leffler 		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
40117850fa71SSam Leffler 		pi->AddHtInfo.OpMode = ni->ni_htopmode;
40127850fa71SSam Leffler 		pi->AddHtInfo.stbc = ni->ni_htstbc;
40137850fa71SSam Leffler 
40147850fa71SSam Leffler 		/* constrain according to local configuration */
40157850fa71SSam Leffler 		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
40167850fa71SSam Leffler 			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
40177850fa71SSam Leffler 		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
40187850fa71SSam Leffler 			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
4019*ca389486SBjoern A. Zeeb 		if (ni->ni_chw != IEEE80211_STA_RX_BW_40)
40207850fa71SSam Leffler 			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
40217850fa71SSam Leffler 	}
40227850fa71SSam Leffler 	return pi;
40237850fa71SSam Leffler }
40247850fa71SSam Leffler 
40257850fa71SSam Leffler /*
4026cf4c5a53SSam Leffler  * Re-create the local sta db entry for a vap to ensure
4027cf4c5a53SSam Leffler  * up to date WME state is pushed to the firmware.  Because
4028cf4c5a53SSam Leffler  * this resets crypto state this must be followed by a
4029cf4c5a53SSam Leffler  * reload of any keys in the global key table.
4030cf4c5a53SSam Leffler  */
4031cf4c5a53SSam Leffler static int
4032cf4c5a53SSam Leffler mwl_localstadb(struct ieee80211vap *vap)
4033cf4c5a53SSam Leffler {
4034cf4c5a53SSam Leffler #define	WME(ie) ((const struct ieee80211_wme_info *) ie)
4035cf4c5a53SSam Leffler 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
4036cf4c5a53SSam Leffler 	struct ieee80211_node *bss;
40377850fa71SSam Leffler 	MWL_HAL_PEERINFO pi;
4038cf4c5a53SSam Leffler 	int error;
4039cf4c5a53SSam Leffler 
4040cf4c5a53SSam Leffler 	switch (vap->iv_opmode) {
4041cf4c5a53SSam Leffler 	case IEEE80211_M_STA:
4042cf4c5a53SSam Leffler 		bss = vap->iv_bss;
40437850fa71SSam Leffler 		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
40447850fa71SSam Leffler 		    vap->iv_state == IEEE80211_S_RUN ?
40457850fa71SSam Leffler 			mkpeerinfo(&pi, bss) : NULL,
40467850fa71SSam Leffler 		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
4047cf4c5a53SSam Leffler 		    bss->ni_ies.wme_ie != NULL ?
4048cf4c5a53SSam Leffler 			WME(bss->ni_ies.wme_ie)->wme_info : 0);
4049cf4c5a53SSam Leffler 		if (error == 0)
4050cf4c5a53SSam Leffler 			mwl_setglobalkeys(vap);
4051cf4c5a53SSam Leffler 		break;
4052cf4c5a53SSam Leffler 	case IEEE80211_M_HOSTAP:
405359aa14a9SRui Paulo 	case IEEE80211_M_MBSS:
4054cf4c5a53SSam Leffler 		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
4055cf4c5a53SSam Leffler 		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
4056cf4c5a53SSam Leffler 		if (error == 0)
4057cf4c5a53SSam Leffler 			mwl_setglobalkeys(vap);
4058cf4c5a53SSam Leffler 		break;
4059cf4c5a53SSam Leffler 	default:
4060cf4c5a53SSam Leffler 		error = 0;
4061cf4c5a53SSam Leffler 		break;
4062cf4c5a53SSam Leffler 	}
4063cf4c5a53SSam Leffler 	return error;
4064cf4c5a53SSam Leffler #undef WME
4065cf4c5a53SSam Leffler }
4066cf4c5a53SSam Leffler 
4067cf4c5a53SSam Leffler static int
4068cf4c5a53SSam Leffler mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4069cf4c5a53SSam Leffler {
4070cf4c5a53SSam Leffler 	struct mwl_vap *mvp = MWL_VAP(vap);
4071cf4c5a53SSam Leffler 	struct mwl_hal_vap *hvap = mvp->mv_hvap;
4072cf4c5a53SSam Leffler 	struct ieee80211com *ic = vap->iv_ic;
4073cf4c5a53SSam Leffler 	struct ieee80211_node *ni = NULL;
40747a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
4075cf4c5a53SSam Leffler 	struct mwl_hal *mh = sc->sc_mh;
4076cf4c5a53SSam Leffler 	enum ieee80211_state ostate = vap->iv_state;
4077cf4c5a53SSam Leffler 	int error;
4078cf4c5a53SSam Leffler 
4079cf4c5a53SSam Leffler 	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
408013f2ef16SJustin Hibbits 	    if_name(vap->iv_ifp), __func__,
4081cf4c5a53SSam Leffler 	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
4082cf4c5a53SSam Leffler 
4083cf4c5a53SSam Leffler 	callout_stop(&sc->sc_timer);
4084cf4c5a53SSam Leffler 	/*
4085cf4c5a53SSam Leffler 	 * Clear current radar detection state.
4086cf4c5a53SSam Leffler 	 */
4087cf4c5a53SSam Leffler 	if (ostate == IEEE80211_S_CAC) {
4088cf4c5a53SSam Leffler 		/* stop quiet mode radar detection */
4089cf4c5a53SSam Leffler 		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
4090cf4c5a53SSam Leffler 	} else if (sc->sc_radarena) {
4091cf4c5a53SSam Leffler 		/* stop in-service radar detection */
4092cf4c5a53SSam Leffler 		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
4093cf4c5a53SSam Leffler 		sc->sc_radarena = 0;
4094cf4c5a53SSam Leffler 	}
4095cf4c5a53SSam Leffler 	/*
4096cf4c5a53SSam Leffler 	 * Carry out per-state actions before doing net80211 work.
4097cf4c5a53SSam Leffler 	 */
4098cf4c5a53SSam Leffler 	if (nstate == IEEE80211_S_INIT) {
4099cf4c5a53SSam Leffler 		/* NB: only ap+sta vap's have a fw entity */
4100cf4c5a53SSam Leffler 		if (hvap != NULL)
4101cf4c5a53SSam Leffler 			mwl_hal_stop(hvap);
4102cf4c5a53SSam Leffler 	} else if (nstate == IEEE80211_S_SCAN) {
4103cf4c5a53SSam Leffler 		mwl_hal_start(hvap);
4104cf4c5a53SSam Leffler 		/* NB: this disables beacon frames */
4105cf4c5a53SSam Leffler 		mwl_hal_setinframode(hvap);
4106cf4c5a53SSam Leffler 	} else if (nstate == IEEE80211_S_AUTH) {
4107cf4c5a53SSam Leffler 		/*
4108cf4c5a53SSam Leffler 		 * Must create a sta db entry in case a WEP key needs to
4109cf4c5a53SSam Leffler 		 * be plumbed.  This entry will be overwritten if we
4110cf4c5a53SSam Leffler 		 * associate; otherwise it will be reclaimed on node free.
4111cf4c5a53SSam Leffler 		 */
4112cf4c5a53SSam Leffler 		ni = vap->iv_bss;
4113cf4c5a53SSam Leffler 		MWL_NODE(ni)->mn_hvap = hvap;
4114cf4c5a53SSam Leffler 		(void) mwl_peerstadb(ni, 0, 0, NULL);
4115cf4c5a53SSam Leffler 	} else if (nstate == IEEE80211_S_CSA) {
4116cf4c5a53SSam Leffler 		/* XXX move to below? */
411759aa14a9SRui Paulo 		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
411859aa14a9SRui Paulo 		    vap->iv_opmode == IEEE80211_M_MBSS)
4119cf4c5a53SSam Leffler 			mwl_startcsa(vap);
4120cf4c5a53SSam Leffler 	} else if (nstate == IEEE80211_S_CAC) {
4121cf4c5a53SSam Leffler 		/* XXX move to below? */
4122cf4c5a53SSam Leffler 		/* stop ap xmit and enable quiet mode radar detection */
4123cf4c5a53SSam Leffler 		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
4124cf4c5a53SSam Leffler 	}
4125cf4c5a53SSam Leffler 
4126cf4c5a53SSam Leffler 	/*
4127cf4c5a53SSam Leffler 	 * Invoke the parent method to do net80211 work.
4128cf4c5a53SSam Leffler 	 */
4129cf4c5a53SSam Leffler 	error = mvp->mv_newstate(vap, nstate, arg);
4130cf4c5a53SSam Leffler 
4131cf4c5a53SSam Leffler 	/*
4132cf4c5a53SSam Leffler 	 * Carry out work that must be done after net80211 runs;
4133cf4c5a53SSam Leffler 	 * this work requires up to date state (e.g. iv_bss).
4134cf4c5a53SSam Leffler 	 */
4135cf4c5a53SSam Leffler 	if (error == 0 && nstate == IEEE80211_S_RUN) {
4136cf4c5a53SSam Leffler 		/* NB: collect bss node again, it may have changed */
4137cf4c5a53SSam Leffler 		ni = vap->iv_bss;
4138cf4c5a53SSam Leffler 
4139cf4c5a53SSam Leffler 		DPRINTF(sc, MWL_DEBUG_STATE,
4140cf4c5a53SSam Leffler 		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
4141cf4c5a53SSam Leffler 		    "capinfo 0x%04x chan %d\n",
414213f2ef16SJustin Hibbits 		    if_name(vap->iv_ifp), __func__, vap->iv_flags,
4143cf4c5a53SSam Leffler 		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
4144cf4c5a53SSam Leffler 		    ieee80211_chan2ieee(ic, ic->ic_curchan));
4145cf4c5a53SSam Leffler 
4146cf4c5a53SSam Leffler 		/*
41477850fa71SSam Leffler 		 * Recreate local sta db entry to update WME/HT state.
4148cf4c5a53SSam Leffler 		 */
4149cf4c5a53SSam Leffler 		mwl_localstadb(vap);
4150cf4c5a53SSam Leffler 		switch (vap->iv_opmode) {
4151cf4c5a53SSam Leffler 		case IEEE80211_M_HOSTAP:
415259aa14a9SRui Paulo 		case IEEE80211_M_MBSS:
4153cf4c5a53SSam Leffler 			if (ostate == IEEE80211_S_CAC) {
4154cf4c5a53SSam Leffler 				/* enable in-service radar detection */
4155cf4c5a53SSam Leffler 				mwl_hal_setradardetection(mh,
4156cf4c5a53SSam Leffler 				    DR_IN_SERVICE_MONITOR_START);
4157cf4c5a53SSam Leffler 				sc->sc_radarena = 1;
4158cf4c5a53SSam Leffler 			}
4159cf4c5a53SSam Leffler 			/*
4160cf4c5a53SSam Leffler 			 * Allocate and setup the beacon frame
4161cf4c5a53SSam Leffler 			 * (and related state).
4162cf4c5a53SSam Leffler 			 */
4163cf4c5a53SSam Leffler 			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
4164cf4c5a53SSam Leffler 			if (error != 0) {
4165cf4c5a53SSam Leffler 				DPRINTF(sc, MWL_DEBUG_STATE,
4166cf4c5a53SSam Leffler 				    "%s: beacon setup failed, error %d\n",
4167cf4c5a53SSam Leffler 				    __func__, error);
4168cf4c5a53SSam Leffler 				goto bad;
4169cf4c5a53SSam Leffler 			}
4170cf4c5a53SSam Leffler 			/* NB: must be after setting up beacon */
4171cf4c5a53SSam Leffler 			mwl_hal_start(hvap);
4172cf4c5a53SSam Leffler 			break;
4173cf4c5a53SSam Leffler 		case IEEE80211_M_STA:
4174cf4c5a53SSam Leffler 			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
417513f2ef16SJustin Hibbits 			    if_name(vap->iv_ifp), __func__, ni->ni_associd);
4176cf4c5a53SSam Leffler 			/*
4177cf4c5a53SSam Leffler 			 * Set state now that we're associated.
4178cf4c5a53SSam Leffler 			 */
4179cf4c5a53SSam Leffler 			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
4180cf4c5a53SSam Leffler 			mwl_setrates(vap);
4181cf4c5a53SSam Leffler 			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
41827850fa71SSam Leffler 			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
41837850fa71SSam Leffler 			    sc->sc_ndwdsvaps++ == 0)
41847850fa71SSam Leffler 				mwl_hal_setdwds(mh, 1);
4185cf4c5a53SSam Leffler 			break;
4186cf4c5a53SSam Leffler 		case IEEE80211_M_WDS:
4187cf4c5a53SSam Leffler 			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
418813f2ef16SJustin Hibbits 			    if_name(vap->iv_ifp), __func__,
4189cf4c5a53SSam Leffler 			    ether_sprintf(ni->ni_bssid));
4190cf4c5a53SSam Leffler 			mwl_seteapolformat(vap);
4191cf4c5a53SSam Leffler 			break;
4192cf4c5a53SSam Leffler 		default:
4193cf4c5a53SSam Leffler 			break;
4194cf4c5a53SSam Leffler 		}
4195cf4c5a53SSam Leffler 		/*
4196cf4c5a53SSam Leffler 		 * Set CS mode according to operating channel;
4197cf4c5a53SSam Leffler 		 * this is mostly an optimization for 5GHz.
4198cf4c5a53SSam Leffler 		 *
4199cf4c5a53SSam Leffler 		 * NB: must follow mwl_hal_start which resets csmode
4200cf4c5a53SSam Leffler 		 */
4201cf4c5a53SSam Leffler 		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
4202cf4c5a53SSam Leffler 			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
4203cf4c5a53SSam Leffler 		else
4204cf4c5a53SSam Leffler 			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
4205cf4c5a53SSam Leffler 		/*
4206cf4c5a53SSam Leffler 		 * Start timer to prod firmware.
4207cf4c5a53SSam Leffler 		 */
4208cf4c5a53SSam Leffler 		if (sc->sc_ageinterval != 0)
4209cf4c5a53SSam Leffler 			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
4210cf4c5a53SSam Leffler 			    mwl_agestations, sc);
4211cf4c5a53SSam Leffler 	} else if (nstate == IEEE80211_S_SLEEP) {
4212cf4c5a53SSam Leffler 		/* XXX set chip in power save */
42137850fa71SSam Leffler 	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
42147850fa71SSam Leffler 	    --sc->sc_ndwdsvaps == 0)
42157850fa71SSam Leffler 		mwl_hal_setdwds(mh, 0);
4216cf4c5a53SSam Leffler bad:
4217cf4c5a53SSam Leffler 	return error;
4218cf4c5a53SSam Leffler }
4219cf4c5a53SSam Leffler 
4220cf4c5a53SSam Leffler /*
4221cf4c5a53SSam Leffler  * Manage station id's; these are separate from AID's
4222cf4c5a53SSam Leffler  * as AID's may have values out of the range of possible
4223cf4c5a53SSam Leffler  * station id's acceptable to the firmware.
4224cf4c5a53SSam Leffler  */
4225cf4c5a53SSam Leffler static int
4226cf4c5a53SSam Leffler allocstaid(struct mwl_softc *sc, int aid)
4227cf4c5a53SSam Leffler {
4228cf4c5a53SSam Leffler 	int staid;
4229cf4c5a53SSam Leffler 
4230cf4c5a53SSam Leffler 	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4231cf4c5a53SSam Leffler 		/* NB: don't use 0 */
4232cf4c5a53SSam Leffler 		for (staid = 1; staid < MWL_MAXSTAID; staid++)
4233cf4c5a53SSam Leffler 			if (isclr(sc->sc_staid, staid))
4234cf4c5a53SSam Leffler 				break;
4235cf4c5a53SSam Leffler 	} else
4236cf4c5a53SSam Leffler 		staid = aid;
4237cf4c5a53SSam Leffler 	setbit(sc->sc_staid, staid);
4238cf4c5a53SSam Leffler 	return staid;
4239cf4c5a53SSam Leffler }
4240cf4c5a53SSam Leffler 
4241cf4c5a53SSam Leffler static void
4242cf4c5a53SSam Leffler delstaid(struct mwl_softc *sc, int staid)
4243cf4c5a53SSam Leffler {
4244cf4c5a53SSam Leffler 	clrbit(sc->sc_staid, staid);
4245cf4c5a53SSam Leffler }
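
/*
 * Illustrative sketch, kept under #if 0 like the debug helpers further
 * below: how allocstaid/delstaid are expected to pair up over a node's
 * lifetime.  The AID value is made up for the example.
 */
#if 0
static void
staid_example(struct mwl_softc *sc)
{
	int a, b;

	/* AID in range and unused: the staid mirrors the AID. */
	a = allocstaid(sc, 5);		/* -> 5 */
	/*
	 * Same AID again (or an AID out of range): fall back to the
	 * first free id; 0 is never handed out.
	 */
	b = allocstaid(sc, 5);		/* -> lowest clear bit >= 1 */
	/* Release both on node teardown so the slots can be reused. */
	delstaid(sc, a);
	delstaid(sc, b);
}
#endif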
4246cf4c5a53SSam Leffler 
4247cf4c5a53SSam Leffler /*
4248cf4c5a53SSam Leffler  * Setup driver-specific state for a newly associated node.
4249cf4c5a53SSam Leffler  * Note that we're also called on a re-associate; the isnew
4250cf4c5a53SSam Leffler  * param tells us if this is the first time or not.
4251cf4c5a53SSam Leffler  */
4252cf4c5a53SSam Leffler static void
4253cf4c5a53SSam Leffler mwl_newassoc(struct ieee80211_node *ni, int isnew)
4254cf4c5a53SSam Leffler {
4255cf4c5a53SSam Leffler 	struct ieee80211vap *vap = ni->ni_vap;
42567a79cebfSGleb Smirnoff 	struct mwl_softc *sc = vap->iv_ic->ic_softc;
4257cf4c5a53SSam Leffler 	struct mwl_node *mn = MWL_NODE(ni);
4258cf4c5a53SSam Leffler 	MWL_HAL_PEERINFO pi;
4259cf4c5a53SSam Leffler 	uint16_t aid;
4260cf4c5a53SSam Leffler 	int error;
4261cf4c5a53SSam Leffler 
4262cf4c5a53SSam Leffler 	aid = IEEE80211_AID(ni->ni_associd);
4263cf4c5a53SSam Leffler 	if (isnew) {
4264cf4c5a53SSam Leffler 		mn->mn_staid = allocstaid(sc, aid);
4265cf4c5a53SSam Leffler 		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4266cf4c5a53SSam Leffler 	} else {
4267cf4c5a53SSam Leffler 		mn = MWL_NODE(ni);
4268cf4c5a53SSam Leffler 		/* XXX reset BA stream? */
4269cf4c5a53SSam Leffler 	}
4270cf4c5a53SSam Leffler 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4271cf4c5a53SSam Leffler 	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
42727850fa71SSam Leffler 	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4273cf4c5a53SSam Leffler 	if (error != 0) {
4274cf4c5a53SSam Leffler 		DPRINTF(sc, MWL_DEBUG_NODE,
4275cf4c5a53SSam Leffler 		    "%s: error %d creating sta db entry\n",
4276cf4c5a53SSam Leffler 		    __func__, error);
4277cf4c5a53SSam Leffler 		/* XXX how to deal with error? */
4278cf4c5a53SSam Leffler 	}
4279cf4c5a53SSam Leffler }
4280cf4c5a53SSam Leffler 
4281cf4c5a53SSam Leffler /*
4282cf4c5a53SSam Leffler  * Periodically poke the firmware to age out station state
4283cf4c5a53SSam Leffler  * (power save queues, pending tx aggregates).
4284cf4c5a53SSam Leffler  */
4285cf4c5a53SSam Leffler static void
4286cf4c5a53SSam Leffler mwl_agestations(void *arg)
4287cf4c5a53SSam Leffler {
4288cf4c5a53SSam Leffler 	struct mwl_softc *sc = arg;
4289cf4c5a53SSam Leffler 
4290cf4c5a53SSam Leffler 	mwl_hal_setkeepalive(sc->sc_mh);
4291cf4c5a53SSam Leffler 	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
42927850fa71SSam Leffler 		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
4293cf4c5a53SSam Leffler }
4294cf4c5a53SSam Leffler 
4295cf4c5a53SSam Leffler static const struct mwl_hal_channel *
4296cf4c5a53SSam Leffler findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4297cf4c5a53SSam Leffler {
4298cf4c5a53SSam Leffler 	int i;
4299cf4c5a53SSam Leffler 
4300cf4c5a53SSam Leffler 	for (i = 0; i < ci->nchannels; i++) {
4301cf4c5a53SSam Leffler 		const struct mwl_hal_channel *hc = &ci->channels[i];
4302cf4c5a53SSam Leffler 		if (hc->ieee == ieee)
4303cf4c5a53SSam Leffler 			return hc;
4304cf4c5a53SSam Leffler 	}
4305cf4c5a53SSam Leffler 	return NULL;
4306cf4c5a53SSam Leffler }
4307cf4c5a53SSam Leffler 
4308cf4c5a53SSam Leffler static int
4309cf4c5a53SSam Leffler mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4310cf4c5a53SSam Leffler 	int nchan, struct ieee80211_channel chans[])
4311cf4c5a53SSam Leffler {
43127a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
4313cf4c5a53SSam Leffler 	struct mwl_hal *mh = sc->sc_mh;
4314cf4c5a53SSam Leffler 	const MWL_HAL_CHANNELINFO *ci;
4315cf4c5a53SSam Leffler 	int i;
4316cf4c5a53SSam Leffler 
4317cf4c5a53SSam Leffler 	for (i = 0; i < nchan; i++) {
4318cf4c5a53SSam Leffler 		struct ieee80211_channel *c = &chans[i];
4319cf4c5a53SSam Leffler 		const struct mwl_hal_channel *hc;
4320cf4c5a53SSam Leffler 
4321cf4c5a53SSam Leffler 		if (IEEE80211_IS_CHAN_2GHZ(c)) {
4322cf4c5a53SSam Leffler 			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4323cf4c5a53SSam Leffler 			    IEEE80211_IS_CHAN_HT40(c) ?
4324cf4c5a53SSam Leffler 				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4325cf4c5a53SSam Leffler 		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4326cf4c5a53SSam Leffler 			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4327cf4c5a53SSam Leffler 			    IEEE80211_IS_CHAN_HT40(c) ?
4328cf4c5a53SSam Leffler 				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4329cf4c5a53SSam Leffler 		} else {
43307a79cebfSGleb Smirnoff 			device_printf(sc->sc_dev,
4331cf4c5a53SSam Leffler 			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4332cf4c5a53SSam Leffler 			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4333cf4c5a53SSam Leffler 			return EINVAL;
4334cf4c5a53SSam Leffler 		}
4335cf4c5a53SSam Leffler 		/*
4336cf4c5a53SSam Leffler 		 * Verify channel has cal data and cap tx power;
4336cf4c5a53SSam Leffler 		 * NB: ic_maxpower is in 0.5 dBm units, hence the 2x scale.
4337cf4c5a53SSam Leffler 		 */
4338cf4c5a53SSam Leffler 		hc = findhalchannel(ci, c->ic_ieee);
4339cf4c5a53SSam Leffler 		if (hc != NULL) {
4340cf4c5a53SSam Leffler 			if (c->ic_maxpower > 2*hc->maxTxPow)
4341cf4c5a53SSam Leffler 				c->ic_maxpower = 2*hc->maxTxPow;
4342cf4c5a53SSam Leffler 			goto next;
4343cf4c5a53SSam Leffler 		}
4344cf4c5a53SSam Leffler 		if (IEEE80211_IS_CHAN_HT40(c)) {
4345cf4c5a53SSam Leffler 			/*
4346cf4c5a53SSam Leffler 			 * Look for the extension channel since the
4347cf4c5a53SSam Leffler 			 * hal table only has the primary channel.
4348cf4c5a53SSam Leffler 			 */
4349cf4c5a53SSam Leffler 			hc = findhalchannel(ci, c->ic_extieee);
4350cf4c5a53SSam Leffler 			if (hc != NULL) {
4351cf4c5a53SSam Leffler 				if (c->ic_maxpower > 2*hc->maxTxPow)
4352cf4c5a53SSam Leffler 					c->ic_maxpower = 2*hc->maxTxPow;
4353cf4c5a53SSam Leffler 				goto next;
4354cf4c5a53SSam Leffler 			}
4355cf4c5a53SSam Leffler 		}
43567a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev,
4357cf4c5a53SSam Leffler 		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4358cf4c5a53SSam Leffler 		    __func__, c->ic_ieee, c->ic_extieee,
4359cf4c5a53SSam Leffler 		    c->ic_freq, c->ic_flags);
4360cf4c5a53SSam Leffler 		return EINVAL;
4361cf4c5a53SSam Leffler 	next:
4362cf4c5a53SSam Leffler 		;
4363cf4c5a53SSam Leffler 	}
4364cf4c5a53SSam Leffler 	return 0;
4365cf4c5a53SSam Leffler }
4366cf4c5a53SSam Leffler 
4367cf4c5a53SSam Leffler #define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4368cf4c5a53SSam Leffler #define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4369cf4c5a53SSam Leffler 
4370cf4c5a53SSam Leffler static void
4371cf4c5a53SSam Leffler addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4372cf4c5a53SSam Leffler 	const MWL_HAL_CHANNELINFO *ci, int flags)
4373cf4c5a53SSam Leffler {
43745216e2b6SAndriy Voskoboinyk 	int i, error;
4375cf4c5a53SSam Leffler 
4376cf4c5a53SSam Leffler 	for (i = 0; i < ci->nchannels; i++) {
43775216e2b6SAndriy Voskoboinyk 		const struct mwl_hal_channel *hc = &ci->channels[i];
43785216e2b6SAndriy Voskoboinyk 
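		/*
		 * NB: ENOENT just means this channel could not be paired
		 * up for HT40; skip it and keep walking the hal table.
		 * Any other error aborts the walk.
		 */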
43795216e2b6SAndriy Voskoboinyk 		error = ieee80211_add_channel_ht40(chans, maxchans, nchans,
43805216e2b6SAndriy Voskoboinyk 		    hc->ieee, hc->maxTxPow, flags);
43815216e2b6SAndriy Voskoboinyk 		if (error != 0 && error != ENOENT)
4382cf4c5a53SSam Leffler 			break;
4383cf4c5a53SSam Leffler 	}
4384cf4c5a53SSam Leffler }
4385cf4c5a53SSam Leffler 
4386cf4c5a53SSam Leffler static void
4387cf4c5a53SSam Leffler addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
43885216e2b6SAndriy Voskoboinyk 	const MWL_HAL_CHANNELINFO *ci, const uint8_t bands[])
4389cf4c5a53SSam Leffler {
43905216e2b6SAndriy Voskoboinyk 	int i, error;
4391cf4c5a53SSam Leffler 
43925216e2b6SAndriy Voskoboinyk 	error = 0;
43935216e2b6SAndriy Voskoboinyk 	for (i = 0; i < ci->nchannels && error == 0; i++) {
43945216e2b6SAndriy Voskoboinyk 		const struct mwl_hal_channel *hc = &ci->channels[i];
4395cf4c5a53SSam Leffler 
43965216e2b6SAndriy Voskoboinyk 		error = ieee80211_add_channel(chans, maxchans, nchans,
43975216e2b6SAndriy Voskoboinyk 		    hc->ieee, hc->freq, hc->maxTxPow, 0, bands);
4398cf4c5a53SSam Leffler 	}
4399cf4c5a53SSam Leffler }
4400cf4c5a53SSam Leffler 
4401cf4c5a53SSam Leffler static void
4402cf4c5a53SSam Leffler getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
4403cf4c5a53SSam Leffler 	struct ieee80211_channel chans[])
4404cf4c5a53SSam Leffler {
4405cf4c5a53SSam Leffler 	const MWL_HAL_CHANNELINFO *ci;
44065216e2b6SAndriy Voskoboinyk 	uint8_t bands[IEEE80211_MODE_BYTES];
4407cf4c5a53SSam Leffler 
4408cf4c5a53SSam Leffler 	/*
4409cf4c5a53SSam Leffler 	 * Use the channel info from the hal to craft the
4410cf4c5a53SSam Leffler 	 * channel list.  Note that we pass back an unsorted
4411cf4c5a53SSam Leffler 	 * list; the caller is required to sort it for us
4412cf4c5a53SSam Leffler 	 * (if desired).
4413cf4c5a53SSam Leffler 	 */
4414cf4c5a53SSam Leffler 	*nchans = 0;
4415cf4c5a53SSam Leffler 	if (mwl_hal_getchannelinfo(sc->sc_mh,
44165216e2b6SAndriy Voskoboinyk 	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
44175216e2b6SAndriy Voskoboinyk 		memset(bands, 0, sizeof(bands));
44185216e2b6SAndriy Voskoboinyk 		setbit(bands, IEEE80211_MODE_11B);
44195216e2b6SAndriy Voskoboinyk 		setbit(bands, IEEE80211_MODE_11G);
44205216e2b6SAndriy Voskoboinyk 		setbit(bands, IEEE80211_MODE_11NG);
44215216e2b6SAndriy Voskoboinyk 		addchannels(chans, maxchans, nchans, ci, bands);
44225216e2b6SAndriy Voskoboinyk 	}
4423cf4c5a53SSam Leffler 	if (mwl_hal_getchannelinfo(sc->sc_mh,
44245216e2b6SAndriy Voskoboinyk 	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
44255216e2b6SAndriy Voskoboinyk 		memset(bands, 0, sizeof(bands));
44265216e2b6SAndriy Voskoboinyk 		setbit(bands, IEEE80211_MODE_11A);
44275216e2b6SAndriy Voskoboinyk 		setbit(bands, IEEE80211_MODE_11NA);
44285216e2b6SAndriy Voskoboinyk 		addchannels(chans, maxchans, nchans, ci, bands);
44295216e2b6SAndriy Voskoboinyk 	}
4430cf4c5a53SSam Leffler 	if (mwl_hal_getchannelinfo(sc->sc_mh,
4431cf4c5a53SSam Leffler 	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4432cf4c5a53SSam Leffler 		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4433cf4c5a53SSam Leffler 	if (mwl_hal_getchannelinfo(sc->sc_mh,
4434cf4c5a53SSam Leffler 	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4435cf4c5a53SSam Leffler 		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4436cf4c5a53SSam Leffler }
4437cf4c5a53SSam Leffler 
4438cf4c5a53SSam Leffler static void
4439cf4c5a53SSam Leffler mwl_getradiocaps(struct ieee80211com *ic,
4440cf4c5a53SSam Leffler 	int maxchans, int *nchans, struct ieee80211_channel chans[])
4441cf4c5a53SSam Leffler {
44427a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
4443cf4c5a53SSam Leffler 
4444cf4c5a53SSam Leffler 	getchannels(sc, maxchans, nchans, chans);
4445cf4c5a53SSam Leffler }
4446cf4c5a53SSam Leffler 
4447cf4c5a53SSam Leffler static int
4448cf4c5a53SSam Leffler mwl_getchannels(struct mwl_softc *sc)
4449cf4c5a53SSam Leffler {
44507a79cebfSGleb Smirnoff 	struct ieee80211com *ic = &sc->sc_ic;
4451cf4c5a53SSam Leffler 
4452cf4c5a53SSam Leffler 	/*
4453cf4c5a53SSam Leffler 	 * Use the channel info from the hal to craft the
4454cf4c5a53SSam Leffler 	 * channel list for net80211.  Note that we pass up
4455cf4c5a53SSam Leffler 	 * an unsorted list; net80211 will sort it for us.
4456cf4c5a53SSam Leffler 	 */
4457cf4c5a53SSam Leffler 	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4458cf4c5a53SSam Leffler 	ic->ic_nchans = 0;
4459cf4c5a53SSam Leffler 	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4460cf4c5a53SSam Leffler 
4461cf4c5a53SSam Leffler 	ic->ic_regdomain.regdomain = SKU_DEBUG;
4462cf4c5a53SSam Leffler 	ic->ic_regdomain.country = CTRY_DEFAULT;
4463cf4c5a53SSam Leffler 	ic->ic_regdomain.location = 'I';
4464cf4c5a53SSam Leffler 	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
4465cf4c5a53SSam Leffler 	ic->ic_regdomain.isocc[1] = ' ';
4466cf4c5a53SSam Leffler 	return (ic->ic_nchans == 0 ? EIO : 0);
4467cf4c5a53SSam Leffler }
4468cf4c5a53SSam Leffler #undef IEEE80211_CHAN_HTA
4469cf4c5a53SSam Leffler #undef IEEE80211_CHAN_HTG
4470cf4c5a53SSam Leffler 
4471cf4c5a53SSam Leffler #ifdef MWL_DEBUG
4472cf4c5a53SSam Leffler static void
4473cf4c5a53SSam Leffler mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
4474cf4c5a53SSam Leffler {
4475cf4c5a53SSam Leffler 	const struct mwl_rxdesc *ds = bf->bf_desc;
4476cf4c5a53SSam Leffler 	uint32_t status = le32toh(ds->Status);
4477cf4c5a53SSam Leffler 
44782706e872SMarius Strobl 	printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
4479cf4c5a53SSam Leffler 	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
44802706e872SMarius Strobl 	    ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
44812706e872SMarius Strobl 	    le32toh(ds->pPhysBuffData), ds->RxControl,
4482cf4c5a53SSam Leffler 	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
4483cf4c5a53SSam Leffler 	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
4484cf4c5a53SSam Leffler 	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
4485cf4c5a53SSam Leffler 	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
4486cf4c5a53SSam Leffler }
4487cf4c5a53SSam Leffler 
4488cf4c5a53SSam Leffler static void
4489cf4c5a53SSam Leffler mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
4490cf4c5a53SSam Leffler {
4491cf4c5a53SSam Leffler 	const struct mwl_txdesc *ds = bf->bf_desc;
4492cf4c5a53SSam Leffler 	uint32_t status = le32toh(ds->Status);
4493cf4c5a53SSam Leffler 
4494cf4c5a53SSam Leffler 	printf("Q%u[%3u]", qnum, ix);
44952706e872SMarius Strobl 	printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
4496cf4c5a53SSam Leffler 	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
4497cf4c5a53SSam Leffler 	    le32toh(ds->pPhysNext),
4498cf4c5a53SSam Leffler 	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
4499cf4c5a53SSam Leffler 	    status & EAGLE_TXD_STATUS_USED ?
4500cf4c5a53SSam Leffler 		"" : (status & 3) != 0 ? " *" : " !");
4501cf4c5a53SSam Leffler 	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
4502cf4c5a53SSam Leffler 	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
4503cf4c5a53SSam Leffler 	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
4504cf4c5a53SSam Leffler #if MWL_TXDESC > 1
4505cf4c5a53SSam Leffler 	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
4506cf4c5a53SSam Leffler 	    , le32toh(ds->multiframes)
4507cf4c5a53SSam Leffler 	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
4508cf4c5a53SSam Leffler 	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
4509cf4c5a53SSam Leffler 	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
4510cf4c5a53SSam Leffler 	);
4511cf4c5a53SSam Leffler 	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
4512cf4c5a53SSam Leffler 	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
4513cf4c5a53SSam Leffler 	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
4514cf4c5a53SSam Leffler 	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
4515cf4c5a53SSam Leffler 	);
4516cf4c5a53SSam Leffler #endif
4517cf4c5a53SSam Leffler #if 0
4518cf4c5a53SSam Leffler { const uint8_t *cp = (const uint8_t *) ds;
4519cf4c5a53SSam Leffler   int i;
4520cf4c5a53SSam Leffler   for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
4521cf4c5a53SSam Leffler 	printf("%02x ", cp[i]);
4522cf4c5a53SSam Leffler 	if (((i+1) % 16) == 0)
4523cf4c5a53SSam Leffler 		printf("\n");
4524cf4c5a53SSam Leffler   }
4525cf4c5a53SSam Leffler   printf("\n");
4526cf4c5a53SSam Leffler }
4527cf4c5a53SSam Leffler #endif
4528cf4c5a53SSam Leffler }
4529cf4c5a53SSam Leffler #endif /* MWL_DEBUG */
4530cf4c5a53SSam Leffler 
4531cf4c5a53SSam Leffler #if 0
4532cf4c5a53SSam Leffler static void
4533cf4c5a53SSam Leffler mwl_txq_dump(struct mwl_txq *txq)
4534cf4c5a53SSam Leffler {
4535cf4c5a53SSam Leffler 	struct mwl_txbuf *bf;
4536cf4c5a53SSam Leffler 	int i = 0;
4537cf4c5a53SSam Leffler 
4538cf4c5a53SSam Leffler 	MWL_TXQ_LOCK(txq);
4539cf4c5a53SSam Leffler 	STAILQ_FOREACH(bf, &txq->active, bf_list) {
4540cf4c5a53SSam Leffler 		struct mwl_txdesc *ds = bf->bf_desc;
4541cf4c5a53SSam Leffler 		MWL_TXDESC_SYNC(txq, ds,
4542cf4c5a53SSam Leffler 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4543cf4c5a53SSam Leffler #ifdef MWL_DEBUG
4544cf4c5a53SSam Leffler 		mwl_printtxbuf(bf, txq->qnum, i);
4545cf4c5a53SSam Leffler #endif
4546cf4c5a53SSam Leffler 		i++;
4547cf4c5a53SSam Leffler 	}
4548cf4c5a53SSam Leffler 	MWL_TXQ_UNLOCK(txq);
4549cf4c5a53SSam Leffler }
4550cf4c5a53SSam Leffler #endif
4551cf4c5a53SSam Leffler 
4552cf4c5a53SSam Leffler static void
45537cf545d0SJohn Baldwin mwl_watchdog(void *arg)
4554cf4c5a53SSam Leffler {
45557a79cebfSGleb Smirnoff 	struct mwl_softc *sc = arg;
4556cf4c5a53SSam Leffler 
45577cf545d0SJohn Baldwin 	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
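	/*
	 * sc_tx_timer is a countdown in seconds, armed elsewhere when
	 * frames are handed to the hardware; 0 means nothing is
	 * outstanding.  Only fall through to the timeout handling when
	 * it expires on this tick.
	 */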
45587cf545d0SJohn Baldwin 	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
45597cf545d0SJohn Baldwin 		return;
45607cf545d0SJohn Baldwin 
45617a79cebfSGleb Smirnoff 	if (sc->sc_running && !sc->sc_invalid) {
4562cf4c5a53SSam Leffler 		if (mwl_hal_setkeepalive(sc->sc_mh))
45637a79cebfSGleb Smirnoff 			device_printf(sc->sc_dev,
45647a79cebfSGleb Smirnoff 			    "transmit timeout (firmware hung?)\n");
4565cf4c5a53SSam Leffler 		else
45667a79cebfSGleb Smirnoff 			device_printf(sc->sc_dev,
45677a79cebfSGleb Smirnoff 			    "transmit timeout\n");
4568cf4c5a53SSam Leffler #if 0
45697a79cebfSGleb Smirnoff 		mwl_reset(sc);
4570cf4c5a53SSam Leffler mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
4571cf4c5a53SSam Leffler #endif
45727a79cebfSGleb Smirnoff 		counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4573cf4c5a53SSam Leffler 		sc->sc_stats.mst_watchdog++;
4574cf4c5a53SSam Leffler 	}
4575cf4c5a53SSam Leffler }
4576cf4c5a53SSam Leffler 
4577cf4c5a53SSam Leffler #ifdef MWL_DIAGAPI
4578cf4c5a53SSam Leffler /*
4579cf4c5a53SSam Leffler  * Diagnostic interface to the HAL.  This is used by various
4580cf4c5a53SSam Leffler  * tools to do things like retrieve register contents for
4581cf4c5a53SSam Leffler  * debugging.  The mechanism is intentionally opaque so that
4582453130d9SPedro F. Giffuni  * it can change frequently w/o concern for compatibility.
4583cf4c5a53SSam Leffler  */
4584cf4c5a53SSam Leffler static int
4585cf4c5a53SSam Leffler mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
4586cf4c5a53SSam Leffler {
4587cf4c5a53SSam Leffler 	struct mwl_hal *mh = sc->sc_mh;
4588cf4c5a53SSam Leffler 	u_int id = md->md_id & MWL_DIAG_ID;
4589cf4c5a53SSam Leffler 	void *indata = NULL;
4590cf4c5a53SSam Leffler 	void *outdata = NULL;
4591cf4c5a53SSam Leffler 	u_int32_t insize = md->md_in_size;
4592cf4c5a53SSam Leffler 	u_int32_t outsize = md->md_out_size;
4593cf4c5a53SSam Leffler 	int error = 0;
4594cf4c5a53SSam Leffler 
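	/*
	 * md_id carries the HAL diag id in MWL_DIAG_ID plus flag bits:
	 * MWL_DIAG_IN means the caller supplied input data to copy in,
	 * MWL_DIAG_DYN means we allocate a bounce buffer so the results
	 * can be copied back out below.
	 */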
4595cf4c5a53SSam Leffler 	if (md->md_id & MWL_DIAG_IN) {
4596cf4c5a53SSam Leffler 		/*
4597cf4c5a53SSam Leffler 		 * Copy in data.
4598cf4c5a53SSam Leffler 		 */
4599cf4c5a53SSam Leffler 		indata = malloc(insize, M_TEMP, M_NOWAIT);
4600cf4c5a53SSam Leffler 		if (indata == NULL) {
4601cf4c5a53SSam Leffler 			error = ENOMEM;
4602cf4c5a53SSam Leffler 			goto bad;
4603cf4c5a53SSam Leffler 		}
4604cf4c5a53SSam Leffler 		error = copyin(md->md_in_data, indata, insize);
4605cf4c5a53SSam Leffler 		if (error)
4606cf4c5a53SSam Leffler 			goto bad;
4607cf4c5a53SSam Leffler 	}
4608cf4c5a53SSam Leffler 	if (md->md_id & MWL_DIAG_DYN) {
4609cf4c5a53SSam Leffler 		/*
4610cf4c5a53SSam Leffler 		 * Allocate a buffer for the results (otherwise the HAL
4611cf4c5a53SSam Leffler 		 * returns a pointer to a buffer where we can read the
4612cf4c5a53SSam Leffler 		 * results).  Note that we depend on the HAL leaving this
4613cf4c5a53SSam Leffler 		 * pointer for us to use below in reclaiming the buffer;
4614cf4c5a53SSam Leffler 		 * may want to be more defensive.
4615cf4c5a53SSam Leffler 		 */
4616cf4c5a53SSam Leffler 		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
4617cf4c5a53SSam Leffler 		if (outdata == NULL) {
4618cf4c5a53SSam Leffler 			error = ENOMEM;
4619cf4c5a53SSam Leffler 			goto bad;
4620cf4c5a53SSam Leffler 		}
4621cf4c5a53SSam Leffler 	}
4622cf4c5a53SSam Leffler 	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
4623cf4c5a53SSam Leffler 		if (outsize < md->md_out_size)
4624cf4c5a53SSam Leffler 			md->md_out_size = outsize;
4625cf4c5a53SSam Leffler 		if (outdata != NULL)
4626cf4c5a53SSam Leffler 			error = copyout(outdata, md->md_out_data,
4627cf4c5a53SSam Leffler 					md->md_out_size);
4628cf4c5a53SSam Leffler 	} else {
4629cf4c5a53SSam Leffler 		error = EINVAL;
4630cf4c5a53SSam Leffler 	}
4631cf4c5a53SSam Leffler bad:
4632cf4c5a53SSam Leffler 	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
4633cf4c5a53SSam Leffler 		free(indata, M_TEMP);
4634cf4c5a53SSam Leffler 	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
4635cf4c5a53SSam Leffler 		free(outdata, M_TEMP);
4636cf4c5a53SSam Leffler 	return error;
4637cf4c5a53SSam Leffler }
4638cf4c5a53SSam Leffler 
4639cf4c5a53SSam Leffler static int
4640cf4c5a53SSam Leffler mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
4641cf4c5a53SSam Leffler {
4642cf4c5a53SSam Leffler 	struct mwl_hal *mh = sc->sc_mh;
4643cf4c5a53SSam Leffler 	int error;
4644cf4c5a53SSam Leffler 
4645cf4c5a53SSam Leffler 	MWL_LOCK_ASSERT(sc);
4646cf4c5a53SSam Leffler 
4647cf4c5a53SSam Leffler 	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
4648cf4c5a53SSam Leffler 		device_printf(sc->sc_dev, "unable to load firmware\n");
4649cf4c5a53SSam Leffler 		return EIO;
4650cf4c5a53SSam Leffler 	}
4651cf4c5a53SSam Leffler 	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
4652cf4c5a53SSam Leffler 		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
4653cf4c5a53SSam Leffler 		return EIO;
4654cf4c5a53SSam Leffler 	}
4655cf4c5a53SSam Leffler 	error = mwl_setupdma(sc);
4656cf4c5a53SSam Leffler 	if (error != 0) {
4657cf4c5a53SSam Leffler 		/* NB: mwl_setupdma prints a msg */
4658cf4c5a53SSam Leffler 		return error;
4659cf4c5a53SSam Leffler 	}
4660cf4c5a53SSam Leffler 	/*
4661cf4c5a53SSam Leffler 	 * Reset tx/rx data structures; after reload we must
4662cf4c5a53SSam Leffler 	 * re-start the driver's notion of the next xmit/recv.
4663cf4c5a53SSam Leffler 	 */
4664cf4c5a53SSam Leffler 	mwl_draintxq(sc);		/* clear pending frames */
4665cf4c5a53SSam Leffler 	mwl_resettxq(sc);		/* rebuild tx q lists */
4666cf4c5a53SSam Leffler 	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
4667cf4c5a53SSam Leffler 	return 0;
4668cf4c5a53SSam Leffler }
4669cf4c5a53SSam Leffler #endif /* MWL_DIAGAPI */
4670cf4c5a53SSam Leffler 
46717a79cebfSGleb Smirnoff static void
46727a79cebfSGleb Smirnoff mwl_parent(struct ieee80211com *ic)
4673cf4c5a53SSam Leffler {
46747a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
46757a79cebfSGleb Smirnoff 	int startall = 0;
4676cf4c5a53SSam Leffler 
4677cf4c5a53SSam Leffler 	MWL_LOCK(sc);
46787a79cebfSGleb Smirnoff 	if (ic->ic_nrunning > 0) {
46797a79cebfSGleb Smirnoff 		if (sc->sc_running) {
4680cf4c5a53SSam Leffler 			/*
4681cf4c5a53SSam Leffler 			 * To avoid rescanning another access point,
4682cf4c5a53SSam Leffler 			 * do not call mwl_init() here.  Instead,
4683cf4c5a53SSam Leffler 			 * only reflect promisc mode settings.
4684cf4c5a53SSam Leffler 			 */
4685cf4c5a53SSam Leffler 			mwl_mode_init(sc);
46867a79cebfSGleb Smirnoff 		} else {
4687cf4c5a53SSam Leffler 			/*
4688cf4c5a53SSam Leffler 			 * Beware of being called during attach/detach
4689cf4c5a53SSam Leffler 			 * to reset promiscuous mode.  In that case we
4690cf4c5a53SSam Leffler 			 * will still be marked UP but not RUNNING.
4691cf4c5a53SSam Leffler 			 * However trying to re-init the interface
4692cf4c5a53SSam Leffler 			 * is the wrong thing to do as we've already
4693cf4c5a53SSam Leffler 			 * torn down much of our state.  There's
4694cf4c5a53SSam Leffler 			 * probably a better way to deal with this.
4695cf4c5a53SSam Leffler 			 */
4696cf4c5a53SSam Leffler 			if (!sc->sc_invalid) {
46977a79cebfSGleb Smirnoff 				mwl_init(sc);	/* XXX lose error */
4698cf4c5a53SSam Leffler 				startall = 1;
4699cf4c5a53SSam Leffler 			}
47007a79cebfSGleb Smirnoff 		}
4701cf4c5a53SSam Leffler 	} else
47027a79cebfSGleb Smirnoff 		mwl_stop(sc);
4703cf4c5a53SSam Leffler 	MWL_UNLOCK(sc);
4704cf4c5a53SSam Leffler 	if (startall)
4705cf4c5a53SSam Leffler 		ieee80211_start_all(ic);
47067a79cebfSGleb Smirnoff }
47077a79cebfSGleb Smirnoff 
47087a79cebfSGleb Smirnoff static int
47097a79cebfSGleb Smirnoff mwl_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
47107a79cebfSGleb Smirnoff {
47117a79cebfSGleb Smirnoff 	struct mwl_softc *sc = ic->ic_softc;
47127a79cebfSGleb Smirnoff 	struct ifreq *ifr = data;
47137a79cebfSGleb Smirnoff 	int error = 0;
47147a79cebfSGleb Smirnoff 
47157a79cebfSGleb Smirnoff 	switch (cmd) {
4716cf4c5a53SSam Leffler 	case SIOCGMVSTATS:
4717cf4c5a53SSam Leffler 		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
47187a79cebfSGleb Smirnoff #if 0
4719cf4c5a53SSam Leffler 		/* NB: embed these numbers to get a consistent view */
47202c705cadSGleb Smirnoff 		sc->sc_stats.mst_tx_packets =
472113f2ef16SJustin Hibbits 		    if_get_counter(ifp, IFCOUNTER_OPACKETS);
47222c705cadSGleb Smirnoff 		sc->sc_stats.mst_rx_packets =
472313f2ef16SJustin Hibbits 		    if_get_counter(ifp, IFCOUNTER_IPACKETS);
47247a79cebfSGleb Smirnoff #endif
4725cf4c5a53SSam Leffler 		/*
4726cf4c5a53SSam Leffler 		 * NB: Drop the softc lock in case of a page fault;
4727cf4c5a53SSam Leffler 		 * we'll accept any potential inconsistency in the
4728cf4c5a53SSam Leffler 		 * statistics.  The alternative is to copy the data
4729cf4c5a53SSam Leffler 		 * to a local structure.
4730cf4c5a53SSam Leffler 		 */
4731541d96aaSBrooks Davis 		return (copyout(&sc->sc_stats, ifr_data_get_ptr(ifr),
4732541d96aaSBrooks Davis 		    sizeof (sc->sc_stats)));
4733cf4c5a53SSam Leffler #ifdef MWL_DIAGAPI
4734cf4c5a53SSam Leffler 	case SIOCGMVDIAG:
4735cf4c5a53SSam Leffler 		/* XXX check privs */
4736cf4c5a53SSam Leffler 		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
4737cf4c5a53SSam Leffler 	case SIOCGMVRESET:
4738cf4c5a53SSam Leffler 		/* XXX check privs */
4739cf4c5a53SSam Leffler 		MWL_LOCK(sc);
4740cf4c5a53SSam Leffler 		error = mwl_ioctl_reset(sc, (struct mwl_diag *) ifr);
4741cf4c5a53SSam Leffler 		MWL_UNLOCK(sc);
4742cf4c5a53SSam Leffler 		break;
4743cf4c5a53SSam Leffler #endif /* MWL_DIAGAPI */
4744cf4c5a53SSam Leffler 	default:
47457a79cebfSGleb Smirnoff 		error = ENOTTY;
4746cf4c5a53SSam Leffler 		break;
4747cf4c5a53SSam Leffler 	}
47487a79cebfSGleb Smirnoff 	return (error);
4749cf4c5a53SSam Leffler }
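
/*
 * Hedged userland sketch, kept under #if 0 since it is not driver code:
 * one way a tool might pull the statistics exported by SIOCGMVSTATS
 * above.  The "wlan0" vap name and the struct mwl_stats type name are
 * assumptions (the ioctl and stats definitions come from the driver's
 * ioctl header); the ifr_data contract follows from the copyout above.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

static void
fetch_mwl_stats(const char *ifname)
{
	struct mwl_stats stats;			/* assumed stats type */
	struct ifreq ifr;
	int s;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		err(1, "socket");
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));	/* e.g. "wlan0" */
	ifr.ifr_data = (caddr_t) &stats;	/* driver copies sc_stats here */
	if (ioctl(s, SIOCGMVSTATS, &ifr) < 0)
		err(1, "SIOCGMVSTATS");
	close(s);
}
#endif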
4750cf4c5a53SSam Leffler 
4751cf4c5a53SSam Leffler #ifdef	MWL_DEBUG
4752cf4c5a53SSam Leffler static int
4753cf4c5a53SSam Leffler mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4754cf4c5a53SSam Leffler {
4755cf4c5a53SSam Leffler 	struct mwl_softc *sc = arg1;
4756cf4c5a53SSam Leffler 	int debug, error;
4757cf4c5a53SSam Leffler 
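	/*
	 * The sysctl value packs two debug masks into one word: the
	 * HAL's debug bits live in the top 8 bits and the driver's
	 * MWL_DEBUG_* bits in the low 24, matching the split applied
	 * on the way back in below.
	 */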
4758cf4c5a53SSam Leffler 	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4759cf4c5a53SSam Leffler 	error = sysctl_handle_int(oidp, &debug, 0, req);
4760cf4c5a53SSam Leffler 	if (error || !req->newptr)
4761cf4c5a53SSam Leffler 		return error;
4762cf4c5a53SSam Leffler 	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4763cf4c5a53SSam Leffler 	sc->sc_debug = debug & 0x00ffffff;
4764cf4c5a53SSam Leffler 	return 0;
4765cf4c5a53SSam Leffler }
4766cf4c5a53SSam Leffler #endif /* MWL_DEBUG */
4767cf4c5a53SSam Leffler 
4768cf4c5a53SSam Leffler static void
4769cf4c5a53SSam Leffler mwl_sysctlattach(struct mwl_softc *sc)
4770cf4c5a53SSam Leffler {
4771cf4c5a53SSam Leffler #ifdef	MWL_DEBUG
4772cf4c5a53SSam Leffler 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
4773cf4c5a53SSam Leffler 	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
4774cf4c5a53SSam Leffler 
4775cf4c5a53SSam Leffler 	sc->sc_debug = mwl_debug;
47767029da5cSPawel Biernacki 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
47777029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
4778cf4c5a53SSam Leffler 	    mwl_sysctl_debug, "I", "control debugging printfs");
4779cf4c5a53SSam Leffler #endif
4780cf4c5a53SSam Leffler }
4781cf4c5a53SSam Leffler 
4782cf4c5a53SSam Leffler /*
4783cf4c5a53SSam Leffler  * Announce various information on device/driver attach.
4784cf4c5a53SSam Leffler  */
4785cf4c5a53SSam Leffler static void
4786cf4c5a53SSam Leffler mwl_announce(struct mwl_softc *sc)
4787cf4c5a53SSam Leffler {
4788cf4c5a53SSam Leffler 
47897a79cebfSGleb Smirnoff 	device_printf(sc->sc_dev, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
4790cf4c5a53SSam Leffler 		sc->sc_hwspecs.hwVersion,
4791cf4c5a53SSam Leffler 		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
4792cf4c5a53SSam Leffler 		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
4793cf4c5a53SSam Leffler 		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
4794cf4c5a53SSam Leffler 		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
4795cf4c5a53SSam Leffler 		sc->sc_hwspecs.regionCode);
4796cf4c5a53SSam Leffler 	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;
4797cf4c5a53SSam Leffler 
4798cf4c5a53SSam Leffler 	if (bootverbose) {
4799cf4c5a53SSam Leffler 		int i;
4800cf4c5a53SSam Leffler 		for (i = 0; i <= WME_AC_VO; i++) {
4801cf4c5a53SSam Leffler 			struct mwl_txq *txq = sc->sc_ac2q[i];
48027a79cebfSGleb Smirnoff 			device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n",
4803cf4c5a53SSam Leffler 				txq->qnum, ieee80211_wme_acnames[i]);
4804cf4c5a53SSam Leffler 		}
4805cf4c5a53SSam Leffler 	}
4806cf4c5a53SSam Leffler 	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
48077a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "using %u rx descriptors\n", mwl_rxdesc);
4808cf4c5a53SSam Leffler 	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
48097a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "using %u rx buffers\n", mwl_rxbuf);
4810cf4c5a53SSam Leffler 	if (bootverbose || mwl_txbuf != MWL_TXBUF)
48117a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "using %u tx buffers\n", mwl_txbuf);
4812cf4c5a53SSam Leffler 	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
48137a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "multi-bss support\n");
4814cf4c5a53SSam Leffler #ifdef MWL_TX_NODROP
4815cf4c5a53SSam Leffler 	if (bootverbose)
48167a79cebfSGleb Smirnoff 		device_printf(sc->sc_dev, "no tx drop\n");
4817cf4c5a53SSam Leffler #endif
4818cf4c5a53SSam Leffler }
4819