xref: /freebsd/sys/dev/mwl/if_mwl.c (revision 7067450010931479f8dd97e51e4c5bf6a4d34c7e)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
5  * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer,
13  *    without modification.
14  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
16  *    redistribution must be conditioned upon including a substantially
17  *    similar Disclaimer requirement for further binary redistribution.
18  *
19  * NO WARRANTY
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
23  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
24  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
25  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
28  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGES.
31  */
32 
33 #include <sys/cdefs.h>
34 /*
35  * Driver for the Marvell 88W8363 Wireless LAN controller.
36  */
37 
38 #include "opt_inet.h"
39 #include "opt_mwl.h"
40 #include "opt_wlan.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/sysctl.h>
45 #include <sys/mbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <sys/kernel.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/errno.h>
53 #include <sys/callout.h>
54 #include <sys/bus.h>
55 #include <sys/endian.h>
56 #include <sys/kthread.h>
57 #include <sys/taskqueue.h>
58 
59 #include <machine/bus.h>
60 
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65 #include <net/if_types.h>
66 #include <net/if_arp.h>
67 #include <net/ethernet.h>
68 #include <net/if_llc.h>
69 
70 #include <net/bpf.h>
71 
72 #include <net80211/ieee80211_var.h>
73 #include <net80211/ieee80211_input.h>
74 #include <net80211/ieee80211_regdomain.h>
75 
76 #ifdef INET
77 #include <netinet/in.h>
78 #include <netinet/if_ether.h>
79 #endif /* INET */
80 
81 #include <dev/mwl/if_mwlvar.h>
82 #include <dev/mwl/mwldiag.h>
83 
84 static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
85 		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
86 		    const uint8_t [IEEE80211_ADDR_LEN],
87 		    const uint8_t [IEEE80211_ADDR_LEN]);
88 static void	mwl_vap_delete(struct ieee80211vap *);
89 static int	mwl_setupdma(struct mwl_softc *);
90 static int	mwl_hal_reset(struct mwl_softc *sc);
91 static int	mwl_init(struct mwl_softc *);
92 static void	mwl_parent(struct ieee80211com *);
93 static int	mwl_reset(struct ieee80211vap *, u_long);
94 static void	mwl_stop(struct mwl_softc *);
95 static void	mwl_start(struct mwl_softc *);
96 static int	mwl_transmit(struct ieee80211com *, struct mbuf *);
97 static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
98 			const struct ieee80211_bpf_params *);
99 static int	mwl_media_change(if_t);
100 static void	mwl_watchdog(void *);
101 static int	mwl_ioctl(struct ieee80211com *, u_long, void *);
102 static void	mwl_radar_proc(void *, int);
103 static void	mwl_chanswitch_proc(void *, int);
104 static void	mwl_bawatchdog_proc(void *, int);
105 static int	mwl_key_alloc(struct ieee80211vap *,
106 			struct ieee80211_key *,
107 			ieee80211_keyix *, ieee80211_keyix *);
108 static int	mwl_key_delete(struct ieee80211vap *,
109 			const struct ieee80211_key *);
110 static int	mwl_key_set(struct ieee80211vap *,
111 			const struct ieee80211_key *);
112 static int	_mwl_key_set(struct ieee80211vap *,
113 			const struct ieee80211_key *,
114 			const uint8_t mac[IEEE80211_ADDR_LEN]);
115 static int	mwl_mode_init(struct mwl_softc *);
116 static void	mwl_update_mcast(struct ieee80211com *);
117 static void	mwl_update_promisc(struct ieee80211com *);
118 static void	mwl_updateslot(struct ieee80211com *);
119 static int	mwl_beacon_setup(struct ieee80211vap *);
120 static void	mwl_beacon_update(struct ieee80211vap *, int);
121 #ifdef MWL_HOST_PS_SUPPORT
122 static void	mwl_update_ps(struct ieee80211vap *, int);
123 static int	mwl_set_tim(struct ieee80211_node *, int);
124 #endif
125 static int	mwl_dma_setup(struct mwl_softc *);
126 static void	mwl_dma_cleanup(struct mwl_softc *);
127 static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
128 		    const uint8_t [IEEE80211_ADDR_LEN]);
129 static void	mwl_node_cleanup(struct ieee80211_node *);
130 static void	mwl_node_drain(struct ieee80211_node *);
131 static void	mwl_node_getsignal(const struct ieee80211_node *,
132 			int8_t *, int8_t *);
133 static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
134 			struct ieee80211_mimo_info *);
135 static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
136 static void	mwl_rx_proc(void *, int);
137 static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
138 static int	mwl_tx_setup(struct mwl_softc *, int, int);
139 static int	mwl_wme_update(struct ieee80211com *);
140 static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
141 static void	mwl_tx_cleanup(struct mwl_softc *);
142 static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
143 static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
144 			     struct mwl_txbuf *, struct mbuf *);
145 static void	mwl_tx_proc(void *, int);
146 static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
147 static void	mwl_draintxq(struct mwl_softc *);
148 static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
149 static int	mwl_recv_action(struct ieee80211_node *,
150 			const struct ieee80211_frame *,
151 			const uint8_t *, const uint8_t *);
152 static int	mwl_addba_request(struct ieee80211_node *,
153 			struct ieee80211_tx_ampdu *, int dialogtoken,
154 			int baparamset, int batimeout);
155 static int	mwl_addba_response(struct ieee80211_node *,
156 			struct ieee80211_tx_ampdu *, int status,
157 			int baparamset, int batimeout);
158 static void	mwl_addba_stop(struct ieee80211_node *,
159 			struct ieee80211_tx_ampdu *);
160 static int	mwl_startrecv(struct mwl_softc *);
161 static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
162 			struct ieee80211_channel *);
163 static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
164 static void	mwl_scan_start(struct ieee80211com *);
165 static void	mwl_scan_end(struct ieee80211com *);
166 static void	mwl_set_channel(struct ieee80211com *);
167 static int	mwl_peerstadb(struct ieee80211_node *,
168 			int aid, int staid, MWL_HAL_PEERINFO *pi);
169 static int	mwl_localstadb(struct ieee80211vap *);
170 static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
171 static int	allocstaid(struct mwl_softc *sc, int aid);
172 static void	delstaid(struct mwl_softc *sc, int staid);
173 static void	mwl_newassoc(struct ieee80211_node *, int);
174 static void	mwl_agestations(void *);
175 static int	mwl_setregdomain(struct ieee80211com *,
176 			struct ieee80211_regdomain *, int,
177 			struct ieee80211_channel []);
178 static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
179 			struct ieee80211_channel []);
180 static int	mwl_getchannels(struct mwl_softc *);
181 
182 static void	mwl_sysctlattach(struct mwl_softc *);
183 static void	mwl_announce(struct mwl_softc *);
184 
SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Marvell driver parameters");

/* Tunables exported under hw.mwl.*; most are boot-time loader tunables. */
static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
	    0, "rx descriptors allocated");
static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
	    0, "rx buffers allocated");
static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
	    0, "tx buffers allocated");
static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
	    0, "tx buffers to send at once");
static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
	    0, "max rx buffers to process per interrupt");
static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
	    0, "min free rx buffers before restarting traffic");

#ifdef MWL_DEBUG
static	int mwl_debug = 0;
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
	    0, "control debugging printfs");
/* Debug message classes; or'd together into sc_debug / hw.mwl.debug. */
enum {
	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
	MWL_DEBUG_ANY		= 0xffffffff
};
/* Dump received frames when RECV tracing is on (beacons only w/ RECV_ALL). */
#define	IFF_DUMPPKTS_RECV(sc, wh) \
    ((sc->sc_debug & MWL_DEBUG_RECV) && \
      ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IEEE80211_IS_MGMT_BEACON(wh)))
#define	IFF_DUMPPKTS_XMIT(sc) \
	(sc->sc_debug & MWL_DEBUG_XMIT)

#define	DPRINTF(sc, m, fmt, ...) do {				\
	if (sc->sc_debug & (m))					\
		printf(fmt, __VA_ARGS__);			\
} while (0)
#define	KEYPRINTF(sc, hk, mac) do {				\
	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
		mwl_keyprint(sc, __func__, hk, mac);		\
} while (0)
static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
#else
/* Debug disabled: all tracing macros compile away to nothing. */
#define	IFF_DUMPPKTS_RECV(sc, wh)	0
#define	IFF_DUMPPKTS_XMIT(sc)		0
#define	DPRINTF(sc, m, fmt, ...)	do { (void )sc; } while (0)
#define	KEYPRINTF(sc, k, mac)		do { (void )sc; } while (0)
#endif

static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");

/*
 * Each packet has fixed front matter: a 2-byte length
 * of the payload, followed by a 4-address 802.11 header
 * (regardless of the actual header and always w/o any
 * QoS header).  The payload then follows.
 */
struct mwltxrec {
	uint16_t fwlen;				/* payload length for the f/w */
	struct ieee80211_frame_addr4 wh;	/* 4-address 802.11 header */
} __packed;
264 
265 /*
266  * Read/Write shorthands for accesses to BAR 0.  Note
267  * that all BAR 1 operations are done in the "hal" and
268  * there should be no reference to them here.
269  */
#ifdef MWL_DEBUG
/* Read a 32-bit register in BAR 0 (debug builds only). */
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	uint32_t v;

	v = bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
	return (v);
}
#endif
277 
/* Write a 32-bit register in BAR 0. */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
283 
284 int
mwl_attach(uint16_t devid,struct mwl_softc * sc)285 mwl_attach(uint16_t devid, struct mwl_softc *sc)
286 {
287 	struct ieee80211com *ic = &sc->sc_ic;
288 	struct mwl_hal *mh;
289 	int error = 0;
290 
291 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
292 
293 	/*
294 	 * Setup the RX free list lock early, so it can be consistently
295 	 * removed.
296 	 */
297 	MWL_RXFREE_INIT(sc);
298 
299 	mh = mwl_hal_attach(sc->sc_dev, devid,
300 	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
301 	if (mh == NULL) {
302 		device_printf(sc->sc_dev, "unable to attach HAL\n");
303 		error = EIO;
304 		goto bad;
305 	}
306 	sc->sc_mh = mh;
307 	/*
308 	 * Load firmware so we can get setup.  We arbitrarily
309 	 * pick station firmware; we'll re-load firmware as
310 	 * needed so setting up the wrong mode isn't a big deal.
311 	 */
312 	if (mwl_hal_fwload(mh, NULL) != 0) {
313 		device_printf(sc->sc_dev, "unable to setup builtin firmware\n");
314 		error = EIO;
315 		goto bad1;
316 	}
317 	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
318 		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
319 		error = EIO;
320 		goto bad1;
321 	}
322 	error = mwl_getchannels(sc);
323 	if (error != 0)
324 		goto bad1;
325 
326 	sc->sc_txantenna = 0;		/* h/w default */
327 	sc->sc_rxantenna = 0;		/* h/w default */
328 	sc->sc_invalid = 0;		/* ready to go, enable int handling */
329 	sc->sc_ageinterval = MWL_AGEINTERVAL;
330 
331 	/*
332 	 * Allocate tx+rx descriptors and populate the lists.
333 	 * We immediately push the information to the firmware
334 	 * as otherwise it gets upset.
335 	 */
336 	error = mwl_dma_setup(sc);
337 	if (error != 0) {
338 		device_printf(sc->sc_dev, "failed to setup descriptors: %d\n",
339 		    error);
340 		goto bad1;
341 	}
342 	error = mwl_setupdma(sc);	/* push to firmware */
343 	if (error != 0)			/* NB: mwl_setupdma prints msg */
344 		goto bad1;
345 
346 	callout_init(&sc->sc_timer, 1);
347 	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
348 	mbufq_init(&sc->sc_snd, ifqmaxlen);
349 
350 	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
351 		taskqueue_thread_enqueue, &sc->sc_tq);
352 	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
353 		"%s taskq", device_get_nameunit(sc->sc_dev));
354 
355 	NET_TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
356 	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
357 	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
358 	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);
359 
360 	/* NB: insure BK queue is the lowest priority h/w queue */
361 	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
362 		device_printf(sc->sc_dev,
363 		    "unable to setup xmit queue for %s traffic!\n",
364 		     ieee80211_wme_acnames[WME_AC_BK]);
365 		error = EIO;
366 		goto bad2;
367 	}
368 	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
369 	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
370 	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
371 		/*
372 		 * Not enough hardware tx queues to properly do WME;
373 		 * just punt and assign them all to the same h/w queue.
374 		 * We could do a better job of this if, for example,
375 		 * we allocate queues when we switch from station to
376 		 * AP mode.
377 		 */
378 		if (sc->sc_ac2q[WME_AC_VI] != NULL)
379 			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
380 		if (sc->sc_ac2q[WME_AC_BE] != NULL)
381 			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
382 		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
383 		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
384 		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
385 	}
386 	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);
387 
388 	ic->ic_softc = sc;
389 	ic->ic_name = device_get_nameunit(sc->sc_dev);
390 	/* XXX not right but it's not used anywhere important */
391 	ic->ic_phytype = IEEE80211_T_OFDM;
392 	ic->ic_opmode = IEEE80211_M_STA;
393 	ic->ic_caps =
394 		  IEEE80211_C_STA		/* station mode supported */
395 		| IEEE80211_C_HOSTAP		/* hostap mode */
396 		| IEEE80211_C_MONITOR		/* monitor mode */
397 #if 0
398 		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
399 		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
400 #endif
401 		| IEEE80211_C_MBSS		/* mesh point link mode */
402 		| IEEE80211_C_WDS		/* WDS supported */
403 		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
404 		| IEEE80211_C_SHSLOT		/* short slot time supported */
405 		| IEEE80211_C_WME		/* WME/WMM supported */
406 		| IEEE80211_C_BURST		/* xmit bursting supported */
407 		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
408 		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
409 		| IEEE80211_C_TXFRAG		/* handle tx frags */
410 		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
411 		| IEEE80211_C_DFS		/* DFS supported */
412 		;
413 
414 	ic->ic_htcaps =
415 		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
416 		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
417 		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
418 		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
419 		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
420 #if MWL_AGGR_SIZE == 7935
421 		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
422 #else
423 		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
424 #endif
425 #if 0
426 		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
427 		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
428 #endif
429 		/* s/w capabilities */
430 		| IEEE80211_HTC_HT		/* HT operation */
431 		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
432 		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
433 		| IEEE80211_HTC_SMPS		/* SMPS available */
434 		;
435 
436 	/*
437 	 * Mark h/w crypto support.
438 	 * XXX no way to query h/w support.
439 	 */
440 	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
441 			  |  IEEE80211_CRYPTO_AES_CCM
442 			  |  IEEE80211_CRYPTO_TKIP
443 			  |  IEEE80211_CRYPTO_TKIPMIC
444 			  ;
445 	/*
446 	 * Transmit requires space in the packet for a special
447 	 * format transmit record and optional padding between
448 	 * this record and the payload.  Ask the net80211 layer
449 	 * to arrange this when encapsulating packets so we can
450 	 * add it efficiently.
451 	 */
452 	ic->ic_headroom = sizeof(struct mwltxrec) -
453 		sizeof(struct ieee80211_frame);
454 
455 	IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_hwspecs.macAddr);
456 
457 	/* call MI attach routine. */
458 	ieee80211_ifattach(ic);
459 	ic->ic_setregdomain = mwl_setregdomain;
460 	ic->ic_getradiocaps = mwl_getradiocaps;
461 	/* override default methods */
462 	ic->ic_raw_xmit = mwl_raw_xmit;
463 	ic->ic_newassoc = mwl_newassoc;
464 	ic->ic_updateslot = mwl_updateslot;
465 	ic->ic_update_mcast = mwl_update_mcast;
466 	ic->ic_update_promisc = mwl_update_promisc;
467 	ic->ic_wme.wme_update = mwl_wme_update;
468 	ic->ic_transmit = mwl_transmit;
469 	ic->ic_ioctl = mwl_ioctl;
470 	ic->ic_parent = mwl_parent;
471 
472 	ic->ic_node_alloc = mwl_node_alloc;
473 	sc->sc_node_cleanup = ic->ic_node_cleanup;
474 	ic->ic_node_cleanup = mwl_node_cleanup;
475 	sc->sc_node_drain = ic->ic_node_drain;
476 	ic->ic_node_drain = mwl_node_drain;
477 	ic->ic_node_getsignal = mwl_node_getsignal;
478 	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;
479 
480 	ic->ic_scan_start = mwl_scan_start;
481 	ic->ic_scan_end = mwl_scan_end;
482 	ic->ic_set_channel = mwl_set_channel;
483 
484 	sc->sc_recv_action = ic->ic_recv_action;
485 	ic->ic_recv_action = mwl_recv_action;
486 	sc->sc_addba_request = ic->ic_addba_request;
487 	ic->ic_addba_request = mwl_addba_request;
488 	sc->sc_addba_response = ic->ic_addba_response;
489 	ic->ic_addba_response = mwl_addba_response;
490 	sc->sc_addba_stop = ic->ic_addba_stop;
491 	ic->ic_addba_stop = mwl_addba_stop;
492 
493 	ic->ic_vap_create = mwl_vap_create;
494 	ic->ic_vap_delete = mwl_vap_delete;
495 
496 	ieee80211_radiotap_attach(ic,
497 	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
498 		MWL_TX_RADIOTAP_PRESENT,
499 	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
500 		MWL_RX_RADIOTAP_PRESENT);
501 	/*
502 	 * Setup dynamic sysctl's now that country code and
503 	 * regdomain are available from the hal.
504 	 */
505 	mwl_sysctlattach(sc);
506 
507 	if (bootverbose)
508 		ieee80211_announce(ic);
509 	mwl_announce(sc);
510 	return 0;
511 bad2:
512 	mwl_dma_cleanup(sc);
513 bad1:
514 	mwl_hal_detach(mh);
515 bad:
516 	MWL_RXFREE_DESTROY(sc);
517 	sc->sc_invalid = 1;
518 	return error;
519 }
520 
/*
 * Undo mwl_attach: stop the device, detach from net80211, and
 * release dma, tx queue, and hal state.  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	mbufq_drain(&sc->sc_snd);

	return 0;
}
551 
552 /*
553  * MAC address handling for multiple BSS on the same radio.
554  * The first vap uses the MAC address from the EEPROM.  For
555  * subsequent vap's we set the U/L bit (bit 1) in the MAC
556  * address and use the next six bits as an index.
557  */
558 static void
assign_address(struct mwl_softc * sc,uint8_t mac[IEEE80211_ADDR_LEN],int clone)559 assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
560 {
561 	int i;
562 
563 	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
564 		/* NB: we only do this if h/w supports multiple bssid */
565 		for (i = 0; i < 32; i++)
566 			if ((sc->sc_bssidmask & (1<<i)) == 0)
567 				break;
568 		if (i != 0)
569 			mac[0] |= (i << 2)|0x2;
570 	} else
571 		i = 0;
572 	sc->sc_bssidmask |= 1<<i;
573 	if (i == 0)
574 		sc->sc_nbssid0++;
575 }
576 
577 static void
reclaim_address(struct mwl_softc * sc,const uint8_t mac[IEEE80211_ADDR_LEN])578 reclaim_address(struct mwl_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
579 {
580 	int i = mac[0] >> 2;
581 	if (i != 0 || --sc->sc_nbssid0 == 0)
582 		sc->sc_bssidmask &= ~(1<<i);
583 }
584 
/*
 * net80211 vap creation hook.  Allocates a per-vap MAC address,
 * creates a matching hal vap where the hardware needs one
 * (AP/MBSS/STA), wires in driver method overrides, and updates the
 * per-mode vap counters and the overall operating mode.
 * Returns NULL if the mode is unsupported or the hal vap fails.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			/* NB: undo the address allocation on failure */
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;		/* NB: no hal state needed */
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;		/* mode not supported */
	}

	mvp = malloc(sizeof(struct mwl_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;	/* save for chaining */
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status,
	    mac);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
712 
/*
 * net80211 vap destruction hook.  Quiesce interrupts while the vap is
 * torn down, release the hal vap and MAC address slot where one was
 * allocated, and update the per-mode vap counters.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (sc->sc_running) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		/* NB: WDS vaps have no hal vap of their own */
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	if (sc->sc_running)
		mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable ints */
}
753 
/* Power-management suspend: just bring the device down. */
void
mwl_suspend(struct mwl_softc *sc)
{

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}
762 
763 void
mwl_resume(struct mwl_softc * sc)764 mwl_resume(struct mwl_softc *sc)
765 {
766 	int error = EDOOFUS;
767 
768 	MWL_LOCK(sc);
769 	if (sc->sc_ic.ic_nrunning > 0)
770 		error = mwl_init(sc);
771 	MWL_UNLOCK(sc);
772 
773 	if (error == 0)
774 		ieee80211_start_all(&sc->sc_ic);	/* start all vap's */
775 }
776 
/* System shutdown hook: quiesce the hardware. */
void
mwl_shutdown(void *arg)
{
	struct mwl_softc *sc = arg;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}
786 
787 /*
788  * Interrupt handler.  Most of the actual processing is deferred.
789  */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	/* rx frames ready; defer to the rx task */
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	/* tx completions pending; defer to the tx task */
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	/* f/w BA stream watchdog fired; handle in task context */
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	/* f/w command completed; notify the hal */
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;				/* NB: not handled */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;				/* NB: not handled */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
845 
/*
 * Deferred radar-detect handling: count the event and hand it to
 * net80211's DFS machinery for the current channel.
 */
static void
mwl_radar_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
	    __func__, pending);

	sc->sc_stats.mst_radardetect++;
	/* XXX stop h/w BA streams? */

	IEEE80211_LOCK(ic);
	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
	IEEE80211_UNLOCK(ic);
}
862 
/*
 * Deferred DFS channel-switch handling: clear the pending flag and
 * tell net80211 the CSA has completed.
 */
static void
mwl_chanswitch_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
	    __func__, pending);

	IEEE80211_LOCK(ic);
	sc->sc_csapending = 0;
	ieee80211_csa_completeswitch(ic);
	IEEE80211_UNLOCK(ic);
}
877 
/*
 * Tear down a BA stream flagged by the f/w watchdog.
 * NB: sp->data[0] holds the node, sp->data[1] the tx ampdu state.
 */
static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
	struct ieee80211_node *ni = sp->data[0];

	/* send DELBA and drop the stream */
	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}
886 
/*
 * Deferred BA watchdog handler; queued from the interrupt path
 * (sc_bawatchdogtask).  Queries the f/w for a bitmap identifying
 * stalled BA streams and tears them down.
 */
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	/*
	 * NB(review): the bitmap value appears to be a f/w-defined code,
	 * not a bit vector: 0xff means "all streams", 0xaa seems to be a
	 * "nothing to do" sentinel, and anything else is a single stream
	 * index — confirm against the firmware interface spec.
	 */
	if (bitmap == 0xff) {
		n = 0;
		/* disable all ba streams */
		/* NB: bitmap is reused as the stream index here */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/* disable a single ba stream */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}
931 
932 /*
933  * Convert net80211 channel to a HAL channel.
934  */
935 static void
mwl_mapchan(MWL_HAL_CHANNEL * hc,const struct ieee80211_channel * chan)936 mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
937 {
938 	hc->channel = chan->ic_ieee;
939 
940 	*(uint32_t *)&hc->channelFlags = 0;
941 	if (IEEE80211_IS_CHAN_2GHZ(chan))
942 		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
943 	else if (IEEE80211_IS_CHAN_5GHZ(chan))
944 		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
945 	if (IEEE80211_IS_CHAN_HT40(chan)) {
946 		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
947 		if (IEEE80211_IS_CHAN_HT40U(chan))
948 			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
949 		else
950 			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
951 	} else
952 		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
953 	/* XXX 10MHz channels */
954 }
955 
/*
 * Inform firmware of our tx/rx dma setup.  The BAR 0
 * writes below are for compatibility with older firmware.
 * For current firmware we send this information with a
 * cmd block via mwl_hal_sethwdma.
 */
static int
mwl_setupdma(struct mwl_softc *sc)
{
	int error, i;

	/* rx ring base; read and write pointers start at the same place */
	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);

	/* NB: the trailing MWL_NUM_ACK_QUEUES queues are not handed to f/w */
	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
		struct mwl_txq *txq = &sc->sc_txq[i];
		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
	}
	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;

	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to setup tx/rx dma; hal status %u\n", error);
		/* XXX */
	}
	return error;
}
987 
/*
 * Inform firmware of tx rate parameters.
 * Called after a channel change.
 */
static int
mwl_setcurchanrates(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	const struct ieee80211_rateset *rs;
	MWL_HAL_TXRATE rates;

	memset(&rates, 0, sizeof(rates));
	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
	/* rate used to send management frames */
	/* NB(review): uses the first supported rate, presumably the lowest
	 *     basic rate — confirm rs_rates ordering. */
	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
	/* rate used to send multicast frames */
	rates.McastRate = rates.MgtRate;

	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
}
1008 
/*
 * Inform firmware of tx rate parameters.  Called whenever
 * user-settable params change and after a channel change.
 */
static int
mwl_setrates(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ieee80211_node *ni = vap->iv_bss;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	MWL_HAL_TXRATE rates;

	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));

	/*
	 * Update the h/w rate map.
	 * NB: 0x80 for MCS is passed through unchanged
	 */
	memset(&rates, 0, sizeof(rates));
	/* rate used to send management frames */
	rates.MgtRate = tp->mgmtrate;
	/* rate used to send multicast frames */
	rates.McastRate = tp->mcastrate;

	/* while here calculate EAPOL fixed rate cookie */
	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));

	/* fixed unicast rate iff the user pinned one, else f/w auto mode */
	return mwl_hal_settxrate(mvp->mv_hvap,
	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
		RATE_FIXED : RATE_AUTO, &rates);
}
1040 
1041 /*
1042  * Setup a fixed xmit rate cookie for EAPOL frames.
1043  */
1044 static void
mwl_seteapolformat(struct ieee80211vap * vap)1045 mwl_seteapolformat(struct ieee80211vap *vap)
1046 {
1047 	struct mwl_vap *mvp = MWL_VAP(vap);
1048 	struct ieee80211_node *ni = vap->iv_bss;
1049 	enum ieee80211_phymode mode;
1050 	uint8_t rate;
1051 
1052 	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1053 
1054 	mode = ieee80211_chan2mode(ni->ni_chan);
1055 	/*
1056 	 * Use legacy rates when operating a mixed HT+non-HT bss.
1057 	 * NB: this may violate POLA for sta and wds vap's.
1058 	 */
1059 	if (mode == IEEE80211_MODE_11NA &&
1060 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1061 		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1062 	else if (mode == IEEE80211_MODE_11NG &&
1063 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1064 		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1065 	else
1066 		rate = vap->iv_txparms[mode].mgmtrate;
1067 
1068 	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1069 }
1070 
1071 /*
1072  * Map SKU+country code to region code for radar bin'ing.
1073  */
1074 static int
mwl_map2regioncode(const struct ieee80211_regdomain * rd)1075 mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1076 {
1077 	switch (rd->regdomain) {
1078 	case SKU_FCC:
1079 	case SKU_FCC3:
1080 		return DOMAIN_CODE_FCC;
1081 	case SKU_CA:
1082 		return DOMAIN_CODE_IC;
1083 	case SKU_ETSI:
1084 	case SKU_ETSI2:
1085 	case SKU_ETSI3:
1086 		if (rd->country == CTRY_SPAIN)
1087 			return DOMAIN_CODE_SPAIN;
1088 		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1089 			return DOMAIN_CODE_FRANCE;
1090 		/* XXX force 1.3.1 radar type */
1091 		return DOMAIN_CODE_ETSI_131;
1092 	case SKU_JAPAN:
1093 		return DOMAIN_CODE_MKK;
1094 	case SKU_ROW:
1095 		return DOMAIN_CODE_DGT;	/* Taiwan */
1096 	case SKU_APAC:
1097 	case SKU_APAC2:
1098 	case SKU_APAC3:
1099 		return DOMAIN_CODE_AUS;	/* Australia */
1100 	}
1101 	/* XXX KOREA? */
1102 	return DOMAIN_CODE_FCC;			/* XXX? */
1103 }
1104 
/*
 * Push vap-independent state to the firmware: antenna config,
 * radio/preamble, WMM, current channel, rate adaptation tuning
 * and region code.
 * NB: return values of the individual hal calls are ignored;
 *     this always reports success (1).
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1128 
/*
 * Bring the hardware up: stop any previous state, push vap-independent
 * state to the f/w, start the rx path, program the interrupt mask and
 * arm the watchdog.  Returns 0 on success or an errno.
 * NB: caller must hold the softc lock.
 */
static int
mwl_init(struct mwl_softc *sc)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop(sc);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		device_printf(sc->sc_dev, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.
	 * NB: MAC_EVENT and QUEUE_EMPTY are deliberately disabled (#if 0).
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	/* mark running before enabling interrupts/watchdog */
	sc->sc_running = 1;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1185 
1186 static void
mwl_stop(struct mwl_softc * sc)1187 mwl_stop(struct mwl_softc *sc)
1188 {
1189 
1190 	MWL_LOCK_ASSERT(sc);
1191 	if (sc->sc_running) {
1192 		/*
1193 		 * Shutdown the hardware and driver.
1194 		 */
1195 		sc->sc_running = 0;
1196 		callout_stop(&sc->sc_watchdog);
1197 		sc->sc_tx_timer = 0;
1198 		mwl_draintxq(sc);
1199 	}
1200 }
1201 
/*
 * Re-push per-vap state to the firmware after a reset: rates, RTS
 * threshold, short-GI, HT protection and (for AP-style vaps in RUN
 * state) the beacon frame.  Returns 0 or the beacon setup result.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap, _IEEE80211_MASKSHIFT(
		    ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1231 
/*
 * Reset the hardware w/o losing operational state.
 * Used to reset or reload hardware state for a vap.
 * NB: a no-op (returns 0) for vaps without a hal context
 *     (e.g. WDS, MONITOR).
 */
static int
mwl_reset(struct ieee80211vap *vap, u_long cmd)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	int error = 0;

	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
		struct ieee80211com *ic = vap->iv_ic;
		struct mwl_softc *sc = ic->ic_softc;
		struct mwl_hal *mh = sc->sc_mh;

		/* XXX handle DWDS sta vap change */
		/* XXX do we need to disable interrupts? */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
		error = mwl_reset_vap(vap, vap->iv_state);
		mwl_hal_intrset(mh, sc->sc_imask);
	}
	return error;
}
1255 
/*
 * Allocate a tx buffer for sending a frame.  The
 * packet is assumed to have the WME AC stored so
 * we can use it to select the appropriate h/w queue.
 * Returns NULL when the queue's free list is exhausted.
 */
static struct mwl_txbuf *
mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;

	/*
	 * Grab a TX buffer and associated resources.
	 * NB: dequeue and nfree accounting must happen under the q lock.
	 */
	MWL_TXQ_LOCK(txq);
	bf = STAILQ_FIRST(&txq->free);
	if (bf != NULL) {
		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
		txq->nfree--;
	}
	MWL_TXQ_UNLOCK(txq);
	if (bf == NULL)
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
	return bf;
}
1281 
/*
 * Return a tx buffer to the queue it came from.  Note there
 * are two cases because we must preserve the order of buffers
 * as it reflects the fixed order of descriptors in memory
 * (the firmware pre-fetches descriptors so we cannot reorder).
 * This variant re-inserts at the head (undo of mwl_gettxbuf).
 */
static void
mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
	/* clear stale frame/node refs before recycling */
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	MWL_TXQ_LOCK(txq);
	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
	txq->nfree++;
	MWL_TXQ_UNLOCK(txq);
}
1298 
/*
 * Return a completed tx buffer to the tail of its queue's free
 * list, preserving descriptor order (see mwl_puttxbuf_head).
 */
static void
mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
	/* clear stale frame/node refs before recycling */
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	MWL_TXQ_LOCK(txq);
	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
	txq->nfree++;
	MWL_TXQ_UNLOCK(txq);
}
1309 
1310 static int
mwl_transmit(struct ieee80211com * ic,struct mbuf * m)1311 mwl_transmit(struct ieee80211com *ic, struct mbuf *m)
1312 {
1313 	struct mwl_softc *sc = ic->ic_softc;
1314 	int error;
1315 
1316 	MWL_LOCK(sc);
1317 	if (!sc->sc_running) {
1318 		MWL_UNLOCK(sc);
1319 		return (ENXIO);
1320 	}
1321 	error = mbufq_enqueue(&sc->sc_snd, m);
1322 	if (error) {
1323 		MWL_UNLOCK(sc);
1324 		return (error);
1325 	}
1326 	mwl_start(sc);
1327 	MWL_UNLOCK(sc);
1328 	return (0);
1329 }
1330 
/*
 * Drain the send queue: map each frame to its AC's h/w queue,
 * hand it to the hardware, and periodically poke the firmware
 * so it starts fetching descriptors.
 * NB: caller must hold the softc lock.
 */
static void
mwl_start(struct mwl_softc *sc)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	MWL_LOCK_ASSERT(sc);
	if (!sc->sc_running || sc->sc_invalid)
		return;
	nqueued = 0;
	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			/* no buffer: drop the frame and its node ref */
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 * NB: on failure mwl_tx_start owns/frees the mbuf; we
		 *     recycle the buffer and release the node reference.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1408 
/*
 * Transmit a raw (caller-formed) 802.11 frame.
 * NB(review): the node reference is not released on the error paths;
 *     presumably net80211's raw-output path reclaims it when a non-zero
 *     status is returned — confirm against ieee80211_raw_output().
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if (!sc->sc_running || sc->sc_invalid) {
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		mwl_puttxbuf_head(txq, bf);

		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1461 
1462 static int
mwl_media_change(if_t ifp)1463 mwl_media_change(if_t ifp)
1464 {
1465 	struct ieee80211vap *vap;
1466 	int error;
1467 
1468 	/* NB: only the fixed rate can change and that doesn't need a reset */
1469 	error = ieee80211_media_change(ifp);
1470 	if (error != 0)
1471 		return (error);
1472 
1473 	vap = if_getsoftc(ifp);
1474 	mwl_setrates(vap);
1475 	return (0);
1476 }
1477 
1478 #ifdef MWL_DEBUG
/*
 * Debug helper: dump a HAL key (index, cipher, key bytes, mac,
 * TKIP MICs and flags) to the console.
 * NB(review): indexes ciphers[] by hk->keyTypeId; assumes the
 *     KEY_TYPE_ID_* values are 0..2 in WEP/TKIP/AES order — confirm.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
1504 #endif
1505 
/*
 * Allocate a key cache slot for a unicast key.  The
 * firmware handles key allocation and every station is
 * guaranteed key space so we are always successful.
 * Returns 1 on success, 0 on a bogus global-key request.
 */
static int
mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{
	struct mwl_softc *sc = vap->iv_ic->ic_softc;

	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
		if (!ieee80211_is_key_global(vap, k)) {
			/* should not happen */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
				"%s: bogus group key\n", __func__);
			return 0;
		}
		/* give the caller what they requested */
		*keyix = *rxkeyix = ieee80211_crypto_get_key_wepidx(vap, k);
	} else {
		/*
		 * Firmware handles key allocation.
		 * NB: slot 0 is a placeholder; the f/w picks the real slot.
		 */
		*keyix = *rxkeyix = 0;
	}
	return 1;
}
1535 
/*
 * Delete a key entry allocated by mwl_key_alloc.
 * Returns 1 on success, 0 on failure (no hal context for the
 * opmode, unknown cipher, or a hal error).
 */
static int
mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	MWL_HAL_KEYVAL hk;
	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps share the parent AP's hal context */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}

	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
	    __func__, k->wk_keyix);

	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (k->wk_cipher->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/* NB: reset is issued against the broadcast address */
	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
}
1582 
1583 static __inline int
addgroupflags(MWL_HAL_KEYVAL * hk,const struct ieee80211_key * k)1584 addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1585 {
1586 	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1587 		if (k->wk_flags & IEEE80211_KEY_XMIT)
1588 			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1589 		if (k->wk_flags & IEEE80211_KEY_RECV)
1590 			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1591 		return 1;
1592 	} else
1593 		return 0;
1594 }
1595 
/*
 * Set the key cache contents for the specified key.  Key cache
 * slot(s) must already have been allocated by mwl_key_alloc.
 * Thin wrapper that plumbs the key's own mac address through.
 */
static int
mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	return (_mwl_key_set(vap, k, k->wk_macaddr));
}
1605 
/*
 * Plumb a key into the firmware's sta database: translate the
 * net80211 key into a MWL_HAL_KEYVAL and pick the sta db address
 * to write it under.  Returns 1 on success, 0 on failure.
 */
static int
_mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps share the parent AP's hal context */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		/* seed the f/w replay counter from the current TSC */
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 *     just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
			/* XXX plumb to local sta db too for static key wep */
			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
		}
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS node
		 * setup so we would plumb the key to the wrong mac
		 * address (it'd be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1710 
/*
 * Set the multicast filter contents into the hardware.
 * XXX f/w has no support; just defer to the os.
 * NB: the whole body is compiled out (#if 0) — currently a no-op.
 */
static void
mwl_setmcastfilter(struct mwl_softc *sc)
{
#if 0
	struct ether_multi *enm;
	struct ether_multistep estep;
	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
	uint8_t *mp;
	int nmc;

	mp = macs;
	nmc = 0;
	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* XXX Punt on ranges. */
		if (nmc == MWL_HAL_MCAST_MAX ||
		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
			if_setflagsbit(ifp, IFF_ALLMULTI, 0);
			return;
		}
		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
		mp += IEEE80211_ADDR_LEN, nmc++;
		ETHER_NEXT_MULTI(estep, enm);
	}
	if_setflagsbit(ifp, 0, IFF_ALLMULTI);
	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
#endif
}
1743 
/*
 * Push current rx-filter mode (promiscuous, multicast) to the
 * firmware.  Always returns 0.
 */
static int
mwl_mode_init(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setpromisc(mh, ic->ic_promisc > 0);
	mwl_setmcastfilter(sc);

	return 0;
}
1755 
/*
 * Callback from the 802.11 layer after a multicast state change.
 * NB: currently a no-op since mwl_setmcastfilter is compiled out.
 */
static void
mwl_update_mcast(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	mwl_setmcastfilter(sc);
}
1766 
/*
 * Callback from the 802.11 layer after a promiscuous mode change.
 * Note this interface does not check the operating mode as this
 * is an internal callback and we are expected to honor the current
 * state (e.g. this is used for setting the interface in promiscuous
 * mode when operating in hostap mode to do ACS).
 */
static void
mwl_update_promisc(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	mwl_hal_setpromisc(sc->sc_mh, ic->ic_promisc > 0);
}
1781 
/*
 * Callback from the 802.11 layer to update the slot time
 * based on the current setting.  We use it to notify the
 * firmware of ERP changes and the f/w takes care of things
 * like slot time and preamble.
 */
static void
mwl_updateslot(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	int prot;

	/* NB: can be called early; suppress needless cmds */
	if (!sc->sc_running)
		return;

	/*
	 * Calculate the ERP flags.  The firmware will use
	 * this to carry out the appropriate measures.
	 */
	prot = 0;
	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
		if (ic->ic_flags & IEEE80211_F_USEPROT)
			prot |= IEEE80211_ERP_USE_PROTECTION;
		if (ic->ic_flags & IEEE80211_F_USEBARKER)
			prot |= IEEE80211_ERP_LONG_PREAMBLE;
	}

	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
	    ic->ic_flags);

	mwl_hal_setgprot(mh, prot);
}
1821 
/*
 * Setup the beacon frame: build it with net80211, hand the bytes
 * to the firmware (which transmits beacons itself), then free the
 * mbuf.  Returns 0 or ENOBUFS.
 */
static int
mwl_beacon_setup(struct ieee80211vap *vap)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *ni = vap->iv_bss;
	struct mbuf *m;

	m = ieee80211_beacon_alloc(ni);
	if (m == NULL)
		return ENOBUFS;
	/* f/w keeps its own copy; the mbuf is not needed after this */
	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
	m_free(m);

	return 0;
}
1840 
/*
 * Update the beacon frame in response to a change.
 * ERP and HTINFO updates are pushed to the f/w directly and then,
 * like CAPS/WME/APPIE/CSA, the beacon is rebuilt wholesale below;
 * TIM updates return early since the firmware forms the TIM itself.
 */
static void
mwl_beacon_update(struct ieee80211vap *vap, int item)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	KASSERT(hvap != NULL, ("no beacon"));
	switch (item) {
	case IEEE80211_BEACON_ERP:
		mwl_updateslot(ic);
		break;
	case IEEE80211_BEACON_HTINFO:
		mwl_hal_setnprotmode(hvap, _IEEE80211_MASKSHIFT(
		    ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		break;
	case IEEE80211_BEACON_CAPS:
	case IEEE80211_BEACON_WME:
	case IEEE80211_BEACON_APPIE:
	case IEEE80211_BEACON_CSA:
		break;
	case IEEE80211_BEACON_TIM:
		/* NB: firmware always forms TIM */
		return;
	}
	/* XXX retain beacon frame and update */
	mwl_beacon_setup(vap);
}
1871 
1872 static void
mwl_load_cb(void * arg,bus_dma_segment_t * segs,int nsegs,int error)1873 mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1874 {
1875 	bus_addr_t *paddr = (bus_addr_t*) arg;
1876 	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1877 	*paddr = segs->ds_addr;
1878 }
1879 
1880 #ifdef MWL_HOST_PS_SUPPORT
1881 /*
1882  * Handle power save station occupancy changes.
1883  */
1884 static void
mwl_update_ps(struct ieee80211vap * vap,int nsta)1885 mwl_update_ps(struct ieee80211vap *vap, int nsta)
1886 {
1887 	struct mwl_vap *mvp = MWL_VAP(vap);
1888 
1889 	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1890 		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1891 	mvp->mv_last_ps_sta = nsta;
1892 }
1893 
1894 /*
1895  * Handle associated station power save state changes.
1896  */
1897 static int
mwl_set_tim(struct ieee80211_node * ni,int set)1898 mwl_set_tim(struct ieee80211_node *ni, int set)
1899 {
1900 	struct ieee80211vap *vap = ni->ni_vap;
1901 	struct mwl_vap *mvp = MWL_VAP(vap);
1902 
1903 	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1904 		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1905 		    IEEE80211_AID(ni->ni_associd), set);
1906 		return 1;
1907 	} else
1908 		return 0;
1909 }
1910 #endif /* MWL_HOST_PS_SUPPORT */
1911 
1912 static int
mwl_desc_setup(struct mwl_softc * sc,const char * name,struct mwl_descdma * dd,int nbuf,size_t bufsize,int ndesc,size_t descsize)1913 mwl_desc_setup(struct mwl_softc *sc, const char *name,
1914 	struct mwl_descdma *dd,
1915 	int nbuf, size_t bufsize, int ndesc, size_t descsize)
1916 {
1917 	uint8_t *ds;
1918 	int error;
1919 
1920 	DPRINTF(sc, MWL_DEBUG_RESET,
1921 	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
1922 	    __func__, name, nbuf, (uintmax_t) bufsize,
1923 	    ndesc, (uintmax_t) descsize);
1924 
1925 	dd->dd_name = name;
1926 	dd->dd_desc_len = nbuf * ndesc * descsize;
1927 
1928 	/*
1929 	 * Setup DMA descriptor area.
1930 	 */
1931 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
1932 		       PAGE_SIZE, 0,		/* alignment, bounds */
1933 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1934 		       BUS_SPACE_MAXADDR,	/* highaddr */
1935 		       NULL, NULL,		/* filter, filterarg */
1936 		       dd->dd_desc_len,		/* maxsize */
1937 		       1,			/* nsegments */
1938 		       dd->dd_desc_len,		/* maxsegsize */
1939 		       BUS_DMA_ALLOCNOW,	/* flags */
1940 		       NULL,			/* lockfunc */
1941 		       NULL,			/* lockarg */
1942 		       &dd->dd_dmat);
1943 	if (error != 0) {
1944 		device_printf(sc->sc_dev, "cannot allocate %s DMA tag\n", dd->dd_name);
1945 		return error;
1946 	}
1947 
1948 	/* allocate descriptors */
1949 	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
1950 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
1951 				 &dd->dd_dmamap);
1952 	if (error != 0) {
1953 		device_printf(sc->sc_dev, "unable to alloc memory for %u %s descriptors, "
1954 			"error %u\n", nbuf * ndesc, dd->dd_name, error);
1955 		goto fail1;
1956 	}
1957 
1958 	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
1959 				dd->dd_desc, dd->dd_desc_len,
1960 				mwl_load_cb, &dd->dd_desc_paddr,
1961 				BUS_DMA_NOWAIT);
1962 	if (error != 0) {
1963 		device_printf(sc->sc_dev, "unable to map %s descriptors, error %u\n",
1964 			dd->dd_name, error);
1965 		goto fail2;
1966 	}
1967 
1968 	ds = dd->dd_desc;
1969 	memset(ds, 0, dd->dd_desc_len);
1970 	DPRINTF(sc, MWL_DEBUG_RESET,
1971 	    "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
1972 	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
1973 	    (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
1974 
1975 	return 0;
1976 fail2:
1977 	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
1978 fail1:
1979 	bus_dma_tag_destroy(dd->dd_dmat);
1980 	memset(dd, 0, sizeof(*dd));
1981 	return error;
1982 #undef DS2PHYS
1983 }
1984 
1985 static void
mwl_desc_cleanup(struct mwl_softc * sc,struct mwl_descdma * dd)1986 mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
1987 {
1988 	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
1989 	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
1990 	bus_dma_tag_destroy(dd->dd_dmat);
1991 
1992 	memset(dd, 0, sizeof(*dd));
1993 }
1994 
1995 /*
1996  * Construct a tx q's free list.  The order of entries on
1997  * the list must reflect the physical layout of tx descriptors
1998  * because the firmware pre-fetches descriptors.
1999  *
2000  * XXX might be better to use indices into the buffer array.
2001  */
2002 static void
mwl_txq_reset(struct mwl_softc * sc,struct mwl_txq * txq)2003 mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2004 {
2005 	struct mwl_txbuf *bf;
2006 	int i;
2007 
2008 	bf = txq->dma.dd_bufptr;
2009 	STAILQ_INIT(&txq->free);
2010 	for (i = 0; i < mwl_txbuf; i++, bf++)
2011 		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2012 	txq->nfree = i;
2013 }
2014 
2015 #define	DS2PHYS(_dd, _ds) \
2016 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2017 
2018 static int
mwl_txdma_setup(struct mwl_softc * sc,struct mwl_txq * txq)2019 mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
2020 {
2021 	int error, bsize, i;
2022 	struct mwl_txbuf *bf;
2023 	struct mwl_txdesc *ds;
2024 
2025 	error = mwl_desc_setup(sc, "tx", &txq->dma,
2026 			mwl_txbuf, sizeof(struct mwl_txbuf),
2027 			MWL_TXDESC, sizeof(struct mwl_txdesc));
2028 	if (error != 0)
2029 		return error;
2030 
2031 	/* allocate and setup tx buffers */
2032 	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
2033 	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2034 	if (bf == NULL) {
2035 		device_printf(sc->sc_dev, "malloc of %u tx buffers failed\n",
2036 			mwl_txbuf);
2037 		return ENOMEM;
2038 	}
2039 	txq->dma.dd_bufptr = bf;
2040 
2041 	ds = txq->dma.dd_desc;
2042 	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
2043 		bf->bf_desc = ds;
2044 		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
2045 		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2046 				&bf->bf_dmamap);
2047 		if (error != 0) {
2048 			device_printf(sc->sc_dev, "unable to create dmamap for tx "
2049 				"buffer %u, error %u\n", i, error);
2050 			return error;
2051 		}
2052 	}
2053 	mwl_txq_reset(sc, txq);
2054 	return 0;
2055 }
2056 
2057 static void
mwl_txdma_cleanup(struct mwl_softc * sc,struct mwl_txq * txq)2058 mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2059 {
2060 	struct mwl_txbuf *bf;
2061 	int i;
2062 
2063 	bf = txq->dma.dd_bufptr;
2064 	for (i = 0; i < mwl_txbuf; i++, bf++) {
2065 		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2066 		KASSERT(bf->bf_node == NULL, ("node on free list"));
2067 		if (bf->bf_dmamap != NULL)
2068 			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2069 	}
2070 	STAILQ_INIT(&txq->free);
2071 	txq->nfree = 0;
2072 	if (txq->dma.dd_bufptr != NULL) {
2073 		free(txq->dma.dd_bufptr, M_MWLDEV);
2074 		txq->dma.dd_bufptr = NULL;
2075 	}
2076 	if (txq->dma.dd_desc_len != 0)
2077 		mwl_desc_cleanup(sc, &txq->dma);
2078 }
2079 
2080 static int
mwl_rxdma_setup(struct mwl_softc * sc)2081 mwl_rxdma_setup(struct mwl_softc *sc)
2082 {
2083 	int error, jumbosize, bsize, i;
2084 	struct mwl_rxbuf *bf;
2085 	struct mwl_jumbo *rbuf;
2086 	struct mwl_rxdesc *ds;
2087 	caddr_t data;
2088 
2089 	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2090 			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2091 			1, sizeof(struct mwl_rxdesc));
2092 	if (error != 0)
2093 		return error;
2094 
2095 	/*
2096 	 * Receive is done to a private pool of jumbo buffers.
2097 	 * This allows us to attach to mbuf's and avoid re-mapping
2098 	 * memory on each rx we post.  We allocate a large chunk
2099 	 * of memory and manage it in the driver.  The mbuf free
2100 	 * callback method is used to reclaim frames after sending
2101 	 * them up the stack.  By default we allocate 2x the number of
2102 	 * rx descriptors configured so we have some slop to hold
2103 	 * us while frames are processed.
2104 	 */
2105 	if (mwl_rxbuf < 2*mwl_rxdesc) {
2106 		device_printf(sc->sc_dev,
2107 		    "too few rx dma buffers (%d); increasing to %d\n",
2108 		    mwl_rxbuf, 2*mwl_rxdesc);
2109 		mwl_rxbuf = 2*mwl_rxdesc;
2110 	}
2111 	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2112 	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2113 
2114 	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2115 		       PAGE_SIZE, 0,		/* alignment, bounds */
2116 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2117 		       BUS_SPACE_MAXADDR,	/* highaddr */
2118 		       NULL, NULL,		/* filter, filterarg */
2119 		       sc->sc_rxmemsize,	/* maxsize */
2120 		       1,			/* nsegments */
2121 		       sc->sc_rxmemsize,	/* maxsegsize */
2122 		       BUS_DMA_ALLOCNOW,	/* flags */
2123 		       NULL,			/* lockfunc */
2124 		       NULL,			/* lockarg */
2125 		       &sc->sc_rxdmat);
2126 	if (error != 0) {
2127 		device_printf(sc->sc_dev, "could not create rx DMA tag\n");
2128 		return error;
2129 	}
2130 
2131 	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2132 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2133 				 &sc->sc_rxmap);
2134 	if (error != 0) {
2135 		device_printf(sc->sc_dev, "could not alloc %ju bytes of rx DMA memory\n",
2136 		    (uintmax_t) sc->sc_rxmemsize);
2137 		return error;
2138 	}
2139 
2140 	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2141 				sc->sc_rxmem, sc->sc_rxmemsize,
2142 				mwl_load_cb, &sc->sc_rxmem_paddr,
2143 				BUS_DMA_NOWAIT);
2144 	if (error != 0) {
2145 		device_printf(sc->sc_dev, "could not load rx DMA map\n");
2146 		return error;
2147 	}
2148 
2149 	/*
2150 	 * Allocate rx buffers and set them up.
2151 	 */
2152 	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2153 	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2154 	if (bf == NULL) {
2155 		device_printf(sc->sc_dev, "malloc of %u rx buffers failed\n", bsize);
2156 		return error;
2157 	}
2158 	sc->sc_rxdma.dd_bufptr = bf;
2159 
2160 	STAILQ_INIT(&sc->sc_rxbuf);
2161 	ds = sc->sc_rxdma.dd_desc;
2162 	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2163 		bf->bf_desc = ds;
2164 		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2165 		/* pre-assign dma buffer */
2166 		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2167 		/* NB: tail is intentional to preserve descriptor order */
2168 		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2169 	}
2170 
2171 	/*
2172 	 * Place remainder of dma memory buffers on the free list.
2173 	 */
2174 	SLIST_INIT(&sc->sc_rxfree);
2175 	for (; i < mwl_rxbuf; i++) {
2176 		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2177 		rbuf = MWL_JUMBO_DATA2BUF(data);
2178 		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2179 		sc->sc_nrxfree++;
2180 	}
2181 	return 0;
2182 }
2183 #undef DS2PHYS
2184 
/*
 * Reclaim all rx dma resources: the jumbo pool mapping and memory,
 * the host-side rx buffer array, and the rx descriptors.  Every
 * step is guarded so this is safe to call after a partial setup
 * (and safe to call more than once).
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmem_paddr != 0) {
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmem_paddr = 0;
	}
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	/* non-zero dd_desc_len implies mwl_desc_setup completed */
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
2203 
2204 static int
mwl_dma_setup(struct mwl_softc * sc)2205 mwl_dma_setup(struct mwl_softc *sc)
2206 {
2207 	int error, i;
2208 
2209 	error = mwl_rxdma_setup(sc);
2210 	if (error != 0) {
2211 		mwl_rxdma_cleanup(sc);
2212 		return error;
2213 	}
2214 
2215 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2216 		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2217 		if (error != 0) {
2218 			mwl_dma_cleanup(sc);
2219 			return error;
2220 		}
2221 	}
2222 	return 0;
2223 }
2224 
2225 static void
mwl_dma_cleanup(struct mwl_softc * sc)2226 mwl_dma_cleanup(struct mwl_softc *sc)
2227 {
2228 	int i;
2229 
2230 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2231 		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2232 	mwl_rxdma_cleanup(sc);
2233 }
2234 
2235 static struct ieee80211_node *
mwl_node_alloc(struct ieee80211vap * vap,const uint8_t mac[IEEE80211_ADDR_LEN])2236 mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2237 {
2238 	struct ieee80211com *ic = vap->iv_ic;
2239 	struct mwl_softc *sc = ic->ic_softc;
2240 	const size_t space = sizeof(struct mwl_node);
2241 	struct mwl_node *mn;
2242 
2243 	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2244 	if (mn == NULL) {
2245 		/* XXX stat+msg */
2246 		return NULL;
2247 	}
2248 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2249 	return &mn->mn_node;
2250 }
2251 
2252 static void
mwl_node_cleanup(struct ieee80211_node * ni)2253 mwl_node_cleanup(struct ieee80211_node *ni)
2254 {
2255 	struct ieee80211com *ic = ni->ni_ic;
2256         struct mwl_softc *sc = ic->ic_softc;
2257 	struct mwl_node *mn = MWL_NODE(ni);
2258 
2259 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
2260 	    __func__, ni, ni->ni_ic, mn->mn_staid);
2261 
2262 	if (mn->mn_staid != 0) {
2263 		struct ieee80211vap *vap = ni->ni_vap;
2264 
2265 		if (mn->mn_hvap != NULL) {
2266 			if (vap->iv_opmode == IEEE80211_M_STA)
2267 				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
2268 			else
2269 				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
2270 		}
2271 		/*
2272 		 * NB: legacy WDS peer sta db entry is installed using
2273 		 * the associate ap's hvap; use it again to delete it.
2274 		 * XXX can vap be NULL?
2275 		 */
2276 		else if (vap->iv_opmode == IEEE80211_M_WDS &&
2277 		    MWL_VAP(vap)->mv_ap_hvap != NULL)
2278 			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
2279 			    ni->ni_macaddr);
2280 		delstaid(sc, mn->mn_staid);
2281 		mn->mn_staid = 0;
2282 	}
2283 	sc->sc_node_cleanup(ni);
2284 }
2285 
/*
 * Reclaim rx dma buffers from packets sitting on the ampdu
 * reorder queue for a station.  We replace buffers with a
 * system cluster (if available).
 *
 * NOTE(review): the implementation below is compiled out (#if 0)
 * and appears to use non-FreeBSD mbuf/cluster primitives
 * (pool_cache_get_paddr, MEXTREMOVE, MEXTADD with that signature);
 * as shipped the function is a no-op.
 */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place.  Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}
2339 
2340 /*
2341  * Callback to reclaim resources.  We first let the
2342  * net80211 layer do it's thing, then if we are still
2343  * blocked by a lack of rx dma buffers we walk the ampdu
2344  * reorder q's to reclaim buffers by copying to a system
2345  * cluster.
2346  */
2347 static void
mwl_node_drain(struct ieee80211_node * ni)2348 mwl_node_drain(struct ieee80211_node *ni)
2349 {
2350 	struct ieee80211com *ic = ni->ni_ic;
2351         struct mwl_softc *sc = ic->ic_softc;
2352 	struct mwl_node *mn = MWL_NODE(ni);
2353 
2354 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2355 	    __func__, ni, ni->ni_vap, mn->mn_staid);
2356 
2357 	/* NB: call up first to age out ampdu q's */
2358 	sc->sc_node_drain(ni);
2359 
2360 	/* XXX better to not check low water mark? */
2361 	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2362 	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2363 		uint8_t tid;
2364 		/*
2365 		 * Walk the reorder q and reclaim rx dma buffers by copying
2366 		 * the packet contents into clusters.
2367 		 */
2368 		for (tid = 0; tid < WME_NUM_TID; tid++) {
2369 			struct ieee80211_rx_ampdu *rap;
2370 
2371 			rap = &ni->ni_rx_ampdu[tid];
2372 			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2373 				continue;
2374 			if (rap->rxa_qframes)
2375 				mwl_ampdu_rxdma_reclaim(rap);
2376 		}
2377 	}
2378 }
2379 
/*
 * Report a station's signal state: rssi from net80211's smoothed
 * estimate, noise floor as a fixed -95 dBm placeholder (per-chain
 * hardware noise data is not yet smoothed — see the #if 0 path).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2395 
/*
 * Convert Hardware per-antenna rssi info to common format:
 * Let a1, a2, a3 represent the amplitudes per chain
 * Let amax represent max[a1, a2, a3]
 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
 * We store a table of 4*20*log10(idx) values; the extra factor
 * of 4 retains some fractional precision (removed by the >>2 in
 * CVT below).
 *
 * Values are stored in .5 db format capped at 127.
 */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
/* Per-chain rssi relative to the strongest chain, in .5 dB units,
 * saturating at 127.  NB: uses rssi/rssi_max from enclosing scope. */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx) for idx in [1..31]; entry 0 is unused
	 * (assumes chain amplitudes are < 32 — TODO confirm) */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* locate the strongest chain */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->ch[0].rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->ch[1].rssi[0], mn->mn_ai.rssi_b);
	CVT(mi->ch[2].rssi[0], mn->mn_ai.rssi_c);

	mi->ch[0].noise[0] = mn->mn_ai.nf_a;
	mi->ch[1].noise[0] = mn->mn_ai.nf_b;
	mi->ch[2].noise[0] = mn->mn_ai.nf_c;
#undef CVT
}
2440 
2441 static __inline void *
mwl_getrxdma(struct mwl_softc * sc)2442 mwl_getrxdma(struct mwl_softc *sc)
2443 {
2444 	struct mwl_jumbo *buf;
2445 	void *data;
2446 
2447 	/*
2448 	 * Allocate from jumbo pool.
2449 	 */
2450 	MWL_RXFREE_LOCK(sc);
2451 	buf = SLIST_FIRST(&sc->sc_rxfree);
2452 	if (buf == NULL) {
2453 		DPRINTF(sc, MWL_DEBUG_ANY,
2454 		    "%s: out of rx dma buffers\n", __func__);
2455 		sc->sc_stats.mst_rx_nodmabuf++;
2456 		data = NULL;
2457 	} else {
2458 		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2459 		sc->sc_nrxfree--;
2460 		data = MWL_JUMBO_BUF2DATA(buf);
2461 	}
2462 	MWL_RXFREE_UNLOCK(sc);
2463 	return data;
2464 }
2465 
2466 static __inline void
mwl_putrxdma(struct mwl_softc * sc,void * data)2467 mwl_putrxdma(struct mwl_softc *sc, void *data)
2468 {
2469 	struct mwl_jumbo *buf;
2470 
2471 	/* XXX bounds check data */
2472 	MWL_RXFREE_LOCK(sc);
2473 	buf = MWL_JUMBO_DATA2BUF(data);
2474 	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2475 	sc->sc_nrxfree++;
2476 	MWL_RXFREE_UNLOCK(sc);
2477 }
2478 
/*
 * (Re)initialize an rx descriptor for the firmware, attaching a
 * dma buffer first if the slot has none.  Returns ENOMEM — after
 * marking the descriptor so the firmware skips it — when no dma
 * buffer is available.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.  PktLen is pre-loaded with the buffer
	 * capacity; the firmware overwrites it with the frame length.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	/* hand ownership to the firmware last, then sync */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2517 
/*
 * External-storage free callback for rx mbufs: return the jumbo
 * dma buffer to the free pool and, if rx was throttled for lack
 * of buffers, re-enable rx interrupts once the pool refills past
 * the low-water mark.
 */
static void
mwl_ext_free(struct mbuf *m)
{
	struct mwl_softc *sc = m->m_ext.ext_arg1;

	/* XXX bounds check data */
	mwl_putrxdma(sc, m->m_ext.ext_buf);
	/*
	 * If we were previously blocked by a lack of rx dma buffers
	 * check if we now have enough to restart rx interrupt handling.
	 * NOTE(review): sc_rxblocked/sc_nrxfree are read here without
	 * the rxfree lock — presumably a benign race; worth confirming.
	 */
	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
		sc->sc_rxblocked = 0;
		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
	}
}
2534 
/*
 * Fixed portion of an 802.11 BAR (Block Ack Request) control frame;
 * used only for header-size computation in mwl_anyhdrsize.
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];		/* frame control */
	u_int8_t	i_dur[2];		/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2542 
2543 /*
2544  * Like ieee80211_anyhdrsize, but handles BAR frames
2545  * specially so the logic below to piece the 802.11
2546  * header together works.
2547  */
2548 static __inline int
mwl_anyhdrsize(const void * data)2549 mwl_anyhdrsize(const void *data)
2550 {
2551 	const struct ieee80211_frame *wh = data;
2552 
2553 	if (IEEE80211_IS_CTL(wh)) {
2554 		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2555 		case IEEE80211_FC0_SUBTYPE_CTS:
2556 		case IEEE80211_FC0_SUBTYPE_ACK:
2557 			return sizeof(struct ieee80211_frame_ack);
2558 		case IEEE80211_FC0_SUBTYPE_BAR:
2559 			return sizeof(struct mwl_frame_bar);
2560 		}
2561 		return sizeof(struct ieee80211_frame_min);
2562 	} else
2563 		return ieee80211_hdrsize(data);
2564 }
2565 
2566 static void
mwl_handlemicerror(struct ieee80211com * ic,const uint8_t * data)2567 mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2568 {
2569 	const struct ieee80211_frame *wh;
2570 	struct ieee80211_node *ni;
2571 
2572 	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2573 	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2574 	if (ni != NULL) {
2575 		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2576 		ieee80211_free_node(ni);
2577 	}
2578 }
2579 
2580 /*
2581  * Convert hardware signal strength to rssi.  The value
2582  * provided by the device has the noise floor added in;
2583  * we need to compensate for this but we don't have that
2584  * so we use a fixed value.
2585  *
2586  * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2587  * offset is already set as part of the initial gain.  This
2588  * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2589  */
2590 static __inline int
cvtrssi(uint8_t ssi)2591 cvtrssi(uint8_t ssi)
2592 {
2593 	int rssi = (int) ssi + 8;
2594 	/* XXX hack guess until we have a real noise floor */
2595 	rssi = 2*(87 - rssi);	/* NB: .5 dBm units */
2596 	return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
2597 }
2598 
/*
 * RX processing task.  Walks the rx descriptor ring (up to
 * mwl_rxquota entries), reconstructs each frame's 802.11 header
 * from the firmware's 4-address layout, attaches the jumbo dma
 * buffer to an mbuf zero-copy, and dispatches it to net80211.
 * Descriptor ownership and replacement-buffer ordering here are
 * critical to staying in sync with the firmware.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX fixed noise floor guess */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor the firmware still owns */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			counter_u64_add(ic->ic_ierrors, 1);
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		m_extadd(m, data, MWL_AGGR_SIZE, mwl_ext_free, sc, NULL, 0,
		    EXT_NET_DRV);
		/* point m_data where the reconstructed header will start */
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh))
			*(uint16_t *)ieee80211_getqos(wh) = ds->QosCtrl;
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
		    IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			/* stash per-antenna info for mwl_node_getmimoinfo */
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	sc->sc_rxnext = bf;

	if (mbufq_first(&sc->sc_snd) != NULL) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(sc);
	}
}
2819 
/*
 * One-time s/w initialization of a transmit queue: set up the
 * queue lock, record the queue number, and link the descriptors
 * of the pre-populated free buffer list into a circular chain.
 */
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
	struct mwl_txbuf *bf, *bn;
	struct mwl_txdesc *ds;

	MWL_TXQ_LOCK_INIT(sc, txq);
	txq->qnum = qnum;
	txq->txpri = 0;	/* XXX */
#if 0
	/* NB: q setup by mwl_txdma_setup XXX */
	STAILQ_INIT(&txq->free);
#endif
	/*
	 * Point each descriptor at the next buffer's descriptor;
	 * the last one wraps back to the first, forming a ring.
	 */
	STAILQ_FOREACH(bf, &txq->free, bf_list) {
		bf->bf_txq = txq;

		ds = bf->bf_desc;
		bn = STAILQ_NEXT(bf, bf_list);
		if (bn == NULL)
			bn = STAILQ_FIRST(&txq->free);
		ds->pPhysNext = htole32(bn->bf_daddr);
	}
	STAILQ_INIT(&txq->active);
}
2844 
2845 /*
2846  * Setup a hardware data transmit queue for the specified
2847  * access control.  We record the mapping from ac's
2848  * to h/w queues for use by mwl_tx_start.
2849  */
2850 static int
mwl_tx_setup(struct mwl_softc * sc,int ac,int mvtype)2851 mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2852 {
2853 	struct mwl_txq *txq;
2854 
2855 	if (ac >= nitems(sc->sc_ac2q)) {
2856 		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2857 			ac, nitems(sc->sc_ac2q));
2858 		return 0;
2859 	}
2860 	if (mvtype >= MWL_NUM_TX_QUEUES) {
2861 		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2862 			mvtype, MWL_NUM_TX_QUEUES);
2863 		return 0;
2864 	}
2865 	txq = &sc->sc_txq[mvtype];
2866 	mwl_txq_init(sc, txq, mvtype);
2867 	sc->sc_ac2q[ac] = txq;
2868 	return 1;
2869 }
2870 
2871 /*
2872  * Update WME parameters for a transmit queue.
2873  */
2874 static int
mwl_txq_update(struct mwl_softc * sc,int ac)2875 mwl_txq_update(struct mwl_softc *sc, int ac)
2876 {
2877 #define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2878 	struct ieee80211com *ic = &sc->sc_ic;
2879 	struct chanAccParams chp;
2880 	struct mwl_txq *txq = sc->sc_ac2q[ac];
2881 	struct wmeParams *wmep;
2882 	struct mwl_hal *mh = sc->sc_mh;
2883 	int aifs, cwmin, cwmax, txoplim;
2884 
2885 	ieee80211_wme_ic_getparams(ic, &chp);
2886 	wmep = &chp.cap_wmeParams[ac];
2887 
2888 	aifs = wmep->wmep_aifsn;
2889 	/* XXX in sta mode need to pass log values for cwmin/max */
2890 	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2891 	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2892 	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
2893 
2894 	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
2895 		device_printf(sc->sc_dev, "unable to update hardware queue "
2896 			"parameters for %s traffic!\n",
2897 			ieee80211_wme_acnames[ac]);
2898 		return 0;
2899 	}
2900 	return 1;
2901 #undef MWL_EXPONENT_TO_VALUE
2902 }
2903 
2904 /*
2905  * Callback from the 802.11 layer to update WME parameters.
2906  */
2907 static int
mwl_wme_update(struct ieee80211com * ic)2908 mwl_wme_update(struct ieee80211com *ic)
2909 {
2910 	struct mwl_softc *sc = ic->ic_softc;
2911 
2912 	return !mwl_txq_update(sc, WME_AC_BE) ||
2913 	    !mwl_txq_update(sc, WME_AC_BK) ||
2914 	    !mwl_txq_update(sc, WME_AC_VI) ||
2915 	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
2916 }
2917 
2918 /*
2919  * Reclaim resources for a setup queue.
2920  */
2921 static void
mwl_tx_cleanupq(struct mwl_softc * sc,struct mwl_txq * txq)2922 mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
2923 {
2924 	/* XXX hal work? */
2925 	MWL_TXQ_LOCK_DESTROY(txq);
2926 }
2927 
2928 /*
2929  * Reclaim all tx queue resources.
2930  */
2931 static void
mwl_tx_cleanup(struct mwl_softc * sc)2932 mwl_tx_cleanup(struct mwl_softc *sc)
2933 {
2934 	int i;
2935 
2936 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2937 		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
2938 }
2939 
/*
 * DMA-map an outbound mbuf chain into the tx buffer's segment
 * list.  Chains needing more than MWL_TXDESC segments are
 * linearized and mapped again.  On success bf->bf_segs/bf_nseg
 * describe the mapping and bf->bf_m holds the (possibly
 * replaced) chain; on failure the mbuf is freed and an errno
 * returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* NB: retry the load with the linearized chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3001 
/*
 * Convert a legacy dot11 rate code to the rate index used in
 * the tx descriptor Format field.  Unknown codes map to 0.
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int dot11rates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ix;

	for (ix = 0; ix < (int)(sizeof(dot11rates) / sizeof(dot11rates[0])); ix++)
		if (dot11rates[ix] == rate)
			return ix;
	return 0;
}
3022 
3023 /*
3024  * Calculate fixed tx rate information per client state;
3025  * this value is suitable for writing to the Format field
3026  * of a tx descriptor.
3027  */
3028 static uint16_t
mwl_calcformat(uint8_t rate,const struct ieee80211_node * ni)3029 mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
3030 {
3031 	uint16_t fmt;
3032 
3033 	fmt = _IEEE80211_SHIFTMASK(3, EAGLE_TXD_ANTENNA)
3034 	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
3035 		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
3036 	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
3037 		fmt |= EAGLE_TXD_FORMAT_HT
3038 		    /* NB: 0x80 implicitly stripped from ucastrate */
3039 		    | _IEEE80211_SHIFTMASK(rate, EAGLE_TXD_RATE);
3040 		/* XXX short/long GI may be wrong; re-check */
3041 		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
3042 			fmt |= EAGLE_TXD_CHW_40
3043 			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
3044 			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3045 		} else {
3046 			fmt |= EAGLE_TXD_CHW_20
3047 			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
3048 			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3049 		}
3050 	} else {			/* legacy rate */
3051 		fmt |= EAGLE_TXD_FORMAT_LEGACY
3052 		    | _IEEE80211_SHIFTMASK(mwl_cvtlegacyrate(rate),
3053 			EAGLE_TXD_RATE)
3054 		    | EAGLE_TXD_CHW_20
3055 		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
3056 		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
3057 			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
3058 	}
3059 	return fmt;
3060 }
3061 
/*
 * Prepare and queue one frame for transmission: apply s/w
 * crypto encap if the frame is protected, prepend the 2-byte
 * firmware length record ahead of the 802.11 header, DMA-map
 * the chain, fill in the tx descriptor (rate format, priority
 * queue, BA stream match), and place the buffer on the txq
 * active list marked firmware-owned.  Returns 0 on success or
 * an errno; on every error path m0 has been freed.
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	pktlen = m0->m_pkthdr.len;
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		/* NB: qos kept in frame byte order for the descriptor */
		qos = *(uint16_t *)ieee80211_getqos(wh);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		/* NB: should not fail; leading space was checked above */
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/* NB: EAPOL frames will never have qos set */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	MWL_TXQ_LOCK(txq);
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_tx_timer = 5;
	MWL_TXQ_UNLOCK(txq);

	return 0;
}
3298 
/*
 * Map a legacy rate index from the tx descriptor back to the
 * corresponding dot11 rate code; out-of-range indices yield 0.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };

	if (rix < 0 || rix >= (int)(sizeof(ieeerates) / sizeof(ieeerates[0])))
		return 0;
	return ieeerates[rix];
}
3306 
3307 /*
3308  * Process completed xmit descriptors from the specified queue.
3309  */
3310 static int
mwl_tx_processq(struct mwl_softc * sc,struct mwl_txq * txq)3311 mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3312 {
3313 #define	EAGLE_TXD_STATUS_MCAST \
3314 	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
3315 	struct ieee80211com *ic = &sc->sc_ic;
3316 	struct mwl_txbuf *bf;
3317 	struct mwl_txdesc *ds;
3318 	struct ieee80211_node *ni;
3319 	int nreaped;
3320 	uint32_t status;
3321 
3322 	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
3323 	for (nreaped = 0;; nreaped++) {
3324 		MWL_TXQ_LOCK(txq);
3325 		bf = STAILQ_FIRST(&txq->active);
3326 		if (bf == NULL) {
3327 			MWL_TXQ_UNLOCK(txq);
3328 			break;
3329 		}
3330 		ds = bf->bf_desc;
3331 		MWL_TXDESC_SYNC(txq, ds,
3332 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3333 		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3334 			MWL_TXQ_UNLOCK(txq);
3335 			break;
3336 		}
3337 		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3338 		MWL_TXQ_UNLOCK(txq);
3339 
3340 #ifdef MWL_DEBUG
3341 		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3342 			mwl_printtxbuf(bf, txq->qnum, nreaped);
3343 #endif
3344 		ni = bf->bf_node;
3345 		if (ni != NULL) {
3346 			status = le32toh(ds->Status);
3347 			int rate;
3348 			if (status & EAGLE_TXD_STATUS_OK) {
3349 				uint16_t Format = le16toh(ds->Format);
3350 				uint8_t txant = _IEEE80211_MASKSHIFT(Format,
3351 				    EAGLE_TXD_ANTENNA);
3352 
3353 				sc->sc_stats.mst_ant_tx[txant]++;
3354 				if (status & EAGLE_TXD_STATUS_OK_RETRY)
3355 					sc->sc_stats.mst_tx_retries++;
3356 				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3357 					sc->sc_stats.mst_tx_mretries++;
3358 				if (txq->qnum >= MWL_WME_AC_VO)
3359 					ic->ic_wme.wme_hipri_traffic++;
3360 				rate = _IEEE80211_MASKSHIFT(Format,
3361 				    EAGLE_TXD_RATE);
3362 				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3363 					rate = mwl_cvtlegacyrix(rate);
3364 				} else
3365 					rate |= IEEE80211_RATE_MCS;
3366 				sc->sc_stats.mst_tx_rate = rate;
3367 				ieee80211_node_set_txrate_dot11rate(ni, rate);
3368 			} else {
3369 				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3370 					sc->sc_stats.mst_tx_linkerror++;
3371 				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3372 					sc->sc_stats.mst_tx_xretries++;
3373 				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3374 					sc->sc_stats.mst_tx_aging++;
3375 				if (bf->bf_m->m_flags & M_FF)
3376 					sc->sc_stats.mst_ff_txerr++;
3377 			}
3378 			if (bf->bf_m->m_flags & M_TXCB)
3379 				/* XXX strip fw len in case header inspected */
3380 				m_adj(bf->bf_m, sizeof(uint16_t));
3381 			ieee80211_tx_complete(ni, bf->bf_m,
3382 			    (status & EAGLE_TXD_STATUS_OK) == 0);
3383 		} else
3384 			m_freem(bf->bf_m);
3385 		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3386 
3387 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3388 		    BUS_DMASYNC_POSTWRITE);
3389 		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3390 
3391 		mwl_puttxbuf_tail(txq, bf);
3392 	}
3393 	return nreaped;
3394 #undef EAGLE_TXD_STATUS_MCAST
3395 }
3396 
3397 /*
3398  * Deferred processing of transmit interrupt; special-cased
3399  * for four hardware queues, 0-3.
3400  */
3401 static void
mwl_tx_proc(void * arg,int npending)3402 mwl_tx_proc(void *arg, int npending)
3403 {
3404 	struct mwl_softc *sc = arg;
3405 	int nreaped;
3406 
3407 	/*
3408 	 * Process each active queue.
3409 	 */
3410 	nreaped = 0;
3411 	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3412 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3413 	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3414 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3415 	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3416 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3417 	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3418 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3419 
3420 	if (nreaped != 0) {
3421 		sc->sc_tx_timer = 0;
3422 		if (mbufq_first(&sc->sc_snd) != NULL) {
3423 			/* NB: kick fw; the tx thread may have been preempted */
3424 			mwl_hal_txstart(sc->sc_mh, 0);
3425 			mwl_start(sc);
3426 		}
3427 	}
3428 }
3429 
/*
 * Drain every frame queued on a single h/w transmit queue:
 * unload DMA state, release the node reference held by the tx
 * path, free the mbuf and return the buffer to the free list.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix __unused;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ieee80211com *ic = &sc->sc_ic;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3473 
3474 /*
3475  * Drain the transmit queues and reclaim resources.
3476  */
3477 static void
mwl_draintxq(struct mwl_softc * sc)3478 mwl_draintxq(struct mwl_softc *sc)
3479 {
3480 	int i;
3481 
3482 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3483 		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3484 	sc->sc_tx_timer = 0;
3485 }
3486 
#ifdef MWL_DIAGAPI
/*
 * Reset the transmit queues to a pristine state after a fw download.
 */
static void
mwl_resettxq(struct mwl_softc *sc)
{
	int qnum;

	for (qnum = 0; qnum < MWL_NUM_TX_QUEUES; qnum++)
		mwl_txq_reset(sc, &sc->sc_txq[qnum]);
}
#endif /* MWL_DIAGAPI */
3500 
3501 /*
3502  * Clear the transmit queues of any frames submitted for the
3503  * specified vap.  This is done when the vap is deleted so we
3504  * don't potentially reference the vap after it is gone.
3505  * Note we cannot remove the frames; we only reclaim the node
3506  * reference.
3507  */
3508 static void
mwl_cleartxq(struct mwl_softc * sc,struct ieee80211vap * vap)3509 mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3510 {
3511 	struct mwl_txq *txq;
3512 	struct mwl_txbuf *bf;
3513 	int i;
3514 
3515 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3516 		txq = &sc->sc_txq[i];
3517 		MWL_TXQ_LOCK(txq);
3518 		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3519 			struct ieee80211_node *ni = bf->bf_node;
3520 			if (ni != NULL && ni->ni_vap == vap) {
3521 				bf->bf_node = NULL;
3522 				ieee80211_free_node(ni);
3523 			}
3524 		}
3525 		MWL_TXQ_UNLOCK(txq);
3526 	}
3527 }
3528 
/*
 * Intercept received action frames: HT MIMO power-save
 * announcements are forwarded to the firmware; everything else
 * is passed to the saved net80211 handler.
 */
static int
mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
	const uint8_t *frm, const uint8_t *efrm)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	const struct ieee80211_action *ia;

	ia = (const struct ieee80211_action *) frm;
	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
		const struct ieee80211_action_ht_mimopowersave *mps =
		    (const struct ieee80211_action_ht_mimopowersave *) ia;

		/* let the firmware track the station's MIMO ps state */
		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
		    _IEEE80211_MASKSHIFT(mps->am_control,
			IEEE80211_A_HT_MIMOPWRSAVE_MODE));
		return 0;
	} else
		return sc->sc_recv_action(ni, wh, frm, efrm);
}
3550 
/*
 * Intercept ADDBA requests so a firmware BA stream can be
 * allocated before net80211 transmits the request.  If no
 * stream slot is free the request is refused (return 0) and no
 * a-mpdu aggregation will be attempted for this tid.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3622 
/*
 * Intercept ADDBA responses: on success instruct the firmware
 * to create the BA stream pre-allocated in mwl_addba_request;
 * on peer NAK or f/w create failure release the stream so the
 * slot can be reused.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3689 
3690 static void
mwl_addba_stop(struct ieee80211_node * ni,struct ieee80211_tx_ampdu * tap)3691 mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3692 {
3693 	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3694 	struct mwl_bastate *bas;
3695 
3696 	bas = tap->txa_private;
3697 	if (bas != NULL) {
3698 		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3699 		    __func__, bas->bastream);
3700 		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3701 		mwl_bastream_free(bas);
3702 		tap->txa_private = NULL;
3703 	}
3704 	sc->sc_addba_stop(ni, tap);
3705 }
3706 
3707 /*
3708  * Setup the rx data structures.  This should only be
3709  * done once or we may get out of sync with the firmware.
3710  */
3711 static int
mwl_startrecv(struct mwl_softc * sc)3712 mwl_startrecv(struct mwl_softc *sc)
3713 {
3714 	if (!sc->sc_recvsetup) {
3715 		struct mwl_rxbuf *bf, *prev;
3716 		struct mwl_rxdesc *ds;
3717 
3718 		prev = NULL;
3719 		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
3720 			int error = mwl_rxbuf_init(sc, bf);
3721 			if (error != 0) {
3722 				DPRINTF(sc, MWL_DEBUG_RECV,
3723 					"%s: mwl_rxbuf_init failed %d\n",
3724 					__func__, error);
3725 				return error;
3726 			}
3727 			if (prev != NULL) {
3728 				ds = prev->bf_desc;
3729 				ds->pPhysNext = htole32(bf->bf_daddr);
3730 			}
3731 			prev = bf;
3732 		}
3733 		if (prev != NULL) {
3734 			ds = prev->bf_desc;
3735 			ds->pPhysNext =
3736 			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
3737 		}
3738 		sc->sc_recvsetup = 1;
3739 	}
3740 	mwl_mode_init(sc);		/* set filters, etc. */
3741 	return 0;
3742 }
3743 
3744 static MWL_HAL_APMODE
mwl_getapmode(const struct ieee80211vap * vap,struct ieee80211_channel * chan)3745 mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3746 {
3747 	MWL_HAL_APMODE mode;
3748 
3749 	if (IEEE80211_IS_CHAN_HT(chan)) {
3750 		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3751 			mode = AP_MODE_N_ONLY;
3752 		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3753 			mode = AP_MODE_AandN;
3754 		else if (vap->iv_flags & IEEE80211_F_PUREG)
3755 			mode = AP_MODE_GandN;
3756 		else
3757 			mode = AP_MODE_BandGandN;
3758 	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3759 		if (vap->iv_flags & IEEE80211_F_PUREG)
3760 			mode = AP_MODE_G_ONLY;
3761 		else
3762 			mode = AP_MODE_MIXED;
3763 	} else if (IEEE80211_IS_CHAN_B(chan))
3764 		mode = AP_MODE_B_ONLY;
3765 	else if (IEEE80211_IS_CHAN_A(chan))
3766 		mode = AP_MODE_A_ONLY;
3767 	else
3768 		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3769 	return mode;
3770 }
3771 
3772 static int
mwl_setapmode(struct ieee80211vap * vap,struct ieee80211_channel * chan)3773 mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3774 {
3775 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3776 	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3777 }
3778 
3779 /*
3780  * Set/change channels.
3781  */
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211com *ic = &sc->sc_ic;
	MWL_HAL_CHANNEL hchan;
	int maxtxpow;

	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	/*
	 * Convert to a HAL channel description with
	 * the flags constrained to reflect the current
	 * operating mode.
	 */
	mwl_mapchan(&hchan, chan);
	mwl_hal_intrset(mh, 0);		/* disable interrupts */
#if 0
	mwl_draintxq(sc);		/* clear pending tx frames */
#endif
	mwl_hal_setchannel(mh, &hchan);
	/*
	 * Tx power is cap'd by the regulatory setting and
	 * possibly a user-set limit.  We pass the min of
	 * these to the hal to apply them to the cal data
	 * for this channel.
	 * XXX min bound?
	 */
	/* NB: regpower is in half-dBm, ic_txpowlimit in .5 dBm units too */
	maxtxpow = 2*chan->ic_maxregpower;
	if (maxtxpow > ic->ic_txpowlimit)
		maxtxpow = ic->ic_txpowlimit;
	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
	/* NB: potentially change mcast/mgt rates */
	mwl_setcurchanrates(sc);

	/*
	 * Update internal state: radiotap channel info for both
	 * tx and rx headers, then the cached current channel.
	 */
	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
	if (IEEE80211_IS_CHAN_A(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
	} else {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
	}
	sc->sc_curchan = hchan;
	/* re-enable interrupts now that the switch is complete */
	mwl_hal_intrset(mh, sc->sc_imask);

	return 0;
}
3838 
3839 static void
mwl_scan_start(struct ieee80211com * ic)3840 mwl_scan_start(struct ieee80211com *ic)
3841 {
3842 	struct mwl_softc *sc = ic->ic_softc;
3843 
3844 	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
3845 }
3846 
3847 static void
mwl_scan_end(struct ieee80211com * ic)3848 mwl_scan_end(struct ieee80211com *ic)
3849 {
3850 	struct mwl_softc *sc = ic->ic_softc;
3851 
3852 	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
3853 }
3854 
3855 static void
mwl_set_channel(struct ieee80211com * ic)3856 mwl_set_channel(struct ieee80211com *ic)
3857 {
3858 	struct mwl_softc *sc = ic->ic_softc;
3859 
3860 	(void) mwl_chan_set(sc, ic->ic_curchan);
3861 }
3862 
3863 /*
3864  * Handle a channel switch request.  We inform the firmware
3865  * and mark the global state to suppress various actions.
3866  * NB: we issue only one request to the fw; we may be called
3867  * multiple times if there are multiple vap's.
3868  */
static void
mwl_startcsa(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct mwl_softc *sc = ic->ic_softc;
	MWL_HAL_CHANNEL hchan;

	/* only one request goes to the firmware even with multiple vaps */
	if (sc->sc_csapending)
		return;

	mwl_mapchan(&hchan, ic->ic_csa_newchan);
	/* 1 =>'s quiet channel */
	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
	sc->sc_csapending = 1;
}
3884 
3885 /*
3886  * Plumb any static WEP key for the station.  This is
3887  * necessary as we must propagate the key from the
3888  * global key table of the vap to each sta db entry.
3889  */
static void
mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	/*
	 * Only applies with plain static WEP: privacy enabled, WPA not
	 * in use, and a valid default tx key is configured.  When those
	 * hold, plumb the default key into the sta db entry for mac.
	 */
	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
		IEEE80211_F_PRIVACY &&
	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
		(void) _mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey],
				    mac);
}
3900 
/*
 * Create/update the firmware sta db entry for a peer station.
 * Returns 0 on success or the mwl_hal_newstation() error.
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
3934 
3935 static void
mwl_setglobalkeys(struct ieee80211vap * vap)3936 mwl_setglobalkeys(struct ieee80211vap *vap)
3937 {
3938 	struct ieee80211_key *wk;
3939 
3940 	wk = &vap->iv_nw_keys[0];
3941 	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
3942 		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
3943 			(void) _mwl_key_set(vap, wk, vap->iv_myaddr);
3944 }
3945 
3946 /*
3947  * Convert a legacy rate set to a firmware bitmask.
3948  */
3949 static uint32_t
get_rate_bitmap(const struct ieee80211_rateset * rs)3950 get_rate_bitmap(const struct ieee80211_rateset *rs)
3951 {
3952 	uint32_t rates;
3953 	int i;
3954 
3955 	rates = 0;
3956 	for (i = 0; i < rs->rs_nrates; i++)
3957 		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
3958 		case 2:	  rates |= 0x001; break;
3959 		case 4:	  rates |= 0x002; break;
3960 		case 11:  rates |= 0x004; break;
3961 		case 22:  rates |= 0x008; break;
3962 		case 44:  rates |= 0x010; break;
3963 		case 12:  rates |= 0x020; break;
3964 		case 18:  rates |= 0x040; break;
3965 		case 24:  rates |= 0x080; break;
3966 		case 36:  rates |= 0x100; break;
3967 		case 48:  rates |= 0x200; break;
3968 		case 72:  rates |= 0x400; break;
3969 		case 96:  rates |= 0x800; break;
3970 		case 108: rates |= 0x1000; break;
3971 		}
3972 	return rates;
3973 }
3974 
3975 /*
3976  * Construct an HT firmware bitmask from an HT rate set.
3977  */
3978 static uint32_t
get_htrate_bitmap(const struct ieee80211_htrateset * rs)3979 get_htrate_bitmap(const struct ieee80211_htrateset *rs)
3980 {
3981 	uint32_t rates;
3982 	int i;
3983 
3984 	rates = 0;
3985 	for (i = 0; i < rs->rs_nrates; i++) {
3986 		if (rs->rs_rates[i] < 16)
3987 			rates |= 1<<rs->rs_rates[i];
3988 	}
3989 	return rates;
3990 }
3991 
3992 /*
3993  * Craft station database entry for station.
3994  * NB: use host byte order here, the hal handles byte swapping.
3995  */
static MWL_HAL_PEERINFO *
mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
{
	const struct ieee80211vap *vap = ni->ni_vap;

	/* Fill pi from the node's negotiated state; returns pi. */
	memset(pi, 0, sizeof(*pi));
	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
	pi->CapInfo = ni->ni_capinfo;
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* HT capabilities, etc */
		pi->HTCapabilitiesInfo = ni->ni_htcap;
		/* XXX pi.HTCapabilitiesInfo */
	        pi->MacHTParamInfo = ni->ni_htparam;
		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
		pi->AddHtInfo.OpMode = ni->ni_htopmode;
		pi->AddHtInfo.stbc = ni->ni_htstbc;

		/* constrain according to local configuration */
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
		if (ni->ni_chw != IEEE80211_STA_RX_BW_40)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
	}
	return pi;
}
4025 
4026 /*
4027  * Re-create the local sta db entry for a vap to ensure
4028  * up to date WME state is pushed to the firmware.  Because
4029  * this resets crypto state this must be followed by a
4030  * reload of any keys in the global key table.
4031  */
static int
mwl_localstadb(struct ieee80211vap *vap)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *bss;
	MWL_HAL_PEERINFO pi;
	int error;

	switch (vap->iv_opmode) {
	case IEEE80211_M_STA:
		/* sta: install bss peer info once associated (RUN state) */
		bss = vap->iv_bss;
		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
		    vap->iv_state == IEEE80211_S_RUN ?
			mkpeerinfo(&pi, bss) : NULL,
		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
		    bss->ni_ies.wme_ie != NULL ?
			WME(bss->ni_ies.wme_ie)->wme_info : 0);
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		/* ap/mesh: no peer info, just WME flag */
		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	default:
		/* other modes have no local sta db entry */
		error = 0;
		break;
	}
	return error;
#undef WME
}
4067 
/*
 * 802.11 state machine hook.  Work is split around the call to the
 * parent (net80211) method: pre-work that must happen with the old
 * state still in place, then post-work that needs updated state
 * (e.g. iv_bss).  Returns 0 or an error from the parent/beacon setup.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    if_name(vap->iv_ifp), __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    if_name(vap->iv_ifp), __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    if_name(vap->iv_ifp), __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* first DWDS sta vap enables DWDS in the firmware */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    if_name(vap->iv_ifp), __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/*
		 * NOTE(review): this decrements on any non-RUN/SLEEP
		 * transition of a DWDS vap; verify it cannot underflow
		 * when the vap never reached RUN.
		 */
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4220 
4221 /*
4222  * Manage station id's; these are separate from AID's
4223  * as AID's may have values out of the range of possible
4224  * station id's acceptable to the firmware.
4225  */
4226 static int
allocstaid(struct mwl_softc * sc,int aid)4227 allocstaid(struct mwl_softc *sc, int aid)
4228 {
4229 	int staid;
4230 
4231 	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4232 		/* NB: don't use 0 */
4233 		for (staid = 1; staid < MWL_MAXSTAID; staid++)
4234 			if (isclr(sc->sc_staid, staid))
4235 				break;
4236 	} else
4237 		staid = aid;
4238 	setbit(sc->sc_staid, staid);
4239 	return staid;
4240 }
4241 
static void
delstaid(struct mwl_softc *sc, int staid)
{
	/* Return a station id to the free pool. */
	clrbit(sc->sc_staid, staid);
}
4247 
4248 /*
4249  * Setup driver-specific state for a newly associated node.
4250  * Note that we're called also on a re-associate, the isnew
4251  * param tells us if this is the first time or not.
4252  */
static void
mwl_newassoc(struct ieee80211_node *ni, int isnew)
{
	struct ieee80211vap *vap = ni->ni_vap;
        struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_node *mn = MWL_NODE(ni);
	MWL_HAL_PEERINFO pi;
	uint16_t aid;
	int error;

	aid = IEEE80211_AID(ni->ni_associd);
	if (isnew) {
		/* first association: allocate a station id and fw handle */
		mn->mn_staid = allocstaid(sc, aid);
		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
	} else {
		mn = MWL_NODE(ni);
		/* XXX reset BA stream? */
	}
	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
	/* (re)create the firmware sta db entry with current peer info */
	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
	if (error != 0) {
		DPRINTF(sc, MWL_DEBUG_NODE,
		    "%s: error %d creating sta db entry\n",
		    __func__, error);
		/* XXX how to deal with error? */
	}
}
4281 
4282 /*
4283  * Periodically poke the firmware to age out station state
4284  * (power save queues, pending tx aggregates).
4285  */
static void
mwl_agestations(void *arg)
{
	struct mwl_softc *sc = arg;

	/* poke the firmware to age station state, then re-arm ourselves */
	mwl_hal_setkeepalive(sc->sc_mh);
	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
}
4295 
4296 static const struct mwl_hal_channel *
findhalchannel(const MWL_HAL_CHANNELINFO * ci,int ieee)4297 findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4298 {
4299 	int i;
4300 
4301 	for (i = 0; i < ci->nchannels; i++) {
4302 		const struct mwl_hal_channel *hc = &ci->channels[i];
4303 		if (hc->ieee == ieee)
4304 			return hc;
4305 	}
4306 	return NULL;
4307 }
4308 
/*
 * Regdomain hook: validate that every proposed channel has hal
 * calibration data and clamp its max tx power to the cal limit.
 * Returns EINVAL for a channel outside 2.4/5GHz or without cal data.
 */
static int
mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
	int nchan, struct ieee80211_channel chans[])
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_CHANNELINFO *ci;
	int i;

	for (i = 0; i < nchan; i++) {
		struct ieee80211_channel *c = &chans[i];
		const struct mwl_hal_channel *hc;

		/* select the hal table for this band/width */
		if (IEEE80211_IS_CHAN_2GHZ(c)) {
			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
			    IEEE80211_IS_CHAN_HT40(c) ?
				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
			    IEEE80211_IS_CHAN_HT40(c) ?
				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
		} else {
			device_printf(sc->sc_dev,
			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
			return EINVAL;
		}
		/*
		 * Verify channel has cal data and cap tx power.
		 * NB: hal powers are in dBm, ic_maxpower in .5 dBm units.
		 */
		hc = findhalchannel(ci, c->ic_ieee);
		if (hc != NULL) {
			if (c->ic_maxpower > 2*hc->maxTxPow)
				c->ic_maxpower = 2*hc->maxTxPow;
			goto next;
		}
		if (IEEE80211_IS_CHAN_HT40(c)) {
			/*
			 * Look for the extension channel since the
			 * hal table only has the primary channel.
			 */
			hc = findhalchannel(ci, c->ic_extieee);
			if (hc != NULL) {
				if (c->ic_maxpower > 2*hc->maxTxPow)
					c->ic_maxpower = 2*hc->maxTxPow;
				goto next;
			}
		}
		device_printf(sc->sc_dev,
		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
		    __func__, c->ic_ieee, c->ic_extieee,
		    c->ic_freq, c->ic_flags);
		return EINVAL;
	next:
		;
	}
	return 0;
}
4367 
4368 #define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4369 #define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4370 
4371 static void
addht40channels(struct ieee80211_channel chans[],int maxchans,int * nchans,const MWL_HAL_CHANNELINFO * ci,int flags)4372 addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4373 	const MWL_HAL_CHANNELINFO *ci, int flags)
4374 {
4375 	int i, error;
4376 
4377 	for (i = 0; i < ci->nchannels; i++) {
4378 		const struct mwl_hal_channel *hc = &ci->channels[i];
4379 
4380 		error = ieee80211_add_channel_ht40(chans, maxchans, nchans,
4381 		    hc->ieee, hc->maxTxPow, flags);
4382 		if (error != 0 && error != ENOENT)
4383 			break;
4384 	}
4385 }
4386 
4387 static void
addchannels(struct ieee80211_channel chans[],int maxchans,int * nchans,const MWL_HAL_CHANNELINFO * ci,const uint8_t bands[])4388 addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4389 	const MWL_HAL_CHANNELINFO *ci, const uint8_t bands[])
4390 {
4391 	int i, error;
4392 
4393 	error = 0;
4394 	for (i = 0; i < ci->nchannels && error == 0; i++) {
4395 		const struct mwl_hal_channel *hc = &ci->channels[i];
4396 
4397 		error = ieee80211_add_channel(chans, maxchans, nchans,
4398 		    hc->ieee, hc->freq, hc->maxTxPow, 0, bands);
4399 	}
4400 }
4401 
static void
getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
	struct ieee80211_channel chans[])
{
	const MWL_HAL_CHANNELINFO *ci;
	uint8_t bands[IEEE80211_MODE_BYTES];

	/*
	 * Use the channel info from the hal to craft the
	 * channel list.  Note that we pass back an unsorted
	 * list; the caller is required to sort it for us
	 * (if desired).
	 */
	*nchans = 0;
	/* 2.4GHz 20MHz channels: 11b/11g/11ng */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11B);
		setbit(bands, IEEE80211_MODE_11G);
		setbit(bands, IEEE80211_MODE_11NG);
		addchannels(chans, maxchans, nchans, ci, bands);
	}
	/* 5GHz 20MHz channels: 11a/11na */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		setbit(bands, IEEE80211_MODE_11NA);
		addchannels(chans, maxchans, nchans, ci, bands);
	}
	/* HT40 variants are added after the 20MHz base channels */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
}
4438 
4439 static void
mwl_getradiocaps(struct ieee80211com * ic,int maxchans,int * nchans,struct ieee80211_channel chans[])4440 mwl_getradiocaps(struct ieee80211com *ic,
4441 	int maxchans, int *nchans, struct ieee80211_channel chans[])
4442 {
4443 	struct mwl_softc *sc = ic->ic_softc;
4444 
4445 	getchannels(sc, maxchans, nchans, chans);
4446 }
4447 
/*
 * Build the initial channel list and a default (debug) regdomain
 * at attach time.  Returns EIO when the hal reports no channels.
 */
static int
mwl_getchannels(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/*
	 * Use the channel info from the hal to craft the
	 * channel list for net80211.  Note that we pass up
	 * an unsorted list; net80211 will sort it for us.
	 */
	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
	ic->ic_nchans = 0;
	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);

	ic->ic_regdomain.regdomain = SKU_DEBUG;
	ic->ic_regdomain.country = CTRY_DEFAULT;
	ic->ic_regdomain.location = 'I';	/* indoor */
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
	ic->ic_regdomain.isocc[1] = ' ';
	return (ic->ic_nchans == 0 ? EIO : 0);
}
4469 #undef IEEE80211_CHAN_HTA
4470 #undef IEEE80211_CHAN_HTG
4471 
4472 #ifdef MWL_DEBUG
/* Dump an rx descriptor (debug aid); ix is the ring slot index. */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	/* '*' marks a completed-ok descriptor, '!' a completed-error one */
	printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
	    le32toh(ds->pPhysBuffData), ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
4488 
/* Dump a tx descriptor (debug aid); qnum/ix identify the queue slot. */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !",
	    );
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	/* multi-segment descriptors carry per-segment lengths/pointers */
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4530 #endif /* MWL_DEBUG */
4531 
4532 #if 0
/* Dump every active tx descriptor on a queue (debug aid, disabled). */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* sync so we read what the device last wrote */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4551 #endif
4552 
/*
 * Per-second watchdog: re-arms itself, counts down sc_tx_timer
 * and reports a transmit timeout when it expires.  A failing
 * keepalive command suggests the firmware itself is hung.
 */
static void
mwl_watchdog(void *arg)
{
	struct mwl_softc *sc = arg;

	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
	/* no timeout pending, or still counting down */
	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
		return;

	if (sc->sc_running && !sc->sc_invalid) {
		if (mwl_hal_setkeepalive(sc->sc_mh))
			device_printf(sc->sc_dev,
			    "transmit timeout (firmware hung?)\n");
		else
			device_printf(sc->sc_dev,
			    "transmit timeout\n");
#if 0
		mwl_reset(sc);
mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
#endif
		counter_u64_add(sc->sc_ic.ic_oerrors, 1);
		sc->sc_stats.mst_watchdog++;
	}
}
4577 
4578 #ifdef MWL_DIAGAPI
4579 /*
4580  * Diagnostic interface to the HAL.  This is used by various
4581  * tools to do things like retrieve register contents for
4582  * debugging.  The mechanism is intentionally opaque so that
4583  * it can change frequently w/o concern for compatibility.
4584  */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	/* hand the request to the hal; it may shrink outsize */
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
					md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* release only the buffers we allocated ourselves */
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4639 
/*
 * Diagnostic reset: optionally reload firmware (md_id == 0),
 * refetch h/w specs, rebuild DMA state and reset the tx/rx
 * bookkeeping.  Returns 0 or EIO/setupdma error.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4670 #endif /* MWL_DIAGAPI */
4671 
4672 static void
mwl_parent(struct ieee80211com * ic)4673 mwl_parent(struct ieee80211com *ic)
4674 {
4675 	struct mwl_softc *sc = ic->ic_softc;
4676 	int startall = 0;
4677 
4678 	MWL_LOCK(sc);
4679 	if (ic->ic_nrunning > 0) {
4680 		if (sc->sc_running) {
4681 			/*
4682 			 * To avoid rescanning another access point,
4683 			 * do not call mwl_init() here.  Instead,
4684 			 * only reflect promisc mode settings.
4685 			 */
4686 			mwl_mode_init(sc);
4687 		} else {
4688 			/*
4689 			 * Beware of being called during attach/detach
4690 			 * to reset promiscuous mode.  In that case we
4691 			 * will still be marked UP but not RUNNING.
4692 			 * However trying to re-init the interface
4693 			 * is the wrong thing to do as we've already
4694 			 * torn down much of our state.  There's
4695 			 * probably a better way to deal with this.
4696 			 */
4697 			if (!sc->sc_invalid) {
4698 				mwl_init(sc);	/* XXX lose error */
4699 				startall = 1;
4700 			}
4701 		}
4702 	} else
4703 		mwl_stop(sc);
4704 	MWL_UNLOCK(sc);
4705 	if (startall)
4706 		ieee80211_start_all(ic);
4707 }
4708 
4709 static int
mwl_ioctl(struct ieee80211com * ic,u_long cmd,void * data)4710 mwl_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
4711 {
4712 	struct mwl_softc *sc = ic->ic_softc;
4713 	struct ifreq *ifr = data;
4714 	int error = 0;
4715 
4716 	switch (cmd) {
4717 	case SIOCGMVSTATS:
4718 		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
4719 #if 0
4720 		/* NB: embed these numbers to get a consistent view */
4721 		sc->sc_stats.mst_tx_packets =
4722 		    if_get_counter(ifp, IFCOUNTER_OPACKETS);
4723 		sc->sc_stats.mst_rx_packets =
4724 		    if_get_counter(ifp, IFCOUNTER_IPACKETS);
4725 #endif
4726 		/*
4727 		 * NB: Drop the softc lock in case of a page fault;
4728 		 * we'll accept any potential inconsisentcy in the
4729 		 * statistics.  The alternative is to copy the data
4730 		 * to a local structure.
4731 		 */
4732 		return (copyout(&sc->sc_stats, ifr_data_get_ptr(ifr),
4733 		    sizeof (sc->sc_stats)));
4734 #ifdef MWL_DIAGAPI
4735 	case SIOCGMVDIAG:
4736 		/* XXX check privs */
4737 		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
4738 	case SIOCGMVRESET:
4739 		/* XXX check privs */
4740 		MWL_LOCK(sc);
4741 		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
4742 		MWL_UNLOCK(sc);
4743 		break;
4744 #endif /* MWL_DIAGAPI */
4745 	default:
4746 		error = ENOTTY;
4747 		break;
4748 	}
4749 	return (error);
4750 }
4751 
#ifdef	MWL_DEBUG
/*
 * Sysctl handler for the combined debug mask: the low 24 bits are
 * the driver's sc_debug flags, the high 8 bits the HAL debug level.
 */
static int
mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	struct mwl_softc *sc = arg1;
	int error, debug;

	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
	error = sysctl_handle_int(oidp, &debug, 0, req);
	if (error != 0 || req->newptr == NULL)
		return error;
	/* On write, split the merged value back into its two halves. */
	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
	sc->sc_debug = debug & 0x00ffffff;
	return 0;
}
#endif /* MWL_DEBUG */
4768 
/*
 * Hook up driver sysctl nodes; currently only the debug knob
 * and only when compiled with MWL_DEBUG.
 */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *oid;

	sctx = device_get_sysctl_ctx(sc->sc_dev);
	oid = device_get_sysctl_tree(sc->sc_dev);
	sc->sc_debug = mwl_debug;	/* seed from the global default */
	SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(oid), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
4782 
4783 /*
4784  * Announce various information on device/driver attach.
4785  */
4786 static void
mwl_announce(struct mwl_softc * sc)4787 mwl_announce(struct mwl_softc *sc)
4788 {
4789 
4790 	device_printf(sc->sc_dev, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
4791 		sc->sc_hwspecs.hwVersion,
4792 		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
4793 		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
4794 		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
4795 		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
4796 		sc->sc_hwspecs.regionCode);
4797 	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;
4798 
4799 	if (bootverbose) {
4800 		int i;
4801 		for (i = 0; i <= WME_AC_VO; i++) {
4802 			struct mwl_txq *txq = sc->sc_ac2q[i];
4803 			device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n",
4804 				txq->qnum, ieee80211_wme_acnames[i]);
4805 		}
4806 	}
4807 	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
4808 		device_printf(sc->sc_dev, "using %u rx descriptors\n", mwl_rxdesc);
4809 	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
4810 		device_printf(sc->sc_dev, "using %u rx buffers\n", mwl_rxbuf);
4811 	if (bootverbose || mwl_txbuf != MWL_TXBUF)
4812 		device_printf(sc->sc_dev, "using %u tx buffers\n", mwl_txbuf);
4813 	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
4814 		device_printf(sc->sc_dev, "multi-bss support\n");
4815 #ifdef MWL_TX_NODROP
4816 	if (bootverbose)
4817 		device_printf(sc->sc_dev, "no tx drop\n");
4818 #endif
4819 }
4820