1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
5 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer,
13 * without modification.
14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
16 * redistribution must be conditioned upon including a substantially
17 * similar Disclaimer requirement for further binary redistribution.
18 *
19 * NO WARRANTY
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
23 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
24 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
28 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGES.
31 */
32
33 #include <sys/cdefs.h>
34 /*
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
36 */
37
38 #include "opt_inet.h"
39 #include "opt_mwl.h"
40 #include "opt_wlan.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/sysctl.h>
45 #include <sys/mbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <sys/kernel.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/errno.h>
53 #include <sys/callout.h>
54 #include <sys/bus.h>
55 #include <sys/endian.h>
56 #include <sys/kthread.h>
57 #include <sys/taskqueue.h>
58
59 #include <machine/bus.h>
60
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65 #include <net/if_types.h>
66 #include <net/if_arp.h>
67 #include <net/ethernet.h>
68 #include <net/if_llc.h>
69
70 #include <net/bpf.h>
71
72 #include <net80211/ieee80211_var.h>
73 #include <net80211/ieee80211_input.h>
74 #include <net80211/ieee80211_regdomain.h>
75
76 #ifdef INET
77 #include <netinet/in.h>
78 #include <netinet/if_ether.h>
79 #endif /* INET */
80
81 #include <dev/mwl/if_mwlvar.h>
82 #include <dev/mwl/mwldiag.h>
83
84 static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
85 const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
86 const uint8_t [IEEE80211_ADDR_LEN],
87 const uint8_t [IEEE80211_ADDR_LEN]);
88 static void mwl_vap_delete(struct ieee80211vap *);
89 static int mwl_setupdma(struct mwl_softc *);
90 static int mwl_hal_reset(struct mwl_softc *sc);
91 static int mwl_init(struct mwl_softc *);
92 static void mwl_parent(struct ieee80211com *);
93 static int mwl_reset(struct ieee80211vap *, u_long);
94 static void mwl_stop(struct mwl_softc *);
95 static void mwl_start(struct mwl_softc *);
96 static int mwl_transmit(struct ieee80211com *, struct mbuf *);
97 static int mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
98 const struct ieee80211_bpf_params *);
99 static int mwl_media_change(if_t);
100 static void mwl_watchdog(void *);
101 static int mwl_ioctl(struct ieee80211com *, u_long, void *);
102 static void mwl_radar_proc(void *, int);
103 static void mwl_chanswitch_proc(void *, int);
104 static void mwl_bawatchdog_proc(void *, int);
105 static int mwl_key_alloc(struct ieee80211vap *,
106 struct ieee80211_key *,
107 ieee80211_keyix *, ieee80211_keyix *);
108 static int mwl_key_delete(struct ieee80211vap *,
109 const struct ieee80211_key *);
110 static int mwl_key_set(struct ieee80211vap *,
111 const struct ieee80211_key *);
112 static int _mwl_key_set(struct ieee80211vap *,
113 const struct ieee80211_key *,
114 const uint8_t mac[IEEE80211_ADDR_LEN]);
115 static int mwl_mode_init(struct mwl_softc *);
116 static void mwl_update_mcast(struct ieee80211com *);
117 static void mwl_update_promisc(struct ieee80211com *);
118 static void mwl_updateslot(struct ieee80211com *);
119 static int mwl_beacon_setup(struct ieee80211vap *);
120 static void mwl_beacon_update(struct ieee80211vap *, int);
121 #ifdef MWL_HOST_PS_SUPPORT
122 static void mwl_update_ps(struct ieee80211vap *, int);
123 static int mwl_set_tim(struct ieee80211_node *, int);
124 #endif
125 static int mwl_dma_setup(struct mwl_softc *);
126 static void mwl_dma_cleanup(struct mwl_softc *);
127 static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
128 const uint8_t [IEEE80211_ADDR_LEN]);
129 static void mwl_node_cleanup(struct ieee80211_node *);
130 static void mwl_node_drain(struct ieee80211_node *);
131 static void mwl_node_getsignal(const struct ieee80211_node *,
132 int8_t *, int8_t *);
133 static void mwl_node_getmimoinfo(const struct ieee80211_node *,
134 struct ieee80211_mimo_info *);
135 static int mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
136 static void mwl_rx_proc(void *, int);
137 static void mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
138 static int mwl_tx_setup(struct mwl_softc *, int, int);
139 static int mwl_wme_update(struct ieee80211com *);
140 static void mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
141 static void mwl_tx_cleanup(struct mwl_softc *);
142 static uint16_t mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
143 static int mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
144 struct mwl_txbuf *, struct mbuf *);
145 static void mwl_tx_proc(void *, int);
146 static int mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
147 static void mwl_draintxq(struct mwl_softc *);
148 static void mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
149 static int mwl_recv_action(struct ieee80211_node *,
150 const struct ieee80211_frame *,
151 const uint8_t *, const uint8_t *);
152 static int mwl_addba_request(struct ieee80211_node *,
153 struct ieee80211_tx_ampdu *, int dialogtoken,
154 int baparamset, int batimeout);
155 static int mwl_addba_response(struct ieee80211_node *,
156 struct ieee80211_tx_ampdu *, int status,
157 int baparamset, int batimeout);
158 static void mwl_addba_stop(struct ieee80211_node *,
159 struct ieee80211_tx_ampdu *);
160 static int mwl_startrecv(struct mwl_softc *);
161 static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
162 struct ieee80211_channel *);
163 static int mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
164 static void mwl_scan_start(struct ieee80211com *);
165 static void mwl_scan_end(struct ieee80211com *);
166 static void mwl_set_channel(struct ieee80211com *);
167 static int mwl_peerstadb(struct ieee80211_node *,
168 int aid, int staid, MWL_HAL_PEERINFO *pi);
169 static int mwl_localstadb(struct ieee80211vap *);
170 static int mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
171 static int allocstaid(struct mwl_softc *sc, int aid);
172 static void delstaid(struct mwl_softc *sc, int staid);
173 static void mwl_newassoc(struct ieee80211_node *, int);
174 static void mwl_agestations(void *);
175 static int mwl_setregdomain(struct ieee80211com *,
176 struct ieee80211_regdomain *, int,
177 struct ieee80211_channel []);
178 static void mwl_getradiocaps(struct ieee80211com *, int, int *,
179 struct ieee80211_channel []);
180 static int mwl_getchannels(struct mwl_softc *);
181
182 static void mwl_sysctlattach(struct mwl_softc *);
183 static void mwl_announce(struct mwl_softc *);
184
/* Parent node for all tunables below: hw.mwl.* */
SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Marvell driver parameters");

static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
	    0, "rx descriptors allocated");
static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
	    0, "rx buffers allocated");
static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
	    0, "tx buffers allocated");
static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
	    0, "tx buffers to send at once");
static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
	    0, "max rx buffers to process per interrupt");
static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
	    0, "min free rx buffers before restarting traffic");

#ifdef MWL_DEBUG
/* Debug bitmask; set hw.mwl.debug to an OR of the MWL_DEBUG_* bits below. */
static	int mwl_debug = 0;
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
	    0, "control debugging printfs");
enum {
	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
	MWL_DEBUG_BEACON	= 0x00000020,	/* beacon handling */
	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
	MWL_DEBUG_ANY		= 0xffffffff
};
/* Dump received frames when RECV tracing is on; beacons only with RECV_ALL. */
#define	IFF_DUMPPKTS_RECV(sc, wh) \
	((sc->sc_debug & MWL_DEBUG_RECV) && \
	  ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IEEE80211_IS_MGMT_BEACON(wh)))
#define	IFF_DUMPPKTS_XMIT(sc) \
	(sc->sc_debug & MWL_DEBUG_XMIT)

#define	DPRINTF(sc, m, fmt, ...) do {				\
	if (sc->sc_debug & (m))					\
		printf(fmt, __VA_ARGS__);			\
} while (0)
#define	KEYPRINTF(sc, hk, mac) do {				\
	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
		mwl_keyprint(sc, __func__, hk, mac);		\
} while (0)
static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
#else
/* Non-debug builds: all tracing compiles away to nothing. */
#define	IFF_DUMPPKTS_RECV(sc, wh)	0
#define	IFF_DUMPPKTS_XMIT(sc)		0
#define	DPRINTF(sc, m, fmt, ...)	do { (void )sc; } while (0)
#define	KEYPRINTF(sc, k, mac)		do { (void )sc; } while (0)
#endif

static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
253
254 /*
255 * Each packet has fixed front matter: a 2-byte length
256 * of the payload, followed by a 4-address 802.11 header
257 * (regardless of the actual header and always w/o any
258 * QoS header). The payload then follows.
259 */
struct mwltxrec {
	uint16_t fwlen;				/* payload length for the f/w */
	struct ieee80211_frame_addr4 wh;	/* 4-address 802.11 header */
} __packed;
264
265 /*
266 * Read/Write shorthands for accesses to BAR 0. Note
267 * that all BAR 1 operations are done in the "hal" and
268 * there should be no reference to them here.
269 */
270 #ifdef MWL_DEBUG
/* Read a 32-bit register in BAR 0 at byte offset 'off'. */
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
276 #endif
277
/* Write 'val' to a 32-bit register in BAR 0 at byte offset 'off'. */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
283
/*
 * Device attach: attach the hal, load firmware, fetch h/w specs and
 * channel list, set up tx/rx DMA and taskqueues, then register the
 * device with net80211.  Returns 0 on success or an errno; on failure
 * the bad/bad1/bad2 labels unwind whatever was acquired.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	/*
	 * Setup the RX free list lock early, so it can be consistently
	 * removed.
	 */
	MWL_RXFREE_INIT(sc);

	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		device_printf(sc->sc_dev, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to setup descriptors: %d\n",
		    error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, 1);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
	mbufq_init(&sc->sc_snd, ifqmaxlen);

	/* Deferred work (rx, radar, channel switch, BA watchdog) runs here. */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", device_get_nameunit(sc->sc_dev));

	NET_TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		device_printf(sc->sc_dev,
		    "unable to setup xmit queue for %s traffic!\n",
		     ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_hwspecs.macAddr);

	/* call MI attach routine. */
	ieee80211_ifattach(ic);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;
	ic->ic_transmit = mwl_transmit;
	ic->ic_ioctl = mwl_ioctl;
	ic->ic_parent = mwl_parent;

	ic->ic_node_alloc = mwl_node_alloc;
	/* save net80211 defaults so our wrappers can chain to them */
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	MWL_RXFREE_DESTROY(sc);
	sc->sc_invalid = 1;	/* keep mwl_intr from touching dead h/w */
	return error;
}
522
/*
 * Device detach: stop the hardware, then tear down in the order the
 * comment below requires.  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	mbufq_drain(&sc->sc_snd);

	return 0;
}
553
554 /*
555 * MAC address handling for multiple BSS on the same radio.
556 * The first vap uses the MAC address from the EEPROM. For
557 * subsequent vap's we set the U/L bit (bit 1) in the MAC
558 * address and use the next six bits as an index.
559 */
560 static void
assign_address(struct mwl_softc * sc,uint8_t mac[IEEE80211_ADDR_LEN],int clone)561 assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
562 {
563 int i;
564
565 if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
566 /* NB: we only do this if h/w supports multiple bssid */
567 for (i = 0; i < 32; i++)
568 if ((sc->sc_bssidmask & (1<<i)) == 0)
569 break;
570 if (i != 0)
571 mac[0] |= (i << 2)|0x2;
572 } else
573 i = 0;
574 sc->sc_bssidmask |= 1<<i;
575 if (i == 0)
576 sc->sc_nbssid0++;
577 }
578
579 static void
reclaim_address(struct mwl_softc * sc,const uint8_t mac[IEEE80211_ADDR_LEN])580 reclaim_address(struct mwl_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
581 {
582 int i = mac[0] >> 2;
583 if (i != 0 || --sc->sc_nbssid0 == 0)
584 sc->sc_bssidmask &= ~(1<<i);
585 }
586
/*
 * Create a vap of the requested opmode.  AP/MBSS/STA vaps get a hal
 * vap (and possibly a cloned MAC address); WDS vaps piggyback on an
 * existing AP hal vap; monitor vaps need no hal state.  Returns the
 * new vap or NULL on failure (unsupported mode, no hal vap, no AP
 * vap for WDS).
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			/* undo any address reservation made above */
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;
	}

	mvp = malloc(sizeof(struct mwl_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;	/* chain to net80211 default */
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status,
	    mac);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
714
/*
 * Destroy a vap: quiesce interrupts while h/w state is removed,
 * detach from net80211, release the hal vap and BSSID slot (for
 * AP/MBSS/STA), drop vap counters and free the driver state.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (sc->sc_running) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	if (sc->sc_running)
		mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable ints */
}
755
/* Power-management suspend hook: just stop the device under the lock. */
void
mwl_suspend(struct mwl_softc *sc)
{

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}
764
765 void
mwl_resume(struct mwl_softc * sc)766 mwl_resume(struct mwl_softc *sc)
767 {
768 int error = EDOOFUS;
769
770 MWL_LOCK(sc);
771 if (sc->sc_ic.ic_nrunning > 0)
772 error = mwl_init(sc);
773 MWL_UNLOCK(sc);
774
775 if (error == 0)
776 ieee80211_start_all(&sc->sc_ic); /* start all vap's */
777 }
778
/* System-shutdown hook: stop the device under the lock. */
void
mwl_shutdown(void *arg)
{
	struct mwl_softc *mwl = arg;

	MWL_LOCK(mwl);
	mwl_stop(mwl);
	MWL_UNLOCK(mwl);
}
788
789 /*
790 * Interrupt handler. Most of the actual processing is deferred.
791 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	/* rx/tx completion and BA watchdog work is deferred to taskqueues */
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);	/* f/w command completion */
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;			/* MAC event: nothing handled */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;			/* queue full: not handled */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
847
848 static void
mwl_radar_proc(void * arg,int pending)849 mwl_radar_proc(void *arg, int pending)
850 {
851 struct mwl_softc *sc = arg;
852 struct ieee80211com *ic = &sc->sc_ic;
853
854 DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
855 __func__, pending);
856
857 sc->sc_stats.mst_radardetect++;
858 /* XXX stop h/w BA streams? */
859
860 IEEE80211_LOCK(ic);
861 ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
862 IEEE80211_UNLOCK(ic);
863 }
864
865 static void
mwl_chanswitch_proc(void * arg,int pending)866 mwl_chanswitch_proc(void *arg, int pending)
867 {
868 struct mwl_softc *sc = arg;
869 struct ieee80211com *ic = &sc->sc_ic;
870
871 DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
872 __func__, pending);
873
874 IEEE80211_LOCK(ic);
875 sc->sc_csapending = 0;
876 ieee80211_csa_completeswitch(ic);
877 IEEE80211_UNLOCK(ic);
878 }
879
/*
 * Tear down one BA stream flagged by the firmware watchdog.
 * NOTE(review): assumes sp->data[0] holds the node and sp->data[1]
 * the tx ampdu state stashed at stream setup -- confirm in the hal.
 */
static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
	struct ieee80211_node *ni = sp->data[0];

	/* send DELBA and drop the stream */
	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}
888
/*
 * Deferred task: the firmware BA watchdog fired.  Query the h/w
 * for a bitmap identifying the offending block-ack stream(s) and
 * tear them down via mwl_bawatchdog().
 */
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	if (bitmap == 0xff) {
		/* 0xff: firmware convention for "all streams" */
		n = 0;
		/* disable all ba streams */
		/* NB: bitmap is reused as the stream-index loop variable */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/*
		 * Any value other than 0xaa names a single stream;
		 * 0xaa appears to be a firmware "nothing to do"
		 * sentinel — NOTE(review): convention inferred, confirm
		 * against firmware docs.
		 */
		/* disable a single ba stream */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}
933
934 /*
935 * Convert net80211 channel to a HAL channel.
936 */
937 static void
mwl_mapchan(MWL_HAL_CHANNEL * hc,const struct ieee80211_channel * chan)938 mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
939 {
940 hc->channel = chan->ic_ieee;
941
942 *(uint32_t *)&hc->channelFlags = 0;
943 if (IEEE80211_IS_CHAN_2GHZ(chan))
944 hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
945 else if (IEEE80211_IS_CHAN_5GHZ(chan))
946 hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
947 if (IEEE80211_IS_CHAN_HT40(chan)) {
948 hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
949 if (IEEE80211_IS_CHAN_HT40U(chan))
950 hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
951 else
952 hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
953 } else
954 hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
955 /* XXX 10MHz channels */
956 }
957
958 /*
959 * Inform firmware of our tx/rx dma setup. The BAR 0
960 * writes below are for compatibility with older firmware.
961 * For current firmware we send this information with a
962 * cmd block via mwl_hal_sethwdma.
963 */
964 static int
mwl_setupdma(struct mwl_softc * sc)965 mwl_setupdma(struct mwl_softc *sc)
966 {
967 int error, i;
968
969 sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
970 WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
971 WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
972
973 for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
974 struct mwl_txq *txq = &sc->sc_txq[i];
975 sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
976 WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
977 }
978 sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
979 sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;
980
981 error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
982 if (error != 0) {
983 device_printf(sc->sc_dev,
984 "unable to setup tx/rx dma; hal status %u\n", error);
985 /* XXX */
986 }
987 return error;
988 }
989
990 /*
991 * Inform firmware of tx rate parameters.
992 * Called after a channel change.
993 */
994 static int
mwl_setcurchanrates(struct mwl_softc * sc)995 mwl_setcurchanrates(struct mwl_softc *sc)
996 {
997 struct ieee80211com *ic = &sc->sc_ic;
998 const struct ieee80211_rateset *rs;
999 MWL_HAL_TXRATE rates;
1000
1001 memset(&rates, 0, sizeof(rates));
1002 rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1003 /* rate used to send management frames */
1004 rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1005 /* rate used to send multicast frames */
1006 rates.McastRate = rates.MgtRate;
1007
1008 return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1009 }
1010
1011 /*
1012 * Inform firmware of tx rate parameters. Called whenever
1013 * user-settable params change and after a channel change.
1014 */
1015 static int
mwl_setrates(struct ieee80211vap * vap)1016 mwl_setrates(struct ieee80211vap *vap)
1017 {
1018 struct mwl_vap *mvp = MWL_VAP(vap);
1019 struct ieee80211_node *ni = vap->iv_bss;
1020 const struct ieee80211_txparam *tp = ni->ni_txparms;
1021 MWL_HAL_TXRATE rates;
1022
1023 KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1024
1025 /*
1026 * Update the h/w rate map.
1027 * NB: 0x80 for MCS is passed through unchanged
1028 */
1029 memset(&rates, 0, sizeof(rates));
1030 /* rate used to send management frames */
1031 rates.MgtRate = tp->mgmtrate;
1032 /* rate used to send multicast frames */
1033 rates.McastRate = tp->mcastrate;
1034
1035 /* while here calculate EAPOL fixed rate cookie */
1036 mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1037
1038 return mwl_hal_settxrate(mvp->mv_hvap,
1039 tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1040 RATE_FIXED : RATE_AUTO, &rates);
1041 }
1042
1043 /*
1044 * Setup a fixed xmit rate cookie for EAPOL frames.
1045 */
1046 static void
mwl_seteapolformat(struct ieee80211vap * vap)1047 mwl_seteapolformat(struct ieee80211vap *vap)
1048 {
1049 struct mwl_vap *mvp = MWL_VAP(vap);
1050 struct ieee80211_node *ni = vap->iv_bss;
1051 enum ieee80211_phymode mode;
1052 uint8_t rate;
1053
1054 KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1055
1056 mode = ieee80211_chan2mode(ni->ni_chan);
1057 /*
1058 * Use legacy rates when operating a mixed HT+non-HT bss.
1059 * NB: this may violate POLA for sta and wds vap's.
1060 */
1061 if (mode == IEEE80211_MODE_11NA &&
1062 (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1063 rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1064 else if (mode == IEEE80211_MODE_11NG &&
1065 (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1066 rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1067 else
1068 rate = vap->iv_txparms[mode].mgmtrate;
1069
1070 mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1071 }
1072
1073 /*
1074 * Map SKU+country code to region code for radar bin'ing.
1075 */
1076 static int
mwl_map2regioncode(const struct ieee80211_regdomain * rd)1077 mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1078 {
1079 switch (rd->regdomain) {
1080 case SKU_FCC:
1081 case SKU_FCC3:
1082 return DOMAIN_CODE_FCC;
1083 case SKU_CA:
1084 return DOMAIN_CODE_IC;
1085 case SKU_ETSI:
1086 case SKU_ETSI2:
1087 case SKU_ETSI3:
1088 if (rd->country == CTRY_SPAIN)
1089 return DOMAIN_CODE_SPAIN;
1090 if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1091 return DOMAIN_CODE_FRANCE;
1092 /* XXX force 1.3.1 radar type */
1093 return DOMAIN_CODE_ETSI_131;
1094 case SKU_JAPAN:
1095 return DOMAIN_CODE_MKK;
1096 case SKU_ROW:
1097 return DOMAIN_CODE_DGT; /* Taiwan */
1098 case SKU_APAC:
1099 case SKU_APAC2:
1100 case SKU_APAC3:
1101 return DOMAIN_CODE_AUS; /* Australia */
1102 }
1103 /* XXX KOREA? */
1104 return DOMAIN_CODE_FCC; /* XXX? */
1105 }
1106
/*
 * Push vap-independent operating state to the firmware after a
 * (re)start: antennas, radio, wmm, channel, rate adaptation,
 * region code and aggregation parameters.
 *
 * Always returns 1 ("success"); the individual hal call results
 * are not checked.
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	/* radar binning depends on the regulatory domain */
	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);	/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1130
/*
 * Bring the hardware up: stop any previous activity, reset the
 * firmware state, start the receive path, enable interrupts and
 * kick the watchdog.  Called with the softc lock held.
 *
 * Returns 0 on success or an errno (EIO if the hal reset fails,
 * or the error from mwl_startrecv).
 */
static int
mwl_init(struct mwl_softc *sc)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop(sc);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		device_printf(sc->sc_dev, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.  The #if 0 bits (MAC_EVENT, QUEUE_EMPTY)
	 * are deliberately left out of the mask.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	/* mark running before unmasking interrupts */
	sc->sc_running = 1;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1187
1188 static void
mwl_stop(struct mwl_softc * sc)1189 mwl_stop(struct mwl_softc *sc)
1190 {
1191
1192 MWL_LOCK_ASSERT(sc);
1193 if (sc->sc_running) {
1194 /*
1195 * Shutdown the hardware and driver.
1196 */
1197 sc->sc_running = 0;
1198 callout_stop(&sc->sc_watchdog);
1199 sc->sc_tx_timer = 0;
1200 mwl_draintxq(sc);
1201 }
1202 }
1203
/*
 * Re-push per-vap state to the firmware: rates, RTS threshold,
 * HT guard interval and protection mode.  When the vap is RUNning
 * in an AP-like mode the beacon is also rebuilt.
 *
 * Returns 0, or the result of mwl_beacon_setup() when a beacon
 * is re-established.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	/* rate state is only meaningful in RUN (mwl_setrates asserts it) */
	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap, _IEEE80211_MASKSHIFT(
		    ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1233
1234 /*
1235 * Reset the hardware w/o losing operational state.
1236 * Used to reset or reload hardware state for a vap.
1237 */
1238 static int
mwl_reset(struct ieee80211vap * vap,u_long cmd)1239 mwl_reset(struct ieee80211vap *vap, u_long cmd)
1240 {
1241 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1242 int error = 0;
1243
1244 if (hvap != NULL) { /* WDS, MONITOR, etc. */
1245 struct ieee80211com *ic = vap->iv_ic;
1246 struct mwl_softc *sc = ic->ic_softc;
1247 struct mwl_hal *mh = sc->sc_mh;
1248
1249 /* XXX handle DWDS sta vap change */
1250 /* XXX do we need to disable interrupts? */
1251 mwl_hal_intrset(mh, 0); /* disable interrupts */
1252 error = mwl_reset_vap(vap, vap->iv_state);
1253 mwl_hal_intrset(mh, sc->sc_imask);
1254 }
1255 return error;
1256 }
1257
1258 /*
1259 * Allocate a tx buffer for sending a frame. The
1260 * packet is assumed to have the WME AC stored so
1261 * we can use it to select the appropriate h/w queue.
1262 */
1263 static struct mwl_txbuf *
mwl_gettxbuf(struct mwl_softc * sc,struct mwl_txq * txq)1264 mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1265 {
1266 struct mwl_txbuf *bf;
1267
1268 /*
1269 * Grab a TX buffer and associated resources.
1270 */
1271 MWL_TXQ_LOCK(txq);
1272 bf = STAILQ_FIRST(&txq->free);
1273 if (bf != NULL) {
1274 STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1275 txq->nfree--;
1276 }
1277 MWL_TXQ_UNLOCK(txq);
1278 if (bf == NULL)
1279 DPRINTF(sc, MWL_DEBUG_XMIT,
1280 "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1281 return bf;
1282 }
1283
1284 /*
1285 * Return a tx buffer to the queue it came from. Note there
1286 * are two cases because we must preserve the order of buffers
1287 * as it reflects the fixed order of descriptors in memory
1288 * (the firmware pre-fetches descriptors so we cannot reorder).
1289 */
1290 static void
mwl_puttxbuf_head(struct mwl_txq * txq,struct mwl_txbuf * bf)1291 mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1292 {
1293 bf->bf_m = NULL;
1294 bf->bf_node = NULL;
1295 MWL_TXQ_LOCK(txq);
1296 STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1297 txq->nfree++;
1298 MWL_TXQ_UNLOCK(txq);
1299 }
1300
1301 static void
mwl_puttxbuf_tail(struct mwl_txq * txq,struct mwl_txbuf * bf)1302 mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1303 {
1304 bf->bf_m = NULL;
1305 bf->bf_node = NULL;
1306 MWL_TXQ_LOCK(txq);
1307 STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1308 txq->nfree++;
1309 MWL_TXQ_UNLOCK(txq);
1310 }
1311
1312 static int
mwl_transmit(struct ieee80211com * ic,struct mbuf * m)1313 mwl_transmit(struct ieee80211com *ic, struct mbuf *m)
1314 {
1315 struct mwl_softc *sc = ic->ic_softc;
1316 int error;
1317
1318 MWL_LOCK(sc);
1319 if (!sc->sc_running) {
1320 MWL_UNLOCK(sc);
1321 return (ENXIO);
1322 }
1323 error = mbufq_enqueue(&sc->sc_snd, m);
1324 if (error) {
1325 MWL_UNLOCK(sc);
1326 return (error);
1327 }
1328 mwl_start(sc);
1329 MWL_UNLOCK(sc);
1330 return (0);
1331 }
1332
/*
 * Drain the driver send queue, handing each frame to the h/w.
 * Frames are batched (mwl_txcoalesce) before poking the firmware
 * to cut down on doorbell writes.  Called with the softc lock held.
 */
static void
mwl_start(struct mwl_softc *sc)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	MWL_LOCK_ASSERT(sc);
	if (!sc->sc_running || sc->sc_invalid)
		return;
	nqueued = 0;
	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/*
		 * Grab the node for the destination.
		 * NB: net80211 stashes the node ref in rcvif.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			/* no buffers: drop frame and release the node ref */
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			/* compile-time policy: stop rather than tail-drop */
			sc->sc_stats.mst_tx_qstop++;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 * NB: on failure mwl_tx_start is presumed to have
		 * consumed the mbuf (only bf and ni are reclaimed
		 * here) — verify against mwl_tx_start.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host.  So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1410
/*
 * Transmit a raw (caller-formed) 802.11 frame.  Returns 0 on
 * success, ENETDOWN when the device is down, ENOBUFS when no tx
 * buffer is available, or EIO when the h/w rejects the frame.
 * NB: per the net80211 raw_xmit contract the caller releases the
 * node reference when an error is returned.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if (!sc->sc_running || sc->sc_invalid) {
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 * NB: on failure the mbuf is presumed consumed by
	 * mwl_tx_start (only bf is reclaimed here) — verify
	 * against mwl_tx_start.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		mwl_puttxbuf_head(txq, bf);

		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host.  So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1463
1464 static int
mwl_media_change(if_t ifp)1465 mwl_media_change(if_t ifp)
1466 {
1467 struct ieee80211vap *vap;
1468 int error;
1469
1470 /* NB: only the fixed rate can change and that doesn't need a reset */
1471 error = ieee80211_media_change(ifp);
1472 if (error != 0)
1473 return (error);
1474
1475 vap = if_getsoftc(ifp);
1476 mwl_setrates(vap);
1477 return (0);
1478 }
1479
1480 #ifdef MWL_DEBUG
/*
 * Debug helper: dump a hal key record (cipher, key bytes, TKIP
 * MICs, flags) plus the target mac address to the console.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	/*
	 * Indexed by hk->keyTypeId; assumes ids are 0..2
	 * (WEP/TKIP/AES) — out-of-range ids would read past the
	 * table.  Debug-only code.
	 */
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		/* TKIP carries separate rx/tx MIC keys */
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
1506 #endif
1507
1508 /*
1509 * Allocate a key cache slot for a unicast key. The
1510 * firmware handles key allocation and every station is
1511 * guaranteed key space so we are always successful.
1512 */
1513 static int
mwl_key_alloc(struct ieee80211vap * vap,struct ieee80211_key * k,ieee80211_keyix * keyix,ieee80211_keyix * rxkeyix)1514 mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1515 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1516 {
1517 struct mwl_softc *sc = vap->iv_ic->ic_softc;
1518
1519 if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1520 (k->wk_flags & IEEE80211_KEY_GROUP)) {
1521 if (!ieee80211_is_key_global(vap, k)) {
1522 /* should not happen */
1523 DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1524 "%s: bogus group key\n", __func__);
1525 return 0;
1526 }
1527 /* give the caller what they requested */
1528 *keyix = *rxkeyix = ieee80211_crypto_get_key_wepidx(vap, k);
1529 } else {
1530 /*
1531 * Firmware handles key allocation.
1532 */
1533 *keyix = *rxkeyix = 0;
1534 }
1535 return 1;
1536 }
1537
1538 /*
1539 * Delete a key entry allocated by mwl_key_alloc.
1540 */
1541 static int
mwl_key_delete(struct ieee80211vap * vap,const struct ieee80211_key * k)1542 mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1543 {
1544 struct mwl_softc *sc = vap->iv_ic->ic_softc;
1545 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1546 MWL_HAL_KEYVAL hk;
1547 const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1548 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1549
1550 if (hvap == NULL) {
1551 if (vap->iv_opmode != IEEE80211_M_WDS) {
1552 /* XXX monitor mode? */
1553 DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1554 "%s: no hvap for opmode %d\n", __func__,
1555 vap->iv_opmode);
1556 return 0;
1557 }
1558 hvap = MWL_VAP(vap)->mv_ap_hvap;
1559 }
1560
1561 DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1562 __func__, k->wk_keyix);
1563
1564 memset(&hk, 0, sizeof(hk));
1565 hk.keyIndex = k->wk_keyix;
1566 switch (k->wk_cipher->ic_cipher) {
1567 case IEEE80211_CIPHER_WEP:
1568 hk.keyTypeId = KEY_TYPE_ID_WEP;
1569 break;
1570 case IEEE80211_CIPHER_TKIP:
1571 hk.keyTypeId = KEY_TYPE_ID_TKIP;
1572 break;
1573 case IEEE80211_CIPHER_AES_CCM:
1574 hk.keyTypeId = KEY_TYPE_ID_AES;
1575 break;
1576 default:
1577 /* XXX should not happen */
1578 DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1579 __func__, k->wk_cipher->ic_cipher);
1580 return 0;
1581 }
1582 return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0); /*XXX*/
1583 }
1584
1585 static __inline int
addgroupflags(MWL_HAL_KEYVAL * hk,const struct ieee80211_key * k)1586 addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1587 {
1588 if (k->wk_flags & IEEE80211_KEY_GROUP) {
1589 if (k->wk_flags & IEEE80211_KEY_XMIT)
1590 hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1591 if (k->wk_flags & IEEE80211_KEY_RECV)
1592 hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1593 return 1;
1594 } else
1595 return 0;
1596 }
1597
1598 /*
1599 * Set the key cache contents for the specified key. Key cache
1600 * slot(s) must already have been allocated by mwl_key_alloc.
1601 */
1602 static int
mwl_key_set(struct ieee80211vap * vap,const struct ieee80211_key * k)1603 mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
1604 {
1605 return (_mwl_key_set(vap, k, k->wk_macaddr));
1606 }
1607
/*
 * Plumb a key into the firmware key cache for the station
 * identified by @mac.  Builds a hal key record from the net80211
 * key and resolves which sta db address the firmware expects
 * (which differs from the net80211/hostapd convention).
 * Returns 1 on success, 0 on failure.
 */
static int
_mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps use the parent AP's firmware vap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	/* fill in cipher-specific key type, length and flags */
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		/* seed the firmware's replay counter from the TSC */
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 * just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
			/* XXX plumb to local sta db too for static key wep */
			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
		}
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS
		 * node setup so we would plumb the key to the wrong
		 * mac address (it'd be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1712
1713 /*
1714 * Set the multicast filter contents into the hardware.
1715 * XXX f/w has no support; just defer to the os.
1716 */
1717 static void
mwl_setmcastfilter(struct mwl_softc * sc)1718 mwl_setmcastfilter(struct mwl_softc *sc)
1719 {
1720 #if 0
1721 struct ether_multi *enm;
1722 struct ether_multistep estep;
1723 uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
1724 uint8_t *mp;
1725 int nmc;
1726
1727 mp = macs;
1728 nmc = 0;
1729 ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
1730 while (enm != NULL) {
1731 /* XXX Punt on ranges. */
1732 if (nmc == MWL_HAL_MCAST_MAX ||
1733 !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
1734 if_setflagsbit(ifp, IFF_ALLMULTI, 0);
1735 return;
1736 }
1737 IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
1738 mp += IEEE80211_ADDR_LEN, nmc++;
1739 ETHER_NEXT_MULTI(estep, enm);
1740 }
1741 if_setflagsbit(ifp, 0, IFF_ALLMULTI);
1742 mwl_hal_setmcast(sc->sc_mh, nmc, macs);
1743 #endif
1744 }
1745
1746 static int
mwl_mode_init(struct mwl_softc * sc)1747 mwl_mode_init(struct mwl_softc *sc)
1748 {
1749 struct ieee80211com *ic = &sc->sc_ic;
1750 struct mwl_hal *mh = sc->sc_mh;
1751
1752 mwl_hal_setpromisc(mh, ic->ic_promisc > 0);
1753 mwl_setmcastfilter(sc);
1754
1755 return 0;
1756 }
1757
1758 /*
1759 * Callback from the 802.11 layer after a multicast state change.
1760 */
1761 static void
mwl_update_mcast(struct ieee80211com * ic)1762 mwl_update_mcast(struct ieee80211com *ic)
1763 {
1764 struct mwl_softc *sc = ic->ic_softc;
1765
1766 mwl_setmcastfilter(sc);
1767 }
1768
1769 /*
1770 * Callback from the 802.11 layer after a promiscuous mode change.
1771 * Note this interface does not check the operating mode as this
1772 * is an internal callback and we are expected to honor the current
1773 * state (e.g. this is used for setting the interface in promiscuous
1774 * mode when operating in hostap mode to do ACS).
1775 */
1776 static void
mwl_update_promisc(struct ieee80211com * ic)1777 mwl_update_promisc(struct ieee80211com *ic)
1778 {
1779 struct mwl_softc *sc = ic->ic_softc;
1780
1781 mwl_hal_setpromisc(sc->sc_mh, ic->ic_promisc > 0);
1782 }
1783
1784 /*
1785 * Callback from the 802.11 layer to update the slot time
1786 * based on the current setting. We use it to notify the
1787 * firmware of ERP changes and the f/w takes care of things
1788 * like slot time and preamble.
1789 */
1790 static void
mwl_updateslot(struct ieee80211com * ic)1791 mwl_updateslot(struct ieee80211com *ic)
1792 {
1793 struct mwl_softc *sc = ic->ic_softc;
1794 struct mwl_hal *mh = sc->sc_mh;
1795 int prot;
1796
1797 /* NB: can be called early; suppress needless cmds */
1798 if (!sc->sc_running)
1799 return;
1800
1801 /*
1802 * Calculate the ERP flags. The firmware will use
1803 * this to carry out the appropriate measures.
1804 */
1805 prot = 0;
1806 if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1807 if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1808 prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1809 if (ic->ic_flags & IEEE80211_F_USEPROT)
1810 prot |= IEEE80211_ERP_USE_PROTECTION;
1811 if (ic->ic_flags & IEEE80211_F_USEBARKER)
1812 prot |= IEEE80211_ERP_LONG_PREAMBLE;
1813 }
1814
1815 DPRINTF(sc, MWL_DEBUG_RESET,
1816 "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1817 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1818 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1819 ic->ic_flags);
1820
1821 mwl_hal_setgprot(mh, prot);
1822 }
1823
1824 /*
1825 * Setup the beacon frame.
1826 */
1827 static int
mwl_beacon_setup(struct ieee80211vap * vap)1828 mwl_beacon_setup(struct ieee80211vap *vap)
1829 {
1830 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1831 struct ieee80211_node *ni = vap->iv_bss;
1832 struct mbuf *m;
1833
1834 m = ieee80211_beacon_alloc(ni);
1835 if (m == NULL)
1836 return ENOBUFS;
1837 mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1838 m_free(m);
1839
1840 return 0;
1841 }
1842
1843 /*
1844 * Update the beacon frame in response to a change.
1845 */
1846 static void
mwl_beacon_update(struct ieee80211vap * vap,int item)1847 mwl_beacon_update(struct ieee80211vap *vap, int item)
1848 {
1849 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1850 struct ieee80211com *ic = vap->iv_ic;
1851
1852 KASSERT(hvap != NULL, ("no beacon"));
1853 switch (item) {
1854 case IEEE80211_BEACON_ERP:
1855 mwl_updateslot(ic);
1856 break;
1857 case IEEE80211_BEACON_HTINFO:
1858 mwl_hal_setnprotmode(hvap, _IEEE80211_MASKSHIFT(
1859 ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1860 break;
1861 case IEEE80211_BEACON_CAPS:
1862 case IEEE80211_BEACON_WME:
1863 case IEEE80211_BEACON_APPIE:
1864 case IEEE80211_BEACON_CSA:
1865 break;
1866 case IEEE80211_BEACON_TIM:
1867 /* NB: firmware always forms TIM */
1868 return;
1869 }
1870 /* XXX retain beacon frame and update */
1871 mwl_beacon_setup(vap);
1872 }
1873
1874 static void
mwl_load_cb(void * arg,bus_dma_segment_t * segs,int nsegs,int error)1875 mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1876 {
1877 bus_addr_t *paddr = (bus_addr_t*) arg;
1878 KASSERT(error == 0, ("error %u on bus_dma callback", error));
1879 *paddr = segs->ds_addr;
1880 }
1881
1882 #ifdef MWL_HOST_PS_SUPPORT
1883 /*
1884 * Handle power save station occupancy changes.
1885 */
1886 static void
mwl_update_ps(struct ieee80211vap * vap,int nsta)1887 mwl_update_ps(struct ieee80211vap *vap, int nsta)
1888 {
1889 struct mwl_vap *mvp = MWL_VAP(vap);
1890
1891 if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1892 mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1893 mvp->mv_last_ps_sta = nsta;
1894 }
1895
1896 /*
1897 * Handle associated station power save state changes.
1898 */
1899 static int
mwl_set_tim(struct ieee80211_node * ni,int set)1900 mwl_set_tim(struct ieee80211_node *ni, int set)
1901 {
1902 struct ieee80211vap *vap = ni->ni_vap;
1903 struct mwl_vap *mvp = MWL_VAP(vap);
1904
1905 if (mvp->mv_set_tim(ni, set)) { /* NB: state change */
1906 mwl_hal_setpowersave_sta(mvp->mv_hvap,
1907 IEEE80211_AID(ni->ni_associd), set);
1908 return 1;
1909 } else
1910 return 0;
1911 }
1912 #endif /* MWL_HOST_PS_SUPPORT */
1913
1914 static int
mwl_desc_setup(struct mwl_softc * sc,const char * name,struct mwl_descdma * dd,int nbuf,size_t bufsize,int ndesc,size_t descsize)1915 mwl_desc_setup(struct mwl_softc *sc, const char *name,
1916 struct mwl_descdma *dd,
1917 int nbuf, size_t bufsize, int ndesc, size_t descsize)
1918 {
1919 uint8_t *ds;
1920 int error;
1921
1922 DPRINTF(sc, MWL_DEBUG_RESET,
1923 "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
1924 __func__, name, nbuf, (uintmax_t) bufsize,
1925 ndesc, (uintmax_t) descsize);
1926
1927 dd->dd_name = name;
1928 dd->dd_desc_len = nbuf * ndesc * descsize;
1929
1930 /*
1931 * Setup DMA descriptor area.
1932 */
1933 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
1934 PAGE_SIZE, 0, /* alignment, bounds */
1935 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1936 BUS_SPACE_MAXADDR, /* highaddr */
1937 NULL, NULL, /* filter, filterarg */
1938 dd->dd_desc_len, /* maxsize */
1939 1, /* nsegments */
1940 dd->dd_desc_len, /* maxsegsize */
1941 BUS_DMA_ALLOCNOW, /* flags */
1942 NULL, /* lockfunc */
1943 NULL, /* lockarg */
1944 &dd->dd_dmat);
1945 if (error != 0) {
1946 device_printf(sc->sc_dev, "cannot allocate %s DMA tag\n", dd->dd_name);
1947 return error;
1948 }
1949
1950 /* allocate descriptors */
1951 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
1952 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
1953 &dd->dd_dmamap);
1954 if (error != 0) {
1955 device_printf(sc->sc_dev, "unable to alloc memory for %u %s descriptors, "
1956 "error %u\n", nbuf * ndesc, dd->dd_name, error);
1957 goto fail1;
1958 }
1959
1960 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
1961 dd->dd_desc, dd->dd_desc_len,
1962 mwl_load_cb, &dd->dd_desc_paddr,
1963 BUS_DMA_NOWAIT);
1964 if (error != 0) {
1965 device_printf(sc->sc_dev, "unable to map %s descriptors, error %u\n",
1966 dd->dd_name, error);
1967 goto fail2;
1968 }
1969
1970 ds = dd->dd_desc;
1971 memset(ds, 0, dd->dd_desc_len);
1972 DPRINTF(sc, MWL_DEBUG_RESET,
1973 "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
1974 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
1975 (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
1976
1977 return 0;
1978 fail2:
1979 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
1980 fail1:
1981 bus_dma_tag_destroy(dd->dd_dmat);
1982 memset(dd, 0, sizeof(*dd));
1983 return error;
1984 #undef DS2PHYS
1985 }
1986
/*
 * Release a descriptor area created by mwl_desc_setup.  The order is
 * mandatory: unload the map, free the dma memory, then destroy the tag.
 * Callers guard on dd->dd_desc_len != 0 so this is only invoked after
 * a successful setup.  sc is unused here but kept for call symmetry.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	/* clear the state, including dd_desc_len, so a second call is a no-op
	 * at the callers' guards */
	memset(dd, 0, sizeof(*dd));
}
1996
1997 /*
1998 * Construct a tx q's free list. The order of entries on
1999 * the list must reflect the physical layout of tx descriptors
2000 * because the firmware pre-fetches descriptors.
2001 *
2002 * XXX might be better to use indices into the buffer array.
2003 */
2004 static void
mwl_txq_reset(struct mwl_softc * sc,struct mwl_txq * txq)2005 mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2006 {
2007 struct mwl_txbuf *bf;
2008 int i;
2009
2010 bf = txq->dma.dd_bufptr;
2011 STAILQ_INIT(&txq->free);
2012 for (i = 0; i < mwl_txbuf; i++, bf++)
2013 STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2014 txq->nfree = i;
2015 }
2016
/* Translate a descriptor's kva to its bus address within the dma area. */
#define DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2019
2020 static int
mwl_txdma_setup(struct mwl_softc * sc,struct mwl_txq * txq)2021 mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
2022 {
2023 int error, bsize, i;
2024 struct mwl_txbuf *bf;
2025 struct mwl_txdesc *ds;
2026
2027 error = mwl_desc_setup(sc, "tx", &txq->dma,
2028 mwl_txbuf, sizeof(struct mwl_txbuf),
2029 MWL_TXDESC, sizeof(struct mwl_txdesc));
2030 if (error != 0)
2031 return error;
2032
2033 /* allocate and setup tx buffers */
2034 bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
2035 bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2036 if (bf == NULL) {
2037 device_printf(sc->sc_dev, "malloc of %u tx buffers failed\n",
2038 mwl_txbuf);
2039 return ENOMEM;
2040 }
2041 txq->dma.dd_bufptr = bf;
2042
2043 ds = txq->dma.dd_desc;
2044 for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
2045 bf->bf_desc = ds;
2046 bf->bf_daddr = DS2PHYS(&txq->dma, ds);
2047 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2048 &bf->bf_dmamap);
2049 if (error != 0) {
2050 device_printf(sc->sc_dev, "unable to create dmamap for tx "
2051 "buffer %u, error %u\n", i, error);
2052 return error;
2053 }
2054 }
2055 mwl_txq_reset(sc, txq);
2056 return 0;
2057 }
2058
2059 static void
mwl_txdma_cleanup(struct mwl_softc * sc,struct mwl_txq * txq)2060 mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2061 {
2062 struct mwl_txbuf *bf;
2063 int i;
2064
2065 bf = txq->dma.dd_bufptr;
2066 for (i = 0; i < mwl_txbuf; i++, bf++) {
2067 KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2068 KASSERT(bf->bf_node == NULL, ("node on free list"));
2069 if (bf->bf_dmamap != NULL)
2070 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2071 }
2072 STAILQ_INIT(&txq->free);
2073 txq->nfree = 0;
2074 if (txq->dma.dd_bufptr != NULL) {
2075 free(txq->dma.dd_bufptr, M_MWLDEV);
2076 txq->dma.dd_bufptr = NULL;
2077 }
2078 if (txq->dma.dd_desc_len != 0)
2079 mwl_desc_cleanup(sc, &txq->dma);
2080 }
2081
2082 static int
mwl_rxdma_setup(struct mwl_softc * sc)2083 mwl_rxdma_setup(struct mwl_softc *sc)
2084 {
2085 int error, jumbosize, bsize, i;
2086 struct mwl_rxbuf *bf;
2087 struct mwl_jumbo *rbuf;
2088 struct mwl_rxdesc *ds;
2089 caddr_t data;
2090
2091 error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2092 mwl_rxdesc, sizeof(struct mwl_rxbuf),
2093 1, sizeof(struct mwl_rxdesc));
2094 if (error != 0)
2095 return error;
2096
2097 /*
2098 * Receive is done to a private pool of jumbo buffers.
2099 * This allows us to attach to mbuf's and avoid re-mapping
2100 * memory on each rx we post. We allocate a large chunk
2101 * of memory and manage it in the driver. The mbuf free
2102 * callback method is used to reclaim frames after sending
2103 * them up the stack. By default we allocate 2x the number of
2104 * rx descriptors configured so we have some slop to hold
2105 * us while frames are processed.
2106 */
2107 if (mwl_rxbuf < 2*mwl_rxdesc) {
2108 device_printf(sc->sc_dev,
2109 "too few rx dma buffers (%d); increasing to %d\n",
2110 mwl_rxbuf, 2*mwl_rxdesc);
2111 mwl_rxbuf = 2*mwl_rxdesc;
2112 }
2113 jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2114 sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2115
2116 error = bus_dma_tag_create(sc->sc_dmat, /* parent */
2117 PAGE_SIZE, 0, /* alignment, bounds */
2118 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2119 BUS_SPACE_MAXADDR, /* highaddr */
2120 NULL, NULL, /* filter, filterarg */
2121 sc->sc_rxmemsize, /* maxsize */
2122 1, /* nsegments */
2123 sc->sc_rxmemsize, /* maxsegsize */
2124 BUS_DMA_ALLOCNOW, /* flags */
2125 NULL, /* lockfunc */
2126 NULL, /* lockarg */
2127 &sc->sc_rxdmat);
2128 if (error != 0) {
2129 device_printf(sc->sc_dev, "could not create rx DMA tag\n");
2130 return error;
2131 }
2132
2133 error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2134 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2135 &sc->sc_rxmap);
2136 if (error != 0) {
2137 device_printf(sc->sc_dev, "could not alloc %ju bytes of rx DMA memory\n",
2138 (uintmax_t) sc->sc_rxmemsize);
2139 return error;
2140 }
2141
2142 error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2143 sc->sc_rxmem, sc->sc_rxmemsize,
2144 mwl_load_cb, &sc->sc_rxmem_paddr,
2145 BUS_DMA_NOWAIT);
2146 if (error != 0) {
2147 device_printf(sc->sc_dev, "could not load rx DMA map\n");
2148 return error;
2149 }
2150
2151 /*
2152 * Allocate rx buffers and set them up.
2153 */
2154 bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2155 bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2156 if (bf == NULL) {
2157 device_printf(sc->sc_dev, "malloc of %u rx buffers failed\n", bsize);
2158 return error;
2159 }
2160 sc->sc_rxdma.dd_bufptr = bf;
2161
2162 STAILQ_INIT(&sc->sc_rxbuf);
2163 ds = sc->sc_rxdma.dd_desc;
2164 for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2165 bf->bf_desc = ds;
2166 bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2167 /* pre-assign dma buffer */
2168 bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2169 /* NB: tail is intentional to preserve descriptor order */
2170 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2171 }
2172
2173 /*
2174 * Place remainder of dma memory buffers on the free list.
2175 */
2176 SLIST_INIT(&sc->sc_rxfree);
2177 for (; i < mwl_rxbuf; i++) {
2178 data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2179 rbuf = MWL_JUMBO_DATA2BUF(data);
2180 SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2181 sc->sc_nrxfree++;
2182 }
2183 return 0;
2184 }
2185 #undef DS2PHYS
2186
/*
 * Reclaim rx dma state.  Each step is guarded so this is safe to call
 * on a partially constructed setup (mwl_dma_setup error path) and safe
 * to call more than once.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	/* unload the jumbo pool map before freeing the memory */
	if (sc->sc_rxmem_paddr != 0) {
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmem_paddr = 0;
	}
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	/* software rx buffer array */
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	/* NOTE(review): sc_rxdmat (the jumbo pool tag) is never destroyed
	 * here — looks like a small leak on detach; confirm against attach */
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
2205
2206 static int
mwl_dma_setup(struct mwl_softc * sc)2207 mwl_dma_setup(struct mwl_softc *sc)
2208 {
2209 int error, i;
2210
2211 error = mwl_rxdma_setup(sc);
2212 if (error != 0) {
2213 mwl_rxdma_cleanup(sc);
2214 return error;
2215 }
2216
2217 for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2218 error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2219 if (error != 0) {
2220 mwl_dma_cleanup(sc);
2221 return error;
2222 }
2223 }
2224 return 0;
2225 }
2226
/* Tear down all dma state: every tx queue first, then the rx side. */
static void
mwl_dma_cleanup(struct mwl_softc *sc)
{
	int i;

	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
	mwl_rxdma_cleanup(sc);
}
2236
2237 static struct ieee80211_node *
mwl_node_alloc(struct ieee80211vap * vap,const uint8_t mac[IEEE80211_ADDR_LEN])2238 mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2239 {
2240 struct ieee80211com *ic = vap->iv_ic;
2241 struct mwl_softc *sc = ic->ic_softc;
2242 const size_t space = sizeof(struct mwl_node);
2243 struct mwl_node *mn;
2244
2245 mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2246 if (mn == NULL) {
2247 /* XXX stat+msg */
2248 return NULL;
2249 }
2250 DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2251 return &mn->mn_node;
2252 }
2253
/*
 * net80211 node cleanup override: if the node was assigned a hardware
 * station id, remove its firmware station db entry and release the id,
 * then chain to the saved net80211 cleanup method.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		/* NB: in sta mode the db entry is keyed by our own address */
		if (mn->mn_hvap != NULL) {
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 * the associate ap's hvap; use it again to delete it.
		 * XXX can vap be NULL?
		 * (this comment deliberately sits between the if and the
		 * else-if; the branches below still chain to the if above)
		 */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		/* return the station id to the allocator */
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	/* chain to the saved net80211 method */
	sc->sc_node_cleanup(ni);
}
2287
2288 /*
2289 * Reclaim rx dma buffers from packets sitting on the ampdu
2290 * reorder queue for a station. We replace buffers with a
2291 * system cluster (if available).
2292 */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
	/*
	 * NB: the entire body is compiled out, so this function is
	 * currently a no-op.  The disabled code references APIs
	 * (pool_cache_get_paddr, MEXTREMOVE/MEXTADD, rxa_m[]) that do
	 * not match this codebase — presumably a NetBSD-era sketch kept
	 * for reference; confirm before resurrecting.
	 */
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place. Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}
2341
2342 /*
2343 * Callback to reclaim resources. We first let the
2344 * net80211 layer do it's thing, then if we are still
2345 * blocked by a lack of rx dma buffers we walk the ampdu
2346 * reorder q's to reclaim buffers by copying to a system
2347 * cluster.
2348 */
2349 static void
mwl_node_drain(struct ieee80211_node * ni)2350 mwl_node_drain(struct ieee80211_node *ni)
2351 {
2352 struct ieee80211com *ic = ni->ni_ic;
2353 struct mwl_softc *sc = ic->ic_softc;
2354 struct mwl_node *mn = MWL_NODE(ni);
2355
2356 DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2357 __func__, ni, ni->ni_vap, mn->mn_staid);
2358
2359 /* NB: call up first to age out ampdu q's */
2360 sc->sc_node_drain(ni);
2361
2362 /* XXX better to not check low water mark? */
2363 if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2364 (ni->ni_flags & IEEE80211_NODE_HT)) {
2365 uint8_t tid;
2366 /*
2367 * Walk the reorder q and reclaim rx dma buffers by copying
2368 * the packet contents into clusters.
2369 */
2370 for (tid = 0; tid < WME_NUM_TID; tid++) {
2371 struct ieee80211_rx_ampdu *rap;
2372
2373 rap = &ni->ni_rx_ampdu[tid];
2374 if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2375 continue;
2376 if (rap->rxa_qframes)
2377 mwl_ampdu_rxdma_reclaim(rap);
2378 }
2379 }
2380 }
2381
/*
 * Report rssi/noise for a node.  rssi comes from net80211's averaged
 * value; noise is a fixed guess (the per-antenna nf path is disabled).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2397
2398 /*
2399 * Convert Hardware per-antenna rssi info to common format:
2400 * Let a1, a2, a3 represent the amplitudes per chain
2401 * Let amax represent max[a1, a2, a3]
2402 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2403 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2404 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2405 * maintain some extra precision.
2406 *
2407 * Values are stored in .5 db format capped at 127.
2408 */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
/* Convert one per-chain amplitude to a .5 dB rssi, capped at 127.
 * NOTE(review): _src and rssi_max index logdbtbl[32]; this assumes the
 * raw hardware amplitudes are < 32 — confirm against the rx descriptor. */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx) table (extra 4x for precision; see block comment) */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	/* rsvd1 is stashed with the converted frame rssi in mwl_rx_proc */
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* amax = strongest of the three chains */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->ch[0].rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->ch[1].rssi[0], mn->mn_ai.rssi_b);
	CVT(mi->ch[2].rssi[0], mn->mn_ai.rssi_c);

	mi->ch[0].noise[0] = mn->mn_ai.nf_a;
	mi->ch[1].noise[0] = mn->mn_ai.nf_b;
	mi->ch[2].noise[0] = mn->mn_ai.nf_c;
#undef CVT
}
2442
2443 static __inline void *
mwl_getrxdma(struct mwl_softc * sc)2444 mwl_getrxdma(struct mwl_softc *sc)
2445 {
2446 struct mwl_jumbo *buf;
2447 void *data;
2448
2449 /*
2450 * Allocate from jumbo pool.
2451 */
2452 MWL_RXFREE_LOCK(sc);
2453 buf = SLIST_FIRST(&sc->sc_rxfree);
2454 if (buf == NULL) {
2455 DPRINTF(sc, MWL_DEBUG_ANY,
2456 "%s: out of rx dma buffers\n", __func__);
2457 sc->sc_stats.mst_rx_nodmabuf++;
2458 data = NULL;
2459 } else {
2460 SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2461 sc->sc_nrxfree--;
2462 data = MWL_JUMBO_BUF2DATA(buf);
2463 }
2464 MWL_RXFREE_UNLOCK(sc);
2465 return data;
2466 }
2467
/* Return a jumbo pool buffer (by its data pointer) to the free list. */
static __inline void
mwl_putrxdma(struct mwl_softc *sc, void *data)
{
	struct mwl_jumbo *buf;

	/* XXX bounds check data */
	MWL_RXFREE_LOCK(sc);
	buf = MWL_JUMBO_DATA2BUF(data);
	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
	sc->sc_nrxfree++;
	MWL_RXFREE_UNLOCK(sc);
}
2480
/*
 * (Re)initialize an rx descriptor, replenishing its dma buffer if it
 * was lost on a previous pass.  Returns ENOMEM when no buffer could be
 * obtained; in that case the descriptor is marked so the firmware skips
 * it until a later replenish succeeds.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.  Ownership is handed to the dma engine last
	 * (RxControl), after the other fields are in place, then the
	 * descriptor is synced out.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2519
/*
 * External-storage free callback for rx mbufs: return the jumbo dma
 * buffer to the pool and, if rx was stalled for lack of buffers,
 * re-enable the rx interrupt once the pool refills past mwl_rxdmalow.
 */
static void
mwl_ext_free(struct mbuf *m)
{
	struct mwl_softc *sc = m->m_ext.ext_arg1;

	/* XXX bounds check data */
	mwl_putrxdma(sc, m->m_ext.ext_buf);
	/*
	 * If we were previously blocked by a lack of rx dma buffers
	 * check if we now have enough to restart rx interrupt handling.
	 * NOTE(review): sc_rxblocked/sc_nrxfree are read here without the
	 * rxfree lock — presumably a benign race; confirm.
	 */
	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
		sc->sc_rxblocked = 0;
		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
	}
}
2536
/*
 * 802.11 BlockAckReq control frame header, up through the transmitter
 * address.  The BAR control, sequence control and FCS that follow on
 * the air are intentionally not declared (see trailing comment).
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];	/* frame control */
	u_int8_t	i_dur[2];	/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2544
2545 /*
2546 * Like ieee80211_anyhdrsize, but handles BAR frames
2547 * specially so the logic below to piece the 802.11
2548 * header together works.
2549 */
2550 static __inline int
mwl_anyhdrsize(const void * data)2551 mwl_anyhdrsize(const void *data)
2552 {
2553 const struct ieee80211_frame *wh = data;
2554
2555 if (IEEE80211_IS_CTL(wh)) {
2556 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2557 case IEEE80211_FC0_SUBTYPE_CTS:
2558 case IEEE80211_FC0_SUBTYPE_ACK:
2559 return sizeof(struct ieee80211_frame_ack);
2560 case IEEE80211_FC0_SUBTYPE_BAR:
2561 return sizeof(struct mwl_frame_bar);
2562 }
2563 return sizeof(struct ieee80211_frame_min);
2564 } else
2565 return ieee80211_hdrsize(data);
2566 }
2567
2568 static void
mwl_handlemicerror(struct ieee80211com * ic,const uint8_t * data)2569 mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2570 {
2571 const struct ieee80211_frame *wh;
2572 struct ieee80211_node *ni;
2573
2574 wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2575 ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2576 if (ni != NULL) {
2577 ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2578 ieee80211_free_node(ni);
2579 }
2580 }
2581
2582 /*
2583 * Convert hardware signal strength to rssi. The value
2584 * provided by the device has the noise floor added in;
2585 * we need to compensate for this but we don't have that
2586 * so we use a fixed value.
2587 *
2588 * The offset of 8 is good for both 2.4 and 5GHz. The LNA
2589 * offset is already set as part of the initial gain. This
2590 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2591 */
2592 static __inline int
cvtrssi(uint8_t ssi)2593 cvtrssi(uint8_t ssi)
2594 {
2595 int rssi = (int) ssi + 8;
2596 /* XXX hack guess until we have a real noise floor */
2597 rssi = 2*(87 - rssi); /* NB: .5 dBm units */
2598 return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
2599 }
2600
/*
 * Rx processing task.  Walk the rx descriptor ring from sc_rxnext,
 * handing up to mwl_rxquota completed frames to net80211.  Each frame's
 * jumbo dma buffer is handed off zero-copy to an mbuf and immediately
 * replaced from the free pool; if the pool runs dry, rx interrupts are
 * disabled until mwl_ext_free replenishes it (npending is presumably
 * the taskqueue pending count — it is only logged here).
 */
static void
mwl_rx_proc(void *arg, int npending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX fixed noise floor guess */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)		/* first pass: start of ring */
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* descriptor still owned by the dma engine: ring drained */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			counter_u64_add(ic->ic_ierrors, 1);
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		m_extadd(m, data, MWL_AGGR_SIZE, mwl_ext_free, sc, NULL, 0,
		    EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh))
			*(uint16_t *)ieee80211_getqos(wh) = ds->QosCtrl;
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
		    IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			/* stash per-antenna info for mwl_node_getmimoinfo */
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	/* remember where to resume on the next invocation */
	sc->sc_rxnext = bf;

	if (mbufq_first(&sc->sc_snd) != NULL) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(sc);
	}
}
2821
/*
 * One-time init of a tx queue: set its number, and chain every
 * buffer's descriptor to the next buffer's bus address, wrapping the
 * last back to the first so the hardware sees a circular ring.
 */
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
	struct mwl_txbuf *bf, *bn;
	struct mwl_txdesc *ds;

	MWL_TXQ_LOCK_INIT(sc, txq);
	txq->qnum = qnum;
	txq->txpri = 0;	/* XXX */
#if 0
	/* NB: q setup by mwl_txdma_setup XXX */
	STAILQ_INIT(&txq->free);
#endif
	STAILQ_FOREACH(bf, &txq->free, bf_list) {
		bf->bf_txq = txq;

		ds = bf->bf_desc;
		bn = STAILQ_NEXT(bf, bf_list);
		if (bn == NULL)		/* last entry wraps to the head */
			bn = STAILQ_FIRST(&txq->free);
		ds->pPhysNext = htole32(bn->bf_daddr);
	}
	STAILQ_INIT(&txq->active);
}
2846
2847 /*
2848 * Setup a hardware data transmit queue for the specified
2849 * access control. We record the mapping from ac's
2850 * to h/w queues for use by mwl_tx_start.
2851 */
2852 static int
mwl_tx_setup(struct mwl_softc * sc,int ac,int mvtype)2853 mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2854 {
2855 struct mwl_txq *txq;
2856
2857 if (ac >= nitems(sc->sc_ac2q)) {
2858 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2859 ac, nitems(sc->sc_ac2q));
2860 return 0;
2861 }
2862 if (mvtype >= MWL_NUM_TX_QUEUES) {
2863 device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2864 mvtype, MWL_NUM_TX_QUEUES);
2865 return 0;
2866 }
2867 txq = &sc->sc_txq[mvtype];
2868 mwl_txq_init(sc, txq, mvtype);
2869 sc->sc_ac2q[ac] = txq;
2870 return 1;
2871 }
2872
2873 /*
2874 * Update WME parameters for a transmit queue.
2875 */
2876 static int
mwl_txq_update(struct mwl_softc * sc,int ac)2877 mwl_txq_update(struct mwl_softc *sc, int ac)
2878 {
2879 #define MWL_EXPONENT_TO_VALUE(v) ((1<<v)-1)
2880 struct ieee80211com *ic = &sc->sc_ic;
2881 struct chanAccParams chp;
2882 struct mwl_txq *txq = sc->sc_ac2q[ac];
2883 struct wmeParams *wmep;
2884 struct mwl_hal *mh = sc->sc_mh;
2885 int aifs, cwmin, cwmax, txoplim;
2886
2887 ieee80211_wme_ic_getparams(ic, &chp);
2888 wmep = &chp.cap_wmeParams[ac];
2889
2890 aifs = wmep->wmep_aifsn;
2891 /* XXX in sta mode need to pass log values for cwmin/max */
2892 cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2893 cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2894 txoplim = wmep->wmep_txopLimit; /* NB: units of 32us */
2895
2896 if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
2897 device_printf(sc->sc_dev, "unable to update hardware queue "
2898 "parameters for %s traffic!\n",
2899 ieee80211_wme_acnames[ac]);
2900 return 0;
2901 }
2902 return 1;
2903 #undef MWL_EXPONENT_TO_VALUE
2904 }
2905
2906 /*
2907 * Callback from the 802.11 layer to update WME parameters.
2908 */
2909 static int
mwl_wme_update(struct ieee80211com * ic)2910 mwl_wme_update(struct ieee80211com *ic)
2911 {
2912 struct mwl_softc *sc = ic->ic_softc;
2913
2914 return !mwl_txq_update(sc, WME_AC_BE) ||
2915 !mwl_txq_update(sc, WME_AC_BK) ||
2916 !mwl_txq_update(sc, WME_AC_VI) ||
2917 !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
2918 }
2919
2920 /*
2921 * Reclaim resources for a setup queue.
2922 */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* XXX hal work? */
	/* only the queue lock needs tearing down; dma state is reclaimed
	 * separately by mwl_txdma_cleanup.  sc is unused. */
	MWL_TXQ_LOCK_DESTROY(txq);
}
2929
2930 /*
2931 * Reclaim all tx queue resources.
2932 */
2933 static void
mwl_tx_cleanup(struct mwl_softc * sc)2934 mwl_tx_cleanup(struct mwl_softc *sc)
2935 {
2936 int i;
2937
2938 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2939 mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
2940 }
2941
/*
 * DMA-map an outbound mbuf chain into bf->bf_segs.  If it needs more
 * than MWL_TXDESC segments the chain is linearized and re-loaded.
 * On success bf takes ownership of the mbuf (bf->bf_m); on any error
 * the mbuf is freed and an errno returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = MWL_TXDESC+1;	/* flag for the check below */
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		/* NB: m_collapse/m_defrag consumed the original chain */
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3003
/*
 * Map a legacy 802.11 rate (in 500Kb/s units) to the firmware
 * rate index used in the tx descriptor Format field; unknown
 * rates map to index 0 (1Mb/s).  NB: table order defines the
 * index and mirrors mwl_cvtlegacyrix below.
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int dot11rates[13] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ix;

	for (ix = 0; ix < 13; ix++)
		if (dot11rates[ix] == rate)
			return ix;
	return 0;
}
3024
3025 /*
3026 * Calculate fixed tx rate information per client state;
3027 * this value is suitable for writing to the Format field
3028 * of a tx descriptor.
3029 */
static uint16_t
mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
{
	uint16_t fmt;

	/* antenna config plus extension-channel placement (HT40D => low) */
	fmt = _IEEE80211_SHIFTMASK(3, EAGLE_TXD_ANTENNA)
	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
	        EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
		fmt |= EAGLE_TXD_FORMAT_HT
		    /* NB: 0x80 implicitly stripped from ucastrate */
		    | _IEEE80211_SHIFTMASK(rate, EAGLE_TXD_RATE);
		/* XXX short/long GI may be wrong; re-check */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			/* 40MHz channel: pick GI from the peer's HT caps */
			fmt |= EAGLE_TXD_CHW_40
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		} else {
			fmt |= EAGLE_TXD_CHW_20
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		}
	} else {			/* legacy rate */
		fmt |= EAGLE_TXD_FORMAT_LEGACY
		    | _IEEE80211_SHIFTMASK(mwl_cvtlegacyrate(rate),
			EAGLE_TXD_RATE)
		    | EAGLE_TXD_CHW_20
		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
		        EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
	}
	return fmt;
}
3063
/*
 * Prepare and hand one frame to the firmware: encrypt if needed,
 * prepend the firmware tx record, DMA-map the mbuf, fill in the
 * tx descriptor (segments, rate format, priority queue), and queue
 * it FW_OWNED on the buffer's tx queue.  Consumes m0 on error.
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
	struct mbuf *m0)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	pktlen = m0->m_pkthdr.len;
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		/* NB: raw little-endian QoS field, used for BA stream match */
		qos = *(uint16_t *)ieee80211_getqos(wh);
	} else
		qos = 0;

	ieee80211_output_seqno_assign(ni, -1, m0);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		/*
		 * NB: leading space was verified above so the in-place
		 * prepend cannot fail (no allocation) — presumably why
		 * there is no NULL check here; confirm if M_PREPEND
		 * semantics ever change.
		 */
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/*
			 * Route the frame to the tx queue of any matching
			 * BA stream; otherwise use the default queue.
			 * NB: EAPOL frames will never have qos set.
			 */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	MWL_TXQ_LOCK(txq);
	/* hand ownership to the firmware and start the tx watchdog */
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_tx_timer = 5;
	MWL_TXQ_UNLOCK(txq);

	return 0;
}
3302
/*
 * Map a firmware legacy rate index back to an 802.11 rate in
 * 500Kb/s units; out-of-range indices (including negative, which
 * the original's unsigned compare also rejected) yield 0.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	switch (rix) {
	case 0:	 return 2;	/* 1 Mb/s */
	case 1:	 return 4;	/* 2 Mb/s */
	case 2:	 return 11;	/* 5.5 Mb/s */
	case 3:	 return 22;	/* 11 Mb/s */
	case 4:	 return 44;	/* 22 Mb/s */
	case 5:	 return 12;	/* 6 Mb/s */
	case 6:	 return 18;	/* 9 Mb/s */
	case 7:	 return 24;	/* 12 Mb/s */
	case 8:	 return 36;	/* 18 Mb/s */
	case 9:	 return 48;	/* 24 Mb/s */
	case 10: return 72;	/* 36 Mb/s */
	case 11: return 96;	/* 48 Mb/s */
	case 12: return 108;	/* 54 Mb/s */
	default: return 0;	/* out of range */
	}
}
3310
3311 /*
3312 * Process completed xmit descriptors from the specified queue.
3313 */
/*
 * Reap completed tx descriptors from one queue: pop buffers the
 * firmware has released, account statistics and tx rate, complete
 * the frames back to net80211, and return the buffers to the free
 * list.  Returns the number of buffers reaped.
 */
static int
mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
{
/* NB: currently unused in this function */
#define	EAGLE_TXD_STATUS_MCAST \
	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;
	struct ieee80211_node *ni;
	int nreaped;
	uint32_t status;

	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
	for (nreaped = 0;; nreaped++) {
		/* peek at the head under the lock; stop at fw-owned desc */
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		ds = bf->bf_desc;
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);

#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
			mwl_printtxbuf(bf, txq->qnum, nreaped);
#endif
		ni = bf->bf_node;
		if (ni != NULL) {
			status = le32toh(ds->Status);
			int rate;
			if (status & EAGLE_TXD_STATUS_OK) {
				uint16_t Format = le16toh(ds->Format);
				uint8_t txant = _IEEE80211_MASKSHIFT(Format,
				    EAGLE_TXD_ANTENNA);

				sc->sc_stats.mst_ant_tx[txant]++;
				if (status & EAGLE_TXD_STATUS_OK_RETRY)
					sc->sc_stats.mst_tx_retries++;
				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
					sc->sc_stats.mst_tx_mretries++;
				if (txq->qnum >= MWL_WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				/* recover the tx rate used from the desc */
				rate = _IEEE80211_MASKSHIFT(Format,
				    EAGLE_TXD_RATE);
				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
					rate = mwl_cvtlegacyrix(rate);
				} else
					rate |= IEEE80211_RATE_MCS;
				sc->sc_stats.mst_tx_rate = rate;
				ieee80211_node_set_txrate_dot11rate(ni, rate);
			} else {
				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
					sc->sc_stats.mst_tx_linkerror++;
				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
					sc->sc_stats.mst_tx_xretries++;
				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
					sc->sc_stats.mst_tx_aging++;
				if (bf->bf_m->m_flags & M_FF)
					sc->sc_stats.mst_ff_txerr++;
			}
			if (bf->bf_m->m_flags & M_TXCB)
				/* XXX strip fw len in case header inspected */
				m_adj(bf->bf_m, sizeof(uint16_t));
			/* completes the frame and releases the node ref */
			ieee80211_tx_complete(ni, bf->bf_m,
			    (status & EAGLE_TXD_STATUS_OK) == 0);
		} else
			m_freem(bf->bf_m);
		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);

		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		mwl_puttxbuf_tail(txq, bf);
	}
	return nreaped;
#undef EAGLE_TXD_STATUS_MCAST
}
3400
3401 /*
3402 * Deferred processing of transmit interrupt; special-cased
3403 * for four hardware queues, 0-3.
3404 */
3405 static void
mwl_tx_proc(void * arg,int npending)3406 mwl_tx_proc(void *arg, int npending)
3407 {
3408 struct mwl_softc *sc = arg;
3409 int nreaped;
3410
3411 /*
3412 * Process each active queue.
3413 */
3414 nreaped = 0;
3415 if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3416 nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3417 if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3418 nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3419 if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3420 nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3421 if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3422 nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3423
3424 if (nreaped != 0) {
3425 sc->sc_tx_timer = 0;
3426 if (mbufq_first(&sc->sc_snd) != NULL) {
3427 /* NB: kick fw; the tx thread may have been preempted */
3428 mwl_hal_txstart(sc->sc_mh, 0);
3429 mwl_start(sc);
3430 }
3431 }
3432 }
3433
/*
 * Drain one tx queue: unload DMA mappings, release node references,
 * and free the mbufs of all frames still pending on the hardware.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix __unused;	/* NB: only used by the debug dump below */

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		/* pop the head buffer under the queue lock */
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ieee80211com *ic = &sc->sc_ic;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
			    bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3477
3478 /*
3479 * Drain the transmit queues and reclaim resources.
3480 */
3481 static void
mwl_draintxq(struct mwl_softc * sc)3482 mwl_draintxq(struct mwl_softc *sc)
3483 {
3484 int i;
3485
3486 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3487 mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3488 sc->sc_tx_timer = 0;
3489 }
3490
3491 #ifdef MWL_DIAGAPI
3492 /*
3493 * Reset the transmit queues to a pristine state after a fw download.
3494 */
3495 static void
mwl_resettxq(struct mwl_softc * sc)3496 mwl_resettxq(struct mwl_softc *sc)
3497 {
3498 int i;
3499
3500 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3501 mwl_txq_reset(sc, &sc->sc_txq[i]);
3502 }
3503 #endif /* MWL_DIAGAPI */
3504
3505 /*
3506 * Clear the transmit queues of any frames submitted for the
3507 * specified vap. This is done when the vap is deleted so we
3508 * don't potentially reference the vap after it is gone.
3509 * Note we cannot remove the frames; we only reclaim the node
3510 * reference.
3511 */
3512 static void
mwl_cleartxq(struct mwl_softc * sc,struct ieee80211vap * vap)3513 mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3514 {
3515 struct mwl_txq *txq;
3516 struct mwl_txbuf *bf;
3517 int i;
3518
3519 for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3520 txq = &sc->sc_txq[i];
3521 MWL_TXQ_LOCK(txq);
3522 STAILQ_FOREACH(bf, &txq->active, bf_list) {
3523 struct ieee80211_node *ni = bf->bf_node;
3524 if (ni != NULL && ni->ni_vap == vap) {
3525 bf->bf_node = NULL;
3526 ieee80211_free_node(ni);
3527 }
3528 }
3529 MWL_TXQ_UNLOCK(txq);
3530 }
3531 }
3532
3533 static int
mwl_recv_action(struct ieee80211_node * ni,const struct ieee80211_frame * wh,const uint8_t * frm,const uint8_t * efrm)3534 mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3535 const uint8_t *frm, const uint8_t *efrm)
3536 {
3537 struct mwl_softc *sc = ni->ni_ic->ic_softc;
3538 const struct ieee80211_action *ia;
3539
3540 ia = (const struct ieee80211_action *) frm;
3541 if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3542 ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3543 const struct ieee80211_action_ht_mimopowersave *mps =
3544 (const struct ieee80211_action_ht_mimopowersave *) ia;
3545
3546 mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3547 mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3548 _IEEE80211_MASKSHIFT(mps->am_control,
3549 IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3550 return 0;
3551 } else
3552 return sc->sc_recv_action(ni, wh, frm, efrm);
3553 }
3554
/*
 * Intercept net80211's ADDBA request: reserve a per-node BA stream
 * slot and a firmware bastream before letting the request go out,
 * so the later ADDBA response can bind the stream.  Returning 0
 * aborts the request (no a-mpdu for this tid).
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 * NB: the #if ladder unrolls the search over the
		 * compile-time MWL_MAXBA slots, highest first.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else 
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	/* chain to net80211 to actually send the ADDBA frame */
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3626
/*
 * Intercept the ADDBA response: on success bind the pre-allocated
 * firmware bastream (created in mwl_addba_request) to the tid and
 * tx queue; on NAK return the stream resources.  Returning 0
 * disables a-mpdu aggregation for the tid.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3693
3694 static void
mwl_addba_stop(struct ieee80211_node * ni,struct ieee80211_tx_ampdu * tap)3695 mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3696 {
3697 struct mwl_softc *sc = ni->ni_ic->ic_softc;
3698 struct mwl_bastate *bas;
3699
3700 bas = tap->txa_private;
3701 if (bas != NULL) {
3702 DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3703 __func__, bas->bastream);
3704 mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3705 mwl_bastream_free(bas);
3706 tap->txa_private = NULL;
3707 }
3708 sc->sc_addba_stop(ni, tap);
3709 }
3710
3711 /*
3712 * Setup the rx data structures. This should only be
3713 * done once or we may get out of sync with the firmware.
3714 */
static int
mwl_startrecv(struct mwl_softc *sc)
{
	/* NB: one-shot setup; redoing it would desync us from the fw */
	if (!sc->sc_recvsetup) {
		struct mwl_rxbuf *bf, *prev;
		struct mwl_rxdesc *ds;

		prev = NULL;
		/*
		 * Initialize each rx buffer and chain the descriptors
		 * together via pPhysNext as we go.
		 */
		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			int error = mwl_rxbuf_init(sc, bf);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_RECV,
					"%s: mwl_rxbuf_init failed %d\n",
					__func__, error);
				return error;
			}
			if (prev != NULL) {
				ds = prev->bf_desc;
				ds->pPhysNext = htole32(bf->bf_daddr);
			}
			prev = bf;
		}
		/* close the ring: last descriptor points back to the first */
		if (prev != NULL) {
			ds = prev->bf_desc;
			ds->pPhysNext =
			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
		}
		sc->sc_recvsetup = 1;
	}
	mwl_mode_init(sc);		/* set filters, etc. */
	return 0;
}
3747
3748 static MWL_HAL_APMODE
mwl_getapmode(const struct ieee80211vap * vap,struct ieee80211_channel * chan)3749 mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3750 {
3751 MWL_HAL_APMODE mode;
3752
3753 if (IEEE80211_IS_CHAN_HT(chan)) {
3754 if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3755 mode = AP_MODE_N_ONLY;
3756 else if (IEEE80211_IS_CHAN_5GHZ(chan))
3757 mode = AP_MODE_AandN;
3758 else if (vap->iv_flags & IEEE80211_F_PUREG)
3759 mode = AP_MODE_GandN;
3760 else
3761 mode = AP_MODE_BandGandN;
3762 } else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3763 if (vap->iv_flags & IEEE80211_F_PUREG)
3764 mode = AP_MODE_G_ONLY;
3765 else
3766 mode = AP_MODE_MIXED;
3767 } else if (IEEE80211_IS_CHAN_B(chan))
3768 mode = AP_MODE_B_ONLY;
3769 else if (IEEE80211_IS_CHAN_A(chan))
3770 mode = AP_MODE_A_ONLY;
3771 else
3772 mode = AP_MODE_MIXED; /* XXX should not happen? */
3773 return mode;
3774 }
3775
3776 static int
mwl_setapmode(struct ieee80211vap * vap,struct ieee80211_channel * chan)3777 mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3778 {
3779 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3780 return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3781 }
3782
3783 /*
3784 * Set/change channels.
3785 */
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211com *ic = &sc->sc_ic;
	MWL_HAL_CHANNEL hchan;
	int maxtxpow;

	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	/*
	 * Convert to a HAL channel description with
	 * the flags constrained to reflect the current
	 * operating mode.
	 */
	mwl_mapchan(&hchan, chan);
	mwl_hal_intrset(mh, 0);		/* disable interrupts */
#if 0
	mwl_draintxq(sc);		/* clear pending tx frames */
#endif
	mwl_hal_setchannel(mh, &hchan);
	/*
	 * Tx power is cap'd by the regulatory setting and
	 * possibly a user-set limit.  We pass the min of
	 * these to the hal to apply them to the cal data
	 * for this channel.
	 * XXX min bound?
	 */
	/* NB: regpower is in dBm, limits below are in .5 dBm units */
	maxtxpow = 2*chan->ic_maxregpower;
	if (maxtxpow > ic->ic_txpowlimit)
		maxtxpow = ic->ic_txpowlimit;
	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
	/* NB: potentially change mcast/mgt rates */
	mwl_setcurchanrates(sc);

	/*
	 * Update internal state; radiotap channel info for tx/rx.
	 */
	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
	if (IEEE80211_IS_CHAN_A(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
	} else {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
	}
	sc->sc_curchan = hchan;
	mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable interrupts */

	return 0;
}
3842
/*
 * net80211 scan-start callback; nothing to do for this hardware
 * beyond the debug trace.
 */
static void
mwl_scan_start(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3850
/*
 * net80211 scan-end callback; nothing to do for this hardware
 * beyond the debug trace.
 */
static void
mwl_scan_end(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3858
/*
 * net80211 channel-change callback; push the current channel to
 * the hardware.  NB: errors from mwl_chan_set are ignored here
 * since the callback has no way to report them.
 */
static void
mwl_set_channel(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	(void) mwl_chan_set(sc, ic->ic_curchan);
}
3866
3867 /*
3868 * Handle a channel switch request. We inform the firmware
3869 * and mark the global state to suppress various actions.
3870 * NB: we issue only one request to the fw; we may be called
3871 * multiple times if there are multiple vap's.
3872 */
static void
mwl_startcsa(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct mwl_softc *sc = ic->ic_softc;
	MWL_HAL_CHANNEL hchan;

	/* NB: only one fw request no matter how many vaps call in */
	if (sc->sc_csapending)
		return;

	mwl_mapchan(&hchan, ic->ic_csa_newchan);
	/* 1 =>'s quiet channel */
	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
	sc->sc_csapending = 1;
}
3888
3889 /*
3890 * Plumb any static WEP key for the station. This is
3891 * necessary as we must propagate the key from the
3892 * global key table of the vap to each sta db entry.
3893 */
3894 static void
mwl_setanywepkey(struct ieee80211vap * vap,const uint8_t mac[IEEE80211_ADDR_LEN])3895 mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3896 {
3897 if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
3898 IEEE80211_F_PRIVACY &&
3899 vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
3900 vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
3901 (void) _mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey],
3902 mac);
3903 }
3904
/*
 * Create/update the firmware station db entry for a peer and
 * (re)plumb any static WEP key, since mwl_hal_newstation clobbers
 * previously installed crypto state.
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
3938
3939 static void
mwl_setglobalkeys(struct ieee80211vap * vap)3940 mwl_setglobalkeys(struct ieee80211vap *vap)
3941 {
3942 struct ieee80211_key *wk;
3943
3944 wk = &vap->iv_nw_keys[0];
3945 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
3946 if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
3947 (void) _mwl_key_set(vap, wk, vap->iv_myaddr);
3948 }
3949
3950 /*
3951 * Convert a legacy rate set to a firmware bitmask.
3952 */
3953 static uint32_t
get_rate_bitmap(const struct ieee80211_rateset * rs)3954 get_rate_bitmap(const struct ieee80211_rateset *rs)
3955 {
3956 uint32_t rates;
3957 int i;
3958
3959 rates = 0;
3960 for (i = 0; i < rs->rs_nrates; i++)
3961 switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
3962 case 2: rates |= 0x001; break;
3963 case 4: rates |= 0x002; break;
3964 case 11: rates |= 0x004; break;
3965 case 22: rates |= 0x008; break;
3966 case 44: rates |= 0x010; break;
3967 case 12: rates |= 0x020; break;
3968 case 18: rates |= 0x040; break;
3969 case 24: rates |= 0x080; break;
3970 case 36: rates |= 0x100; break;
3971 case 48: rates |= 0x200; break;
3972 case 72: rates |= 0x400; break;
3973 case 96: rates |= 0x800; break;
3974 case 108: rates |= 0x1000; break;
3975 }
3976 return rates;
3977 }
3978
3979 /*
3980 * Construct an HT firmware bitmask from an HT rate set.
3981 */
3982 static uint32_t
get_htrate_bitmap(const struct ieee80211_htrateset * rs)3983 get_htrate_bitmap(const struct ieee80211_htrateset *rs)
3984 {
3985 uint32_t rates;
3986 int i;
3987
3988 rates = 0;
3989 for (i = 0; i < rs->rs_nrates; i++) {
3990 if (rs->rs_rates[i] < 16)
3991 rates |= 1<<rs->rs_rates[i];
3992 }
3993 return rates;
3994 }
3995
/*
 * Craft station database entry for station.
 * NB: use host byte order here, the hal handles byte swapping.
 *
 * Returns pi so the call can be nested directly in a
 * mwl_peerstadb/mwl_hal_newstation argument list.
 */
static MWL_HAL_PEERINFO *
mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
{
	const struct ieee80211vap *vap = ni->ni_vap;

	memset(pi, 0, sizeof(*pi));
	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
	pi->CapInfo = ni->ni_capinfo;
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* HT capabilities, etc */
		pi->HTCapabilitiesInfo = ni->ni_htcap;
		/* XXX pi.HTCapabilitiesInfo */
		pi->MacHTParamInfo = ni->ni_htparam;
		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
		pi->AddHtInfo.OpMode = ni->ni_htopmode;
		pi->AddHtInfo.stbc = ni->ni_htstbc;

		/* constrain according to local configuration */
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
		/* don't claim 40MHz capability unless the sta is rx'ing 40MHz */
		if (ni->ni_chw != NET80211_STA_RX_BW_40)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
	}
	return pi;
}
4029
/*
 * Re-create the local sta db entry for a vap to ensure
 * up to date WME state is pushed to the firmware. Because
 * this resets crypto state this must be followed by a
 * reload of any keys in the global key table.
 */
static int
mwl_localstadb(struct ieee80211vap *vap)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *bss;
	MWL_HAL_PEERINFO pi;
	int error;

	switch (vap->iv_opmode) {
	case IEEE80211_M_STA:
		/*
		 * Station mode: pass peer info only once RUN so the
		 * bss node's capabilities are valid; QoS/HT flags and
		 * the WME info byte come from the bss node.
		 */
		bss = vap->iv_bss;
		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
		    vap->iv_state == IEEE80211_S_RUN ?
			mkpeerinfo(&pi, bss) : NULL,
		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
		    bss->ni_ies.wme_ie != NULL ?
			WME(bss->ni_ies.wme_ie)->wme_info : 0);
		if (error == 0)
			/* re-plumb global keys clobbered by newstation */
			mwl_setglobalkeys(vap);
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
		if (error == 0)
			/* re-plumb global keys clobbered by newstation */
			mwl_setglobalkeys(vap);
		break;
	default:
		/* other opmodes have no local sta db entry */
		error = 0;
		break;
	}
	return error;
#undef WME
}
4071
/*
 * 802.11 state machine hook.  Carries out device-specific work
 * around the net80211 state transition: radar-detection bookkeeping,
 * f/w vap start/stop, sta db and beacon setup, and firmware timers.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    if_name(vap->iv_ifp), __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* stop the firmware-prodding age timer; re-armed below on RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed. This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    if_name(vap->iv_ifp), __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    if_name(vap->iv_ifp), __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* first DWDS sta vap to reach RUN enables DWDS */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    if_name(vap->iv_ifp), __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/*
		 * NOTE(review): this branch runs for any transition of a
		 * DWDS vap that is not a successful RUN and not SLEEP, so
		 * sc_ndwdsvaps is decremented on each such transition —
		 * confirm it cannot underflow on repeated non-RUN moves.
		 */
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4224
4225 /*
4226 * Manage station id's; these are separate from AID's
4227 * as AID's may have values out of the range of possible
4228 * station id's acceptable to the firmware.
4229 */
4230 static int
allocstaid(struct mwl_softc * sc,int aid)4231 allocstaid(struct mwl_softc *sc, int aid)
4232 {
4233 int staid;
4234
4235 if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4236 /* NB: don't use 0 */
4237 for (staid = 1; staid < MWL_MAXSTAID; staid++)
4238 if (isclr(sc->sc_staid, staid))
4239 break;
4240 } else
4241 staid = aid;
4242 setbit(sc->sc_staid, staid);
4243 return staid;
4244 }
4245
/*
 * Release a station id previously handed out by allocstaid.
 */
static void
delstaid(struct mwl_softc *sc, int staid)
{
	clrbit(sc->sc_staid, staid);
}
4251
4252 /*
4253 * Setup driver-specific state for a newly associated node.
4254 * Note that we're called also on a re-associate, the isnew
4255 * param tells us if this is the first time or not.
4256 */
4257 static void
mwl_newassoc(struct ieee80211_node * ni,int isnew)4258 mwl_newassoc(struct ieee80211_node *ni, int isnew)
4259 {
4260 struct ieee80211vap *vap = ni->ni_vap;
4261 struct mwl_softc *sc = vap->iv_ic->ic_softc;
4262 struct mwl_node *mn = MWL_NODE(ni);
4263 MWL_HAL_PEERINFO pi;
4264 uint16_t aid;
4265 int error;
4266
4267 aid = IEEE80211_AID(ni->ni_associd);
4268 if (isnew) {
4269 mn->mn_staid = allocstaid(sc, aid);
4270 mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4271 } else {
4272 mn = MWL_NODE(ni);
4273 /* XXX reset BA stream? */
4274 }
4275 DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4276 __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4277 error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4278 if (error != 0) {
4279 DPRINTF(sc, MWL_DEBUG_NODE,
4280 "%s: error %d creating sta db entry\n",
4281 __func__, error);
4282 /* XXX how to deal with error? */
4283 }
4284 }
4285
/*
 * Periodically poke the firmware to age out station state
 * (power save queues, pending tx aggregates).
 */
static void
mwl_agestations(void *arg)
{
	struct mwl_softc *sc = arg;

	mwl_hal_setkeepalive(sc->sc_mh);
	/* re-arm only while aging is enabled */
	if (sc->sc_ageinterval != 0)	/* NB: catch dynamic changes */
		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
}
4299
4300 static const struct mwl_hal_channel *
findhalchannel(const MWL_HAL_CHANNELINFO * ci,int ieee)4301 findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4302 {
4303 int i;
4304
4305 for (i = 0; i < ci->nchannels; i++) {
4306 const struct mwl_hal_channel *hc = &ci->channels[i];
4307 if (hc->ieee == ieee)
4308 return hc;
4309 }
4310 return NULL;
4311 }
4312
4313 static int
mwl_setregdomain(struct ieee80211com * ic,struct ieee80211_regdomain * rd,int nchan,struct ieee80211_channel chans[])4314 mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4315 int nchan, struct ieee80211_channel chans[])
4316 {
4317 struct mwl_softc *sc = ic->ic_softc;
4318 struct mwl_hal *mh = sc->sc_mh;
4319 const MWL_HAL_CHANNELINFO *ci;
4320 int i;
4321
4322 for (i = 0; i < nchan; i++) {
4323 struct ieee80211_channel *c = &chans[i];
4324 const struct mwl_hal_channel *hc;
4325
4326 if (IEEE80211_IS_CHAN_2GHZ(c)) {
4327 mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4328 IEEE80211_IS_CHAN_HT40(c) ?
4329 MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4330 } else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4331 mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4332 IEEE80211_IS_CHAN_HT40(c) ?
4333 MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4334 } else {
4335 device_printf(sc->sc_dev,
4336 "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4337 __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4338 return EINVAL;
4339 }
4340 /*
4341 * Verify channel has cal data and cap tx power.
4342 */
4343 hc = findhalchannel(ci, c->ic_ieee);
4344 if (hc != NULL) {
4345 if (c->ic_maxpower > 2*hc->maxTxPow)
4346 c->ic_maxpower = 2*hc->maxTxPow;
4347 goto next;
4348 }
4349 if (IEEE80211_IS_CHAN_HT40(c)) {
4350 /*
4351 * Look for the extension channel since the
4352 * hal table only has the primary channel.
4353 */
4354 hc = findhalchannel(ci, c->ic_extieee);
4355 if (hc != NULL) {
4356 if (c->ic_maxpower > 2*hc->maxTxPow)
4357 c->ic_maxpower = 2*hc->maxTxPow;
4358 goto next;
4359 }
4360 }
4361 device_printf(sc->sc_dev,
4362 "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4363 __func__, c->ic_ieee, c->ic_extieee,
4364 c->ic_freq, c->ic_flags);
4365 return EINVAL;
4366 next:
4367 ;
4368 }
4369 return 0;
4370 }
4371
4372 #define IEEE80211_CHAN_HTG (IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4373 #define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4374
4375 static void
addht40channels(struct ieee80211_channel chans[],int maxchans,int * nchans,const MWL_HAL_CHANNELINFO * ci,int flags)4376 addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4377 const MWL_HAL_CHANNELINFO *ci, int flags)
4378 {
4379 int i, error;
4380
4381 for (i = 0; i < ci->nchannels; i++) {
4382 const struct mwl_hal_channel *hc = &ci->channels[i];
4383
4384 error = ieee80211_add_channel_ht40(chans, maxchans, nchans,
4385 hc->ieee, hc->maxTxPow, flags);
4386 if (error != 0 && error != ENOENT)
4387 break;
4388 }
4389 }
4390
4391 static void
addchannels(struct ieee80211_channel chans[],int maxchans,int * nchans,const MWL_HAL_CHANNELINFO * ci,const uint8_t bands[])4392 addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4393 const MWL_HAL_CHANNELINFO *ci, const uint8_t bands[])
4394 {
4395 int i, error;
4396
4397 error = 0;
4398 for (i = 0; i < ci->nchannels && error == 0; i++) {
4399 const struct mwl_hal_channel *hc = &ci->channels[i];
4400
4401 error = ieee80211_add_channel(chans, maxchans, nchans,
4402 hc->ieee, hc->freq, hc->maxTxPow, 0, bands);
4403 }
4404 }
4405
/*
 * Build the channel list from the hal's calibration data:
 * 20MHz 2.4GHz (b/g/ng), 20MHz 5GHz (a/na), then HT40 channels
 * for each band.  The list is returned unsorted.
 */
static void
getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
	struct ieee80211_channel chans[])
{
	const MWL_HAL_CHANNELINFO *ci;
	uint8_t bands[IEEE80211_MODE_BYTES];

	/*
	 * Use the channel info from the hal to craft the
	 * channel list. Note that we pass back an unsorted
	 * list; the caller is required to sort it for us
	 * (if desired).
	 */
	*nchans = 0;
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11B);
		setbit(bands, IEEE80211_MODE_11G);
		setbit(bands, IEEE80211_MODE_11NG);
		addchannels(chans, maxchans, nchans, ci, bands);
	}
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		setbit(bands, IEEE80211_MODE_11NA);
		addchannels(chans, maxchans, nchans, ci, bands);
	}
	/* HT40 entries are derived from the 40MHz-width hal tables */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
}
4442
/*
 * net80211 ic_getradiocaps hook: report the device's channel set.
 */
static void
mwl_getradiocaps(struct ieee80211com *ic,
	int maxchans, int *nchans, struct ieee80211_channel chans[])
{
	struct mwl_softc *sc = ic->ic_softc;

	getchannels(sc, maxchans, nchans, chans);
}
4451
/*
 * Populate ic_channels from the hal at attach time and install a
 * default (debug) regulatory domain.  Returns EIO if the hal
 * yields no channels.
 */
static int
mwl_getchannels(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/*
	 * Use the channel info from the hal to craft the
	 * channel list for net80211. Note that we pass up
	 * an unsorted list; net80211 will sort it for us.
	 */
	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
	ic->ic_nchans = 0;
	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);

	/* default regdomain until userland configures one */
	ic->ic_regdomain.regdomain = SKU_DEBUG;
	ic->ic_regdomain.country = CTRY_DEFAULT;
	ic->ic_regdomain.location = 'I';
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
	ic->ic_regdomain.isocc[1] = ' ';
	return (ic->ic_nchans == 0 ? EIO : 0);
}
4473 #undef IEEE80211_CHAN_HTA
4474 #undef IEEE80211_CHAN_HTG
4475
4476 #ifdef MWL_DEBUG
/*
 * Debug aid: dump one rx descriptor.  The '*'/'!' tag marks
 * f/w-owned descriptors as ok/bad based on the status word.
 */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	/*
	 * NOTE(review): the STAT field prints the raw ds->Status while
	 * the ok/bad check uses the byte-swapped copy — confirm the raw
	 * value is what's intended here.
	 */
	printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
	    "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
	    le32toh(ds->pPhysBuffData), ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
		"" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
4492
/*
 * Debug aid: dump one tx descriptor (and, with MWL_TXDESC > 1,
 * the multi-frame length/pointer arrays).
 */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !");
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
	/* raw hex dump of the descriptor bytes (disabled) */
	{ const uint8_t *cp = (const uint8_t *) ds;
	int i;
	for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
		printf("%02x ", cp[i]);
		if (((i+1) % 16) == 0)
			printf("\n");
	}
	printf("\n");
	}
#endif
}
4534 #endif /* MWL_DEBUG */
4535
4536 #if 0
/* Debug aid (compiled out by the surrounding #if 0): dump the
 * active list of a tx queue after syncing descriptors for the cpu. */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4555 #endif
4556
/*
 * Per-second watchdog.  sc_tx_timer is armed elsewhere when frames
 * are queued; when it counts down to zero here we declare a transmit
 * timeout and use a keepalive command to probe whether the firmware
 * is still responding.
 */
static void
mwl_watchdog(void *arg)
{
	struct mwl_softc *sc = arg;

	/* re-arm first so the watchdog keeps running */
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
		return;

	if (sc->sc_running && !sc->sc_invalid) {
		/* keepalive failure suggests the firmware is hung */
		if (mwl_hal_setkeepalive(sc->sc_mh))
			device_printf(sc->sc_dev,
			    "transmit timeout (firmware hung?)\n");
		else
			device_printf(sc->sc_dev,
			    "transmit timeout\n");
#if 0
		mwl_reset(sc);
mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
#endif
		counter_u64_add(sc->sc_ic.ic_oerrors, 1);
		sc->sc_stats.mst_watchdog++;
	}
}
4581
4582 #ifdef MWL_DIAGAPI
/*
 * Diagnostic interface to the HAL. This is used by various
 * tools to do things like retrieve register contents for
 * debugging. The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatibility.
 */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	/* NB: sizes come from userland; malloc(M_NOWAIT) bounds the
	 * damage by failing large requests with ENOMEM */
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results). Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		/* never copy out more than the hal produced */
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
			    md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* goto-style cleanup; buffers only exist when the flag was set */
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4643
/*
 * Reset the device via the diag ioctl: optionally reload firmware
 * (when md_id is 0), re-fetch h/w specs, re-setup DMA, and reset
 * the driver's tx/rx bookkeeping.  Caller holds the softc lock.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4674 #endif /* MWL_DIAGAPI */
4675
/*
 * net80211 ic_parent hook: bring the device up/down to match the
 * number of running vaps, taking care not to re-init when already
 * running (which would rescan) or during attach/detach.
 */
static void
mwl_parent(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;
	int startall = 0;

	MWL_LOCK(sc);
	if (ic->ic_nrunning > 0) {
		if (sc->sc_running) {
			/*
			 * To avoid rescanning another access point,
			 * do not call mwl_init() here. Instead,
			 * only reflect promisc mode settings.
			 */
			mwl_mode_init(sc);
		} else {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode. In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state. There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid) {
				mwl_init(sc);	/* XXX lose error */
				startall = 1;
			}
		}
	} else
		mwl_stop(sc);
	MWL_UNLOCK(sc);
	/* NB: start vaps after dropping the lock to avoid recursion */
	if (startall)
		ieee80211_start_all(ic);
}
4712
/*
 * Driver-specific ioctls: statistics retrieval plus the optional
 * diag/reset interface.  Unknown commands return ENOTTY so
 * net80211 can try other handlers.
 */
static int
mwl_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
{
	struct mwl_softc *sc = ic->ic_softc;
	struct ifreq *ifr = data;
	int error = 0;

	switch (cmd) {
	case SIOCGMVSTATS:
		/* refresh the h/w counters embedded in sc_stats */
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
#if 0
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets =
		    if_get_counter(ifp, IFCOUNTER_OPACKETS);
		sc->sc_stats.mst_rx_packets =
		    if_get_counter(ifp, IFCOUNTER_IPACKETS);
#endif
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsisentcy in the
		 * statistics. The alternative is to copy the data
		 * to a local structure.
		 */
		return (copyout(&sc->sc_stats, ifr_data_get_ptr(ifr),
		    sizeof (sc->sc_stats)));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		/* NB: reset requires the softc lock (see MWL_LOCK_ASSERT) */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
4755
4756 #ifdef MWL_DEBUG
/*
 * Sysctl handler for the combined debug mask: the hal's debug
 * level occupies the top 8 bits, the driver's mask the low 24.
 */
static int
mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	struct mwl_softc *sc = arg1;
	int debug, error;

	/* compose current value: hal level << 24 | driver mask */
	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
	error = sysctl_handle_int(oidp, &debug, 0, req);
	if (error || !req->newptr)
		return error;
	/* split the new value back out to the hal and the driver */
	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
	sc->sc_debug = debug & 0x00ffffff;
	return 0;
}
4771 #endif /* MWL_DEBUG */
4772
/*
 * Attach sysctl knobs; currently only the debug mask and only
 * when compiled with MWL_DEBUG.
 */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed from the loader tunable/global default */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
4786
/*
 * Announce various information on device/driver attach.
 */
static void
mwl_announce(struct mwl_softc *sc)
{

	/* fw release number packs four version bytes, major first */
	device_printf(sc->sc_dev, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
	    sc->sc_hwspecs.hwVersion,
	    (sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
	    (sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
	    (sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
	    (sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
	    sc->sc_hwspecs.regionCode);
	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;

	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct mwl_txq *txq = sc->sc_ac2q[i];
			device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n",
			    txq->qnum, ieee80211_wme_acnames[i]);
		}
	}
	/* note non-default tunables even when not bootverbose */
	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
		device_printf(sc->sc_dev, "using %u rx descriptors\n", mwl_rxdesc);
	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
		device_printf(sc->sc_dev, "using %u rx buffers\n", mwl_rxbuf);
	if (bootverbose || mwl_txbuf != MWL_TXBUF)
		device_printf(sc->sc_dev, "using %u tx buffers\n", mwl_txbuf);
	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
		device_printf(sc->sc_dev, "multi-bss support\n");
#ifdef MWL_TX_NODROP
	if (bootverbose)
		device_printf(sc->sc_dev, "no tx drop\n");
#endif
}
4824