1 /*-
2 * Copyright (c) 2008-2010 Damien Bergamini <damien.bergamini@free.fr>
3 * Copyright (c) 2013-2014 Kevin Lo
4 * Copyright (c) 2021 James Hastings
5 * Ported to FreeBSD by Jesper Schmitz Mouridsen jsm@FreeBSD.org
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * MediaTek MT7601U 802.11b/g/n WLAN.
22 */
23
24 #include "opt_wlan.h"
25
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/bus.h>
29 #include <sys/endian.h>
30 #include <sys/eventhandler.h>
31 #include <sys/firmware.h>
32 #include <sys/kdb.h>
33 #include <sys/kernel.h>
34 #include <sys/linker.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/module.h>
39 #include <sys/mutex.h>
40 #include <sys/socket.h>
41 #include <sys/sockio.h>
42 #include <sys/sysctl.h>
43
44 #include <net/bpf.h>
45 #include <net/ethernet.h>
46 #include <net/if.h>
47 #include <net/if_arp.h>
48 #include <net/if_dl.h>
49 #include <net/if_media.h>
50 #include <net/if_types.h>
51 #include <net/if_var.h>
52 #include <net80211/ieee80211_var.h>
53 #include <net80211/ieee80211_radiotap.h>
54 #include <net80211/ieee80211_ratectl.h>
55 #include <net80211/ieee80211_regdomain.h>
56 #ifdef IEEE80211_SUPPORT_SUPERG
57 #include <net80211/ieee80211_superg.h>
58 #endif
59 #include <netinet/if_ether.h>
60 #include <netinet/in.h>
61 #include <netinet/in_systm.h>
62 #include <netinet/in_var.h>
63 #include <netinet/ip.h>
64
65 #include <dev/usb/usb.h>
66 #include <dev/usb/usbdi.h>
67
68 #include "usbdevs.h"
69
70 #define USB_DEBUG_VAR mtw_debug
71 #include <dev/usb/usb_debug.h>
72 #include <dev/usb/usb_msctest.h>
73
74 #include "if_mtwreg.h"
75 #include "if_mtwvar.h"
76
77 #define MTW_DEBUG
78
79 #ifdef MTW_DEBUG
80 int mtw_debug;
81 static SYSCTL_NODE(_hw_usb, OID_AUTO, mtw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
82 "USB mtw");
83 SYSCTL_INT(_hw_usb_mtw, OID_AUTO, debug, CTLFLAG_RWTUN, &mtw_debug, 0,
84 "mtw debug level");
85
86 enum {
87 MTW_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
88 MTW_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
89 MTW_DEBUG_RECV = 0x00000004, /* basic recv operation */
90 MTW_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */
91 MTW_DEBUG_STATE = 0x00000010, /* 802.11 state transitions */
92 MTW_DEBUG_RATE = 0x00000020, /* rate adaptation */
93 MTW_DEBUG_USB = 0x00000040, /* usb requests */
94 MTW_DEBUG_FIRMWARE = 0x00000080, /* firmware(9) loading debug */
95 MTW_DEBUG_BEACON = 0x00000100, /* beacon handling */
96 MTW_DEBUG_INTR = 0x00000200, /* ISR */
97 MTW_DEBUG_TEMP = 0x00000400, /* temperature calibration */
98 MTW_DEBUG_ROM = 0x00000800, /* various ROM info */
99 MTW_DEBUG_KEY = 0x00001000, /* crypto keys management */
100 MTW_DEBUG_TXPWR = 0x00002000, /* dump Tx power values */
101 MTW_DEBUG_RSSI = 0x00004000, /* dump RSSI lookups */
102 MTW_DEBUG_RESET = 0x00008000, /* initialization progress */
103 MTW_DEBUG_CALIB = 0x00010000, /* calibration progress */
104 MTW_DEBUG_CMD = 0x00020000, /* command queue */
105 MTW_DEBUG_ANY = 0xffffffff
106 };
107
108 #define MTW_DPRINTF(_sc, _m, ...) \
109 do { \
110 if (mtw_debug & (_m)) \
111 device_printf((_sc)->sc_dev, __VA_ARGS__); \
112 } while (0)
113
114 #else
115 #define MTW_DPRINTF(_sc, _m, ...) \
116 do { \
117 (void)_sc; \
118 } while (0)
119 #endif
120
121 #define IEEE80211_HAS_ADDR4(wh) IEEE80211_IS_DSTODS(wh)
122
123 /* NB: "11" is the maximum number of padding bytes needed for Tx */
124 #define MTW_MAX_TXSZ \
125 (sizeof(struct mtw_txd) + sizeof(struct mtw_txwi) + MCLBYTES + 11)
126
127 /*
128 * Because of a LOR (lock order reversal) in mtw_key_delete(), use an
129 * atomic counter instead. '& MTW_CMDQ_MASQ' wraps the index around cmdq[].
130 */
131 #define MTW_CMDQ_GET(c) (atomic_fetchadd_32((c), 1) & MTW_CMDQ_MASQ)
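/*
 * Illustrative sketch of the pattern (it mirrors mtw_key_set() below):
 * reserve a slot with MTW_CMDQ_GET(), fill it in, then let mtw_cmdq_cb()
 * execute it from the cmdq task:
 *
 *	i = MTW_CMDQ_GET(&sc->cmdq_store);
 *	sc->cmdq[i].func = mtw_key_set_cb;
 *	sc->cmdq[i].arg0 = NULL;
 *	sc->cmdq[i].arg1 = vap;
 *	ieee80211_runtask(ic, &sc->cmdq_task);
 */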
132
133 static const STRUCT_USB_HOST_ID mtw_devs[] = {
134 #define MTW_DEV(v, p) \
135 { \
136 USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) \
137 }
138 MTW_DEV(EDIMAX, MT7601U),
139 MTW_DEV(RALINK, MT7601U),
140 MTW_DEV(XIAOMI, MT7601U)
141 };
142 #undef MTW_DEV
143
144 static device_probe_t mtw_match;
145 static device_attach_t mtw_attach;
146 static device_detach_t mtw_detach;
147
148 static usb_callback_t mtw_bulk_rx_callback;
149 static usb_callback_t mtw_bulk_tx_callback0;
150 static usb_callback_t mtw_bulk_tx_callback1;
151 static usb_callback_t mtw_bulk_tx_callback2;
152 static usb_callback_t mtw_bulk_tx_callback3;
153 static usb_callback_t mtw_bulk_tx_callback4;
154 static usb_callback_t mtw_bulk_tx_callback5;
155 static usb_callback_t mtw_fw_callback;
156
157 static void mtw_autoinst(void *, struct usb_device *, struct usb_attach_arg *);
158 static int mtw_driver_loaded(struct module *, int, void *);
159 static void mtw_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error,
160 u_int index);
161 static struct ieee80211vap *mtw_vap_create(struct ieee80211com *,
162 const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
163 const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
164 static void mtw_vap_delete(struct ieee80211vap *);
165 static void mtw_cmdq_cb(void *, int);
166 static void mtw_setup_tx_list(struct mtw_softc *, struct mtw_endpoint_queue *);
167 static void mtw_unsetup_tx_list(struct mtw_softc *,
168 struct mtw_endpoint_queue *);
169 static void mtw_load_microcode(void *arg);
170
171 static usb_error_t mtw_do_request(struct mtw_softc *,
172 struct usb_device_request *, void *);
173 static int mtw_read(struct mtw_softc *, uint16_t, uint32_t *);
174 static int mtw_read_region_1(struct mtw_softc *, uint16_t, uint8_t *, int);
175 static int mtw_write_2(struct mtw_softc *, uint16_t, uint16_t);
176 static int mtw_write(struct mtw_softc *, uint16_t, uint32_t);
177 static int mtw_write_region_1(struct mtw_softc *, uint16_t, uint8_t *, int);
178 static int mtw_set_region_4(struct mtw_softc *, uint16_t, uint32_t, int);
179 static int mtw_efuse_read_2(struct mtw_softc *, uint16_t, uint16_t *);
180 static int mtw_bbp_read(struct mtw_softc *, uint8_t, uint8_t *);
181 static int mtw_bbp_write(struct mtw_softc *, uint8_t, uint8_t);
182 static int mtw_mcu_cmd(struct mtw_softc *sc, uint8_t cmd, void *buf, int len);
183 static void mtw_get_txpower(struct mtw_softc *);
184 static int mtw_read_eeprom(struct mtw_softc *);
185 static struct ieee80211_node *mtw_node_alloc(struct ieee80211vap *,
186 const uint8_t mac[IEEE80211_ADDR_LEN]);
187 static int mtw_media_change(if_t);
188 static int mtw_newstate(struct ieee80211vap *, enum ieee80211_state, int);
189 static int mtw_wme_update(struct ieee80211com *);
190 static void mtw_key_set_cb(void *);
191 static int mtw_key_set(struct ieee80211vap *, struct ieee80211_key *);
192 static void mtw_key_delete_cb(void *);
193 static int mtw_key_delete(struct ieee80211vap *, struct ieee80211_key *);
194 static void mtw_ratectl_to(void *);
195 static void mtw_ratectl_cb(void *, int);
196 static void mtw_drain_fifo(void *);
197 static void mtw_iter_func(void *, struct ieee80211_node *);
198 static void mtw_newassoc_cb(void *);
199 static void mtw_newassoc(struct ieee80211_node *, int);
200 static int mtw_mcu_radio(struct mtw_softc *sc, int func, uint32_t val);
201 static void mtw_recv_mgmt(struct ieee80211_node *, struct mbuf *, int,
202 const struct ieee80211_rx_stats *, int, int);
203 static void mtw_rx_frame(struct mtw_softc *, struct mbuf *, uint32_t);
204 static void mtw_tx_free(struct mtw_endpoint_queue *pq, struct mtw_tx_data *,
205 int);
206 static void mtw_set_tx_desc(struct mtw_softc *, struct mtw_tx_data *);
207 static int mtw_tx(struct mtw_softc *, struct mbuf *, struct ieee80211_node *);
208 static int mtw_tx_mgt(struct mtw_softc *, struct mbuf *,
209 struct ieee80211_node *);
210 static int mtw_sendprot(struct mtw_softc *, const struct mbuf *,
211 struct ieee80211_node *, int, int);
212 static int mtw_tx_param(struct mtw_softc *, struct mbuf *,
213 struct ieee80211_node *, const struct ieee80211_bpf_params *);
214 static int mtw_raw_xmit(struct ieee80211_node *, struct mbuf *,
215 const struct ieee80211_bpf_params *);
216 static int mtw_transmit(struct ieee80211com *, struct mbuf *);
217 static void mtw_start(struct mtw_softc *);
218 static void mtw_parent(struct ieee80211com *);
219 static void mtw_select_chan_group(struct mtw_softc *, int);
220
221 static int mtw_set_chan(struct mtw_softc *, struct ieee80211_channel *);
222 static void mtw_set_channel(struct ieee80211com *);
223 static void mtw_getradiocaps(struct ieee80211com *, int, int *,
224 struct ieee80211_channel[]);
225 static void mtw_scan_start(struct ieee80211com *);
226 static void mtw_scan_end(struct ieee80211com *);
227 static void mtw_update_beacon(struct ieee80211vap *, int);
228 static void mtw_update_beacon_cb(void *);
229 static void mtw_updateprot(struct ieee80211com *);
230 static void mtw_updateprot_cb(void *);
231 static void mtw_usb_timeout_cb(void *);
232 static int mtw_reset(struct mtw_softc *sc);
233 static void mtw_enable_tsf_sync(struct mtw_softc *);
234
235
236 static void mtw_enable_mrr(struct mtw_softc *);
237 static void mtw_set_txpreamble(struct mtw_softc *);
238 static void mtw_set_basicrates(struct mtw_softc *);
239 static void mtw_set_leds(struct mtw_softc *, uint16_t);
240 static void mtw_set_bssid(struct mtw_softc *, const uint8_t *);
241 static void mtw_set_macaddr(struct mtw_softc *, const uint8_t *);
242 static void mtw_updateslot(struct ieee80211com *);
243 static void mtw_updateslot_cb(void *);
244 static void mtw_update_mcast(struct ieee80211com *);
245 static int8_t mtw_rssi2dbm(struct mtw_softc *, uint8_t, uint8_t);
246 static void mtw_update_promisc_locked(struct mtw_softc *);
247 static void mtw_update_promisc(struct ieee80211com *);
248 static int mtw_txrx_enable(struct mtw_softc *);
249 static void mtw_init_locked(struct mtw_softc *);
250 static void mtw_stop(void *);
251 static void mtw_delay(struct mtw_softc *, u_int);
252 static void mtw_update_chw(struct ieee80211com *ic);
253 static int mtw_ampdu_enable(struct ieee80211_node *ni,
254 struct ieee80211_tx_ampdu *tap);
255
256 static eventhandler_tag mtw_etag;
257
258 static const struct {
259 uint8_t reg;
260 uint8_t val;
261 } mt7601_rf_bank0[] = { MT7601_BANK0_RF },
262 mt7601_rf_bank4[] = { MT7601_BANK4_RF },
263 mt7601_rf_bank5[] = { MT7601_BANK5_RF };
264 static const struct {
265 uint32_t reg;
266 uint32_t val;
267 } mt7601_def_mac[] = { MT7601_DEF_MAC };
268 static const struct {
269 uint8_t reg;
270 uint8_t val;
271 } mt7601_def_bbp[] = { MT7601_DEF_BBP };
272
273
274 static const struct {
275 u_int chan;
276 uint8_t r17, r18, r19, r20;
277 } mt7601_rf_chan[] = { MT7601_RF_CHAN };
278
279
280 static const struct usb_config mtw_config[MTW_N_XFER] = {
281 [MTW_BULK_RX] = {
282 .type = UE_BULK,
283 .endpoint = UE_ADDR_ANY,
284 .direction = UE_DIR_IN,
285 .bufsize = MTW_MAX_RXSZ,
286 .flags = {.pipe_bof = 1,
287 .short_xfer_ok = 1,},
288 .callback = mtw_bulk_rx_callback,
289 },
290 [MTW_BULK_TX_BE] = {
291 .type = UE_BULK,
292 .endpoint = UE_ADDR_ANY,
293 .direction = UE_DIR_OUT,
294 .bufsize = MTW_MAX_TXSZ,
295 .flags = {.pipe_bof = 1,
296 .force_short_xfer = 0,},
297 .callback = mtw_bulk_tx_callback0,
298 .timeout = 5000, /* ms */
299 },
300 [MTW_BULK_TX_BK] = {
301 .type = UE_BULK,
302 .endpoint = UE_ADDR_ANY,
303 .direction = UE_DIR_OUT,
304 .bufsize = MTW_MAX_TXSZ,
305 .flags = {.pipe_bof = 1,
306 .force_short_xfer = 1,},
307 .callback = mtw_bulk_tx_callback1,
308 .timeout = 5000, /* ms */
309 },
310 [MTW_BULK_TX_VI] = {
311 .type = UE_BULK,
312 .endpoint = UE_ADDR_ANY,
313 .direction = UE_DIR_OUT,
314 .bufsize = MTW_MAX_TXSZ,
315 .flags = {.pipe_bof = 1,
316 .force_short_xfer = 1,},
317 .callback = mtw_bulk_tx_callback2,
318 .timeout = 5000, /* ms */
319 },
320 [MTW_BULK_TX_VO] = {
321 .type = UE_BULK,
322 .endpoint = UE_ADDR_ANY,
323 .direction = UE_DIR_OUT,
324 .bufsize = MTW_MAX_TXSZ,
325 .flags = {.pipe_bof = 1,
326 .force_short_xfer = 1,},
327 .callback = mtw_bulk_tx_callback3,
328 .timeout = 5000, /* ms */
329 },
330 [MTW_BULK_TX_HCCA] = {
331 .type = UE_BULK,
332 .endpoint = UE_ADDR_ANY,
333 .direction = UE_DIR_OUT,
334 .bufsize = MTW_MAX_TXSZ,
335 .flags = {.pipe_bof = 1,
336 .force_short_xfer = 1, .no_pipe_ok = 1,},
337 .callback = mtw_bulk_tx_callback4,
338 .timeout = 5000, /* ms */
339 },
340 [MTW_BULK_TX_PRIO] = {
341 .type = UE_BULK,
342 .endpoint = UE_ADDR_ANY,
343 .direction = UE_DIR_OUT,
344 .bufsize = MTW_MAX_TXSZ,
345 .flags = {.pipe_bof = 1,
346 .force_short_xfer = 1, .no_pipe_ok = 1,},
347 .callback = mtw_bulk_tx_callback5,
348 .timeout = 5000, /* ms */
349 },
350
351 [MTW_BULK_FW_CMD] = {
352 .type = UE_BULK,
353 .endpoint = UE_ADDR_ANY,
354 .direction = UE_DIR_OUT,
355 .bufsize = 0x2c44,
356 .flags = {.pipe_bof = 1,
357 .force_short_xfer = 1, .no_pipe_ok = 1,},
358 .callback = mtw_fw_callback,
359
360 },
361
362 [MTW_BULK_RAW_TX] = {
363 .type = UE_BULK,
364 .ep_index = 0,
365 .endpoint = UE_ADDR_ANY,
366 .direction = UE_DIR_OUT,
367 .bufsize = MTW_MAX_TXSZ,
368 .flags = {.pipe_bof = 1,
369 .force_short_xfer = 1, .no_pipe_ok = 1,},
370 .callback = mtw_bulk_tx_callback0,
371 .timeout = 5000, /* ms */
372 },
373
374 };
375 static uint8_t mtw_wme_ac_xfer_map[4] = {
376 [WME_AC_BE] = MTW_BULK_TX_BE,
377 [WME_AC_BK] = MTW_BULK_TX_BK,
378 [WME_AC_VI] = MTW_BULK_TX_VI,
379 [WME_AC_VO] = MTW_BULK_TX_VO,
380 };
381 static void
382 mtw_autoinst(void *arg, struct usb_device *udev, struct usb_attach_arg *uaa)
383 {
384 struct usb_interface *iface;
385 struct usb_interface_descriptor *id;
386
387 if (uaa->dev_state != UAA_DEV_READY)
388 return;
389
390 iface = usbd_get_iface(udev, 0);
391 if (iface == NULL)
392 return;
393 id = iface->idesc;
394 if (id == NULL || id->bInterfaceClass != UICLASS_MASS)
395 return;
396 if (usbd_lookup_id_by_uaa(mtw_devs, sizeof(mtw_devs), uaa))
397 return;
398
399 if (usb_msc_eject(udev, 0, MSC_EJECT_STOPUNIT) == 0)
400 uaa->dev_state = UAA_DEV_EJECTING;
401 }
402
403 static int
404 mtw_driver_loaded(struct module *mod, int what, void *arg)
405 {
406 switch (what) {
407 case MOD_LOAD:
408 mtw_etag = EVENTHANDLER_REGISTER(usb_dev_configured,
409 mtw_autoinst, NULL, EVENTHANDLER_PRI_ANY);
410 break;
411 case MOD_UNLOAD:
412 EVENTHANDLER_DEREGISTER(usb_dev_configured, mtw_etag);
413 break;
414 default:
415 return (EOPNOTSUPP);
416 }
417 return (0);
418 }
419
420 static const char *
421 mtw_get_rf(int rev)
422 {
423 switch (rev) {
424 case MT7601_RF_7601:
425 return ("MT7601");
426 case MT7610_RF_7610:
427 return ("MT7610");
428 case MT7612_RF_7612:
429 return ("MT7612");
430 }
431 return ("unknown");
432 }
433 static int
434 mtw_wlan_enable(struct mtw_softc *sc, int enable)
435 {
436 uint32_t tmp;
437 int error = 0;
438
439 if (enable) {
440 mtw_read(sc, MTW_WLAN_CTRL, &tmp);
441 if (sc->asic_ver == 0x7612)
442 tmp &= ~0xfffff000;
443
444 tmp &= ~MTW_WLAN_CLK_EN;
445 tmp |= MTW_WLAN_EN;
446 mtw_write(sc, MTW_WLAN_CTRL, tmp);
447 mtw_delay(sc, 2);
448
449 tmp |= MTW_WLAN_CLK_EN;
450 if (sc->asic_ver == 0x7612) {
451 tmp |= (MTW_WLAN_RESET | MTW_WLAN_RESET_RF);
452 }
453 mtw_write(sc, MTW_WLAN_CTRL, tmp);
454 mtw_delay(sc, 2);
455
456 mtw_read(sc, MTW_OSC_CTRL, &tmp);
457 tmp |= MTW_OSC_EN;
458 mtw_write(sc, MTW_OSC_CTRL, tmp);
459 tmp |= MTW_OSC_CAL_REQ;
460 mtw_write(sc, MTW_OSC_CTRL, tmp);
461 } else {
462 mtw_read(sc, MTW_WLAN_CTRL, &tmp);
463 tmp &= ~(MTW_WLAN_CLK_EN | MTW_WLAN_EN);
464 mtw_write(sc, MTW_WLAN_CTRL, tmp);
465
466 mtw_read(sc, MTW_OSC_CTRL, &tmp);
467 tmp &= ~MTW_OSC_EN;
468 mtw_write(sc, MTW_OSC_CTRL, tmp);
469 }
470 return (error);
471 }
472
473 static int
474 mtw_read_cfg(struct mtw_softc *sc, uint16_t reg, uint32_t *val)
475 {
476 usb_device_request_t req;
477 uint32_t tmp;
478 uint16_t actlen;
479 int error;
480
481 req.bmRequestType = UT_READ_VENDOR_DEVICE;
482 req.bRequest = MTW_READ_CFG;
483 USETW(req.wValue, 0);
484 USETW(req.wIndex, reg);
485 USETW(req.wLength, 4);
486 error = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, &tmp, 0,
487 &actlen, 1000);
488
489 if (error == 0)
490 *val = le32toh(tmp);
491 else
492 *val = 0xffffffff;
493 return (error);
494 }
495
496 static int
497 mtw_match(device_t self)
498 {
499 struct usb_attach_arg *uaa = device_get_ivars(self);
500
501 if (uaa->usb_mode != USB_MODE_HOST)
502 return (ENXIO);
503 if (uaa->info.bConfigIndex != 0)
504 return (ENXIO);
505 if (uaa->info.bIfaceIndex != 0)
506 return (ENXIO);
507
508 return (usbd_lookup_id_by_uaa(mtw_devs, sizeof(mtw_devs), uaa));
509 }
510
511 static int
512 mtw_attach(device_t self)
513 {
514 struct mtw_softc *sc = device_get_softc(self);
515 struct usb_attach_arg *uaa = device_get_ivars(self);
516 struct ieee80211com *ic = &sc->sc_ic;
517 uint32_t ver;
518 int i, ret;
519 uint32_t tmp;
520 uint8_t iface_index;
521 int ntries, error;
522
523 device_set_usb_desc(self);
524 sc->sc_udev = uaa->device;
525 sc->sc_dev = self;
526 sc->sc_sent = 0;
527
528 mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev),
529 MTX_NETWORK_LOCK, MTX_DEF);
530
531 iface_index = 0;
532
533 error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
534 mtw_config, MTW_N_XFER, sc, &sc->sc_mtx);
535 if (error) {
536 device_printf(sc->sc_dev,
537 "could not allocate USB transfers, "
538 "err=%s\n",
539 usbd_errstr(error));
540 goto detach;
541 }
542 for (i = 0; i < 4; i++) {
543 sc->txd_fw[i] = (struct mtw_txd_fw *)
544 malloc(sizeof(struct mtw_txd_fw),
545 M_USBDEV, M_NOWAIT | M_ZERO);
546 }
547 MTW_LOCK(sc);
548 sc->sc_idx = 0;
549 mbufq_init(&sc->sc_snd, ifqmaxlen);
550
551 /* enable the WLAN core */
552 if ((error = mtw_wlan_enable(sc, 1)) != 0) {
553 device_printf(sc->sc_dev, "could not enable WLAN core\n");
554 goto detach;
555 }
556
557 /* wait for the chip to settle */
558 DELAY(100);
559 for (ntries = 0; ntries < 100; ntries++) {
560 if (mtw_read(sc, MTW_ASIC_VER, &ver) != 0) {
561 goto detach;
562 }
563 if (ver != 0 && ver != 0xffffffff)
564 break;
565 DELAY(10);
566 }
567 if (ntries == 100) {
568 device_printf(sc->sc_dev,
569 "timeout waiting for NIC to initialize\n");
570 goto detach;
571 }
572 sc->asic_ver = ver >> 16;
573 sc->asic_rev = ver & 0xffff;
574 DELAY(100);
575 if (sc->asic_ver != 0x7601) {
576 device_printf(sc->sc_dev,
577 "Your revision 0x04%x is not supported yet\n",
578 sc->asic_rev);
579 goto detach;
580 }
581
582
583 if (mtw_read(sc, MTW_MAC_VER_ID, &tmp) != 0)
584 goto detach;
585 sc->mac_rev = tmp & 0xffff;
586
587 mtw_load_microcode(sc);
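/* wait for the firmware upload to signal completion via sc->fwloading */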
588 ret = msleep(&sc->fwloading, &sc->sc_mtx, 0, "fwload", 3 * hz);
589 if (ret == EWOULDBLOCK || sc->fwloading != 1) {
590 device_printf(sc->sc_dev,
591 "timeout waiting for MCU to initialize\n");
592 goto detach;
593 }
594
595 sc->sc_srom_read = mtw_efuse_read_2;
596 /* retrieve RF rev. no and various other things from EEPROM */
597 mtw_read_eeprom(sc);
598
599 device_printf(sc->sc_dev,
600 "MAC/BBP RT%04X (rev 0x%04X), RF %s (MIMO %dT%dR), address %s\n",
601 sc->asic_ver, sc->mac_rev, mtw_get_rf(sc->rf_rev), sc->ntxchains,
602 sc->nrxchains, ether_sprintf(ic->ic_macaddr));
603 DELAY(100);
604
605 //mtw_set_leds(sc,5);
606 // mtw_mcu_radio(sc,0x31,0);
607 MTW_UNLOCK(sc);
608
609
610 ic->ic_softc = sc;
611 ic->ic_name = device_get_nameunit(self);
612 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
613 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
614
615 ic->ic_caps = IEEE80211_C_STA | /* station mode supported */
616 IEEE80211_C_MONITOR | /* monitor mode supported */
617 IEEE80211_C_IBSS |
618 IEEE80211_C_HOSTAP |
619 IEEE80211_C_WDS | /* 4-address traffic works */
620 IEEE80211_C_MBSS |
621 IEEE80211_C_SHPREAMBLE | /* short preamble supported */
622 IEEE80211_C_SHSLOT | /* short slot time supported */
623 IEEE80211_C_WME | /* WME */
624 IEEE80211_C_WPA; /* WPA1|WPA2(RSN) */
625 device_printf(sc->sc_dev, "[HT] Enabling 802.11n\n");
626 ic->ic_htcaps = IEEE80211_HTC_HT
627 | IEEE80211_HTC_AMPDU
628 | IEEE80211_HTC_AMSDU
629 | IEEE80211_HTCAP_MAXAMSDU_3839
630 | IEEE80211_HTCAP_SMPS_OFF;
631
632 ic->ic_rxstream = sc->nrxchains;
633 ic->ic_txstream = sc->ntxchains;
634
635 ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_AES_CCM |
636 IEEE80211_CRYPTO_AES_OCB | IEEE80211_CRYPTO_TKIP |
637 IEEE80211_CRYPTO_TKIPMIC;
638
639 ic->ic_flags |= IEEE80211_F_DATAPAD;
640 ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS;
641
642 mtw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
643 ic->ic_channels);
644
645 ieee80211_ifattach(ic);
646
647 ic->ic_scan_start = mtw_scan_start;
648 ic->ic_scan_end = mtw_scan_end;
649 ic->ic_set_channel = mtw_set_channel;
650 ic->ic_getradiocaps = mtw_getradiocaps;
651 ic->ic_node_alloc = mtw_node_alloc;
652 ic->ic_newassoc = mtw_newassoc;
653 ic->ic_update_mcast = mtw_update_mcast;
654 ic->ic_updateslot = mtw_updateslot;
655 ic->ic_wme.wme_update = mtw_wme_update;
656 ic->ic_raw_xmit = mtw_raw_xmit;
657 ic->ic_update_promisc = mtw_update_promisc;
658 ic->ic_vap_create = mtw_vap_create;
659 ic->ic_vap_delete = mtw_vap_delete;
660 ic->ic_transmit = mtw_transmit;
661 ic->ic_parent = mtw_parent;
662
663 ic->ic_update_chw = mtw_update_chw;
664 ic->ic_ampdu_enable = mtw_ampdu_enable;
665
666 ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr,
667 sizeof(sc->sc_txtap), MTW_TX_RADIOTAP_PRESENT,
668 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
669 MTW_RX_RADIOTAP_PRESENT);
670 TASK_INIT(&sc->cmdq_task, 0, mtw_cmdq_cb, sc);
671 TASK_INIT(&sc->ratectl_task, 0, mtw_ratectl_cb, sc);
672 usb_callout_init_mtx(&sc->ratectl_ch, &sc->sc_mtx, 0);
673
674 if (bootverbose)
675 ieee80211_announce(ic);
676
677 return (0);
678
679 detach:
680 MTW_UNLOCK(sc);
681 mtw_detach(self);
682 return (ENXIO);
683 }
684
685 static void
686 mtw_drain_mbufq(struct mtw_softc *sc)
687 {
688 struct mbuf *m;
689 struct ieee80211_node *ni;
690
691 MTW_LOCK_ASSERT(sc, MA_OWNED);
692 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
693 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
694 m->m_pkthdr.rcvif = NULL;
695 ieee80211_free_node(ni);
696 m_freem(m);
697 }
698 }
699
700 static int
701 mtw_detach(device_t self)
702 {
703 struct mtw_softc *sc = device_get_softc(self);
704 struct ieee80211com *ic = &sc->sc_ic;
705 int i;
706 MTW_LOCK(sc);
707 mtw_reset(sc);
708 DELAY(10000);
709 sc->sc_detached = 1;
710 MTW_UNLOCK(sc);
711
712
713 /* stop all USB transfers */
714 for (i = 0; i < MTW_N_XFER; i++)
715 usbd_transfer_drain(sc->sc_xfer[i]);
716
717 MTW_LOCK(sc);
718 sc->ratectl_run = MTW_RATECTL_OFF;
719 sc->cmdq_run = sc->cmdq_key_set = MTW_CMDQ_ABORT;
720
721 /* free TX list, if any */
722 if (ic->ic_nrunning > 0)
723 for (i = 0; i < MTW_EP_QUEUES; i++)
724 mtw_unsetup_tx_list(sc, &sc->sc_epq[i]);
725
726 /* Free TX queue */
727 mtw_drain_mbufq(sc);
728 MTW_UNLOCK(sc);
729 if (sc->sc_ic.ic_softc == sc) {
730 /* drain tasks */
731 usb_callout_drain(&sc->ratectl_ch);
732 ieee80211_draintask(ic, &sc->cmdq_task);
733 ieee80211_draintask(ic, &sc->ratectl_task);
734 ieee80211_ifdetach(ic);
735 }
736 for (i = 0; i < 4; i++) {
737 free(sc->txd_fw[i], M_USBDEV);
738 }
739 firmware_unregister("/mediatek/mt7601u");
740 mtx_destroy(&sc->sc_mtx);
741
742 return (0);
743 }
744
745 static struct ieee80211vap *
746 mtw_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
747 enum ieee80211_opmode opmode, int flags,
748 const uint8_t bssid[IEEE80211_ADDR_LEN],
749 const uint8_t mac[IEEE80211_ADDR_LEN])
750 {
751 struct mtw_softc *sc = ic->ic_softc;
752 struct mtw_vap *rvp;
753 struct ieee80211vap *vap;
754 int i;
755
756 if (sc->rvp_cnt >= MTW_VAP_MAX) {
757 device_printf(sc->sc_dev, "number of VAPs maxed out\n");
758 return (NULL);
759 }
760
761 switch (opmode) {
762 case IEEE80211_M_STA:
763 /* enable s/w bmiss handling for sta mode */
764 flags |= IEEE80211_CLONE_NOBEACONS;
765 /* fall through */
766 case IEEE80211_M_IBSS:
767 case IEEE80211_M_MONITOR:
768 case IEEE80211_M_HOSTAP:
769 case IEEE80211_M_MBSS:
770 /* other than WDS vaps, only one at a time */
771 if (!TAILQ_EMPTY(&ic->ic_vaps))
772 return (NULL);
773 break;
774 case IEEE80211_M_WDS:
775 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
776 if (vap->iv_opmode != IEEE80211_M_HOSTAP)
777 continue;
778 /* WDS vap's always share the local mac address. */
779 flags &= ~IEEE80211_CLONE_BSSID;
780 break;
781 }
782 if (vap == NULL) {
783 device_printf(sc->sc_dev,
784 "wds only supported in ap mode\n");
785 return (NULL);
786 }
787 break;
788 default:
789 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
790 return (NULL);
791 }
792
793 rvp = malloc(sizeof(struct mtw_vap), M_80211_VAP, M_WAITOK | M_ZERO);
794 vap = &rvp->vap;
795
796 if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid) !=
797 0) {
798 /* out of memory */
799 free(rvp, M_80211_VAP);
800 return (NULL);
801 }
802
803 vap->iv_update_beacon = mtw_update_beacon;
804 vap->iv_max_aid = MTW_WCID_MAX;
805
806 /*
807 * The linux rt2800 driver limits 1 stream devices to a 32KB
808 * RX AMPDU.
809 */
810 if (ic->ic_rxstream > 1)
811 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
812 else
813 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_32K;
814 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_2; /* 2uS */
815
816 /*
817 * To delete the right key from h/w, we need wcid.
818 * Luckily, there is unused space in ieee80211_key{}, wk_pad,
819 * and matching wcid will be written into there. So, cast
820 * some spells to remove 'const' from ieee80211_key{}
821 */
822 vap->iv_key_delete = (void *)mtw_key_delete;
823 vap->iv_key_set = (void *)mtw_key_set;
824
825 // override state transition machine
826 rvp->newstate = vap->iv_newstate;
827 vap->iv_newstate = mtw_newstate;
828 if (opmode == IEEE80211_M_IBSS) {
829 rvp->recv_mgmt = vap->iv_recv_mgmt;
830 vap->iv_recv_mgmt = mtw_recv_mgmt;
831 }
832
833 ieee80211_ratectl_init(vap);
834 ieee80211_ratectl_setinterval(vap, 1000); // 1 second
835
836 /* complete setup */
837 ieee80211_vap_attach(vap, mtw_media_change, ieee80211_media_status,
838 mac);
839
840 /* make sure id is always unique */
841 for (i = 0; i < MTW_VAP_MAX; i++) {
842 if ((sc->rvp_bmap & 1 << i) == 0) {
843 sc->rvp_bmap |= 1 << i;
844 rvp->rvp_id = i;
845 break;
846 }
847 }
848 if (sc->rvp_cnt++ == 0)
849 ic->ic_opmode = opmode;
850
851 if (opmode == IEEE80211_M_HOSTAP)
852 sc->cmdq_run = MTW_CMDQ_GO;
853
854 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "rvp_id=%d bmap=%x rvp_cnt=%d\n",
855 rvp->rvp_id, sc->rvp_bmap, sc->rvp_cnt);
856
857 return (vap);
858 }
859
860 static void
861 mtw_vap_delete(struct ieee80211vap *vap)
862 {
863 struct mtw_vap *rvp = MTW_VAP(vap);
864 struct ieee80211com *ic;
865 struct mtw_softc *sc;
866 uint8_t rvp_id;
867
868 if (vap == NULL)
869 return;
870
871 ic = vap->iv_ic;
872 sc = ic->ic_softc;
873
874 MTW_LOCK(sc);
875 m_freem(rvp->beacon_mbuf);
876 rvp->beacon_mbuf = NULL;
877
878 rvp_id = rvp->rvp_id;
879 sc->ratectl_run &= ~(1 << rvp_id);
880 sc->rvp_bmap &= ~(1 << rvp_id);
881 mtw_set_region_4(sc, MTW_SKEY(rvp_id, 0), 0, 256);
882 mtw_set_region_4(sc, (0x7800 + (rvp_id) * 512), 0, 512);
883 --sc->rvp_cnt;
884
885 MTW_DPRINTF(sc, MTW_DEBUG_STATE,
886 "vap=%p rvp_id=%d bmap=%x rvp_cnt=%d\n", vap, rvp_id, sc->rvp_bmap,
887 sc->rvp_cnt);
888
889 MTW_UNLOCK(sc);
890
891 ieee80211_ratectl_deinit(vap);
892 ieee80211_vap_detach(vap);
893 free(rvp, M_80211_VAP);
894 }
895
896 /*
897 * A number of functions need to be called from process (taskqueue) context.
898 * Rather than creating a taskqueue event for each of those functions,
899 * this single all-for-one callback is used. It guarantees that the
900 * deferred functions are executed in the same order they were
901 * enqueued.
902 * '& MTW_CMDQ_MASQ' wraps the index around cmdq[].
903 */
904 static void
905 mtw_cmdq_cb(void *arg, int pending)
906 {
907 struct mtw_softc *sc = arg;
908 uint8_t i;
909 /* call cmdq[].func locked */
910 MTW_LOCK(sc);
911 for (i = sc->cmdq_exec; sc->cmdq[i].func && pending;
912 i = sc->cmdq_exec, pending--) {
913 MTW_DPRINTF(sc, MTW_DEBUG_CMD, "cmdq_exec=%d pending=%d\n", i,
914 pending);
915 if (sc->cmdq_run == MTW_CMDQ_GO) {
916 /*
917 * If arg0 is NULL, callback func needs more
918 * than one arg. So, pass ptr to cmdq struct.
919 */
920 if (sc->cmdq[i].arg0)
921 sc->cmdq[i].func(sc->cmdq[i].arg0);
922 else
923 sc->cmdq[i].func(&sc->cmdq[i]);
924 }
925 sc->cmdq[i].arg0 = NULL;
926 sc->cmdq[i].func = NULL;
927 sc->cmdq_exec++;
928 sc->cmdq_exec &= MTW_CMDQ_MASQ;
929 }
930 MTW_UNLOCK(sc);
931 }
932
933 static void
934 mtw_setup_tx_list(struct mtw_softc *sc, struct mtw_endpoint_queue *pq)
935 {
936 struct mtw_tx_data *data;
937
938 memset(pq, 0, sizeof(*pq));
939
940 STAILQ_INIT(&pq->tx_qh);
941 STAILQ_INIT(&pq->tx_fh);
942
943 for (data = &pq->tx_data[0]; data < &pq->tx_data[MTW_TX_RING_COUNT];
944 data++) {
945 data->sc = sc;
946 STAILQ_INSERT_TAIL(&pq->tx_fh, data, next);
947 }
948 pq->tx_nfree = MTW_TX_RING_COUNT;
949 }
950
951 static void
952 mtw_unsetup_tx_list(struct mtw_softc *sc, struct mtw_endpoint_queue *pq)
953 {
954 struct mtw_tx_data *data;
955 /* make sure any subsequent use of the queues will fail */
956 pq->tx_nfree = 0;
957
958 STAILQ_INIT(&pq->tx_fh);
959 STAILQ_INIT(&pq->tx_qh);
960
961 /* free up all node references and mbufs */
962 for (data = &pq->tx_data[0]; data < &pq->tx_data[MTW_TX_RING_COUNT];
963 data++) {
964 if (data->m != NULL) {
965 m_freem(data->m);
966 data->m = NULL;
967 }
968 if (data->ni != NULL) {
969 ieee80211_free_node(data->ni);
970 data->ni = NULL;
971 }
972 }
973 }
974
975 static int
976 mtw_write_ivb(struct mtw_softc *sc, void *buf, uint16_t len)
977 {
978 usb_device_request_t req;
979 uint16_t actlen;
980 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
981 req.bRequest = MTW_RESET;
982 USETW(req.wValue, 0x12);
983 USETW(req.wIndex, 0);
984 USETW(req.wLength, len);
985
986 int error = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, buf,
987 0, &actlen, 1000);
988
989 return (error);
990 }
991
992 static int
993 mtw_write_cfg(struct mtw_softc *sc, uint16_t reg, uint32_t val)
994 {
995 usb_device_request_t req;
996 int error;
997
998 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
999 req.bRequest = MTW_WRITE_CFG;
1000 USETW(req.wValue, 0);
1001 USETW(req.wIndex, reg);
1002 USETW(req.wLength, 4);
1003 val = htole32(val);
1004 error = usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, &val);
1005 return (error);
1006 }
1007
1008 static int
1009 mtw_usb_dma_write(struct mtw_softc *sc, uint32_t val)
1010 {
1011 // if (sc->asic_ver == 0x7612)
1012 // return mtw_write_cfg(sc, MTW_USB_U3DMA_CFG, val);
1013 // else
1014 return (mtw_write(sc, MTW_USB_DMA_CFG, val));
1015 }
1016
1017 static void
1018 mtw_ucode_setup(struct mtw_softc *sc)
1019 {
1020
1021 mtw_usb_dma_write(sc, (MTW_USB_TX_EN | MTW_USB_RX_EN));
1022 mtw_write(sc, MTW_FCE_PSE_CTRL, 1);
1023 mtw_write(sc, MTW_TX_CPU_FCE_BASE, 0x400230);
1024 mtw_write(sc, MTW_TX_CPU_FCE_MAX_COUNT, 1);
1025 mtw_write(sc, MTW_MCU_FW_IDX, 1);
1026 mtw_write(sc, MTW_FCE_PDMA, 0x44);
1027 mtw_write(sc, MTW_FCE_SKIP_FS, 3);
1028 }
1029 static int
1030 mtw_ucode_write(struct mtw_softc *sc, const uint8_t *fw, const uint8_t *ivb,
1031 int32_t len, uint32_t offset)
1032 {
1033
1034 // struct usb_attach_arg *uaa = device_get_ivars(sc->sc_dev);
1035 #if 0 // firmware not tested
1036
1037 if (sc->asic_ver == 0x7612 && offset >= 0x90000)
1038 blksz = 0x800; /* MT7612 ROM Patch */
1039
1040 xfer = usbd_alloc_xfer(sc->sc_udev);
1041 if (xfer == NULL) {
1042 error = ENOMEM;
1043 goto fail;
1044 }
1045 buf = usbd_alloc_buffer(xfer, blksz + 12);
1046 if (buf == NULL) {
1047 error = ENOMEM;
1048 goto fail;
1049 }
1050 #endif
1051
1052
1053
1054 int mlen;
1055 int idx = 0;
1056
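/* The firmware is queued in 0x2c44-byte chunks, matching the bufsize of the MTW_BULK_FW_CMD transfer configured above. */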
1057 mlen = 0x2c44;
1058
1059 while (len > 0) {
1060
1061 if (len < 0x2c44 && len > 0) {
1062 mlen = len;
1063 }
1064
1065 sc->txd_fw[idx]->len = htole16(mlen);
1066 sc->txd_fw[idx]->flags = htole16(MTW_TXD_DATA | MTW_TXD_MCU);
1067
1068 memcpy(&sc->txd_fw[idx]->fw, fw, mlen);
1069 // memcpy(&txd[1], fw, mlen);
1070 // memset(&txd[1] + mlen, 0, MTW_DMA_PAD);
1071 // mtw_write_cfg(sc, MTW_MCU_DMA_ADDR, offset
1072 //+sent); 1mtw_write_cfg(sc, MTW_MCU_DMA_LEN, (mlen << 16));
1073
1074 // sc->sc_fw_data[idx]->len=htole16(mlen);
1075
1076 // memcpy(tmpbuf,fw,mlen);
1077 // memset(tmpbuf+mlen,0,MTW_DMA_PAD);
1078 // memcpy(sc->sc_fw_data[idx].buf, fw, mlen);
1079
1080 fw += mlen;
1081 len -= mlen;
1082 // sent+=mlen;
1083 idx++;
1084 }
1085 sc->sc_sent = 0;
1086 memcpy(sc->sc_ivb_1, ivb, MTW_MCU_IVB_LEN);
1087
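/* The prepared chunks are streamed to the MCU by mtw_fw_callback() once the firmware command transfer below is started. */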
1088 usbd_transfer_start(sc->sc_xfer[7]);
1089
1090 return (0);
1091 }
1092
1093 static void
1094 mtw_load_microcode(void *arg)
1095 {
1096
1097 struct mtw_softc *sc = (struct mtw_softc *)arg;
1098 const struct mtw_ucode_hdr *hdr;
1099 // onst struct mtw_ucode *fw = NULL;
1100 const char *fwname;
1101 size_t size;
1102 int error = 0;
1103 uint32_t tmp, iofs = 0x40;
1104 // int ntries;
1105 int dlen, ilen;
1106 device_printf(sc->sc_dev, "version:0x%hx\n", sc->asic_ver);
1107 /* is firmware already running? */
1108 mtw_read_cfg(sc, MTW_MCU_DMA_ADDR, &tmp);
1109 if (tmp == MTW_MCU_READY) {
1110 return;
1111 }
1112 if (sc->asic_ver == 0x7612) {
1113 fwname = "mtw-mt7662u_rom_patch";
1114
1115 const struct firmware *firmware = firmware_get_flags(fwname, FIRMWARE_GET_NOWARN);
1116 if (firmware == NULL) {
1117 device_printf(sc->sc_dev,
1118 "failed loadfirmware of file %s (error %d)\n",
1119 fwname, error);
1120 return;
1121 }
1122 size = firmware->datasize;
1123
1124 const struct mtw_ucode *fw = (const struct mtw_ucode *)
1125 firmware->data;
1126 hdr = (const struct mtw_ucode_hdr *)&fw->hdr;
1127 // memcpy(fw,(const unsigned char*)firmware->data +
1128 // 0x1e,size-0x1e);
1129 ilen = size - 0x1e;
1130
1131 mtw_ucode_setup(sc);
1132
1133 if ((error = mtw_ucode_write(sc, firmware->data, fw->ivb, ilen,
1134 0x90000)) != 0) {
1135 goto fail;
1136 }
1137 mtw_usb_dma_write(sc, 0x00e41814);
1138 }
1139
1140 fwname = "/mediatek/mt7601u.bin";
1141 iofs = 0x40;
1142 // dofs = 0;
1143 if (sc->asic_ver == 0x7612) {
1144 fwname = "mtw-mt7662u";
1145 iofs = 0x80040;
1146 // dofs = 0x110800;
1147 } else if (sc->asic_ver == 0x7610) {
1148 fwname = "mt7610u";
1149 // dofs = 0x80000;
1150 }
1151 MTW_UNLOCK(sc);
1152 const struct firmware *firmware = firmware_get_flags(fwname, FIRMWARE_GET_NOWARN);
1153
1154 if (firmware == NULL) {
1155 device_printf(sc->sc_dev,
1156 "failed loadfirmware of file %s (error %d)\n", fwname,
1157 error);
1158 MTW_LOCK(sc);
1159 return;
1160 }
1161 MTW_LOCK(sc);
1162 size = firmware->datasize;
1163 MTW_DPRINTF(sc, MTW_DEBUG_FIRMWARE, "firmware size:%zu\n", size);
1164 const struct mtw_ucode *fw = (const struct mtw_ucode *)firmware->data;
1165
1166 if (size < sizeof(struct mtw_ucode_hdr)) {
1167 device_printf(sc->sc_dev, "firmware header too short\n");
1168 goto fail;
1169 }
1170
1171 hdr = (const struct mtw_ucode_hdr *)&fw->hdr;
1172
1173 if (size < sizeof(struct mtw_ucode_hdr) + le32toh(hdr->ilm_len) +
1174 le32toh(hdr->dlm_len)) {
1175 device_printf(sc->sc_dev, "firmware payload too short\n");
1176 goto fail;
1177 }
1178
1179 ilen = le32toh(hdr->ilm_len) - MTW_MCU_IVB_LEN;
1180 dlen = le32toh(hdr->dlm_len);
1181
1182 if (ilen > size || dlen > size) {
1183 device_printf(sc->sc_dev, "firmware payload too large\n");
1184 goto fail;
1185 }
1186
1187 mtw_write(sc, MTW_FCE_PDMA, 0);
1188 mtw_write(sc, MTW_FCE_PSE_CTRL, 0);
1189 mtw_ucode_setup(sc);
1190
1191 if ((error = mtw_ucode_write(sc, fw->data, fw->ivb, ilen, iofs)) != 0)
1192 device_printf(sc->sc_dev, "Could not write ucode errro=%d\n",
1193 error);
1194
1195 device_printf(sc->sc_dev, "loaded firmware ver %.8x %.8x %s\n",
1196 le32toh(hdr->fw_ver), le32toh(hdr->build_ver), hdr->build_time);
1197
1198 return;
1199 fail:
1200 return;
1201 }
1202 static usb_error_t
1203 mtw_do_request(struct mtw_softc *sc, struct usb_device_request *req, void *data)
1204 {
1205 usb_error_t err;
1206 int ntries = 5;
1207
1208 MTW_LOCK_ASSERT(sc, MA_OWNED);
1209
1210 while (ntries--) {
1211 err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data,
1212 0, NULL, 2000); /* timeout in ms */
1213 if (err == 0)
1214 break;
1215 MTW_DPRINTF(sc, MTW_DEBUG_USB,
1216 "Control request failed, %s (retrying)\n",
1217 usbd_errstr(err));
1218 mtw_delay(sc, 10);
1219 }
1220 return (err);
1221 }
1222
1223 static int
1224 mtw_read(struct mtw_softc *sc, uint16_t reg, uint32_t *val)
1225 {
1226 uint32_t tmp;
1227 int error;
1228
1229 error = mtw_read_region_1(sc, reg, (uint8_t *)&tmp, sizeof tmp);
1230 if (error == 0)
1231 *val = le32toh(tmp);
1232 else
1233 *val = 0xffffffff;
1234 return (error);
1235 }
1236
1237 static int
1238 mtw_read_region_1(struct mtw_softc *sc, uint16_t reg, uint8_t *buf, int len)
1239 {
1240 usb_device_request_t req;
1241
1242 req.bmRequestType = UT_READ_VENDOR_DEVICE;
1243 req.bRequest = MTW_READ_REGION_1;
1244 USETW(req.wValue, 0);
1245 USETW(req.wIndex, reg);
1246 USETW(req.wLength, len);
1247
1248 return (mtw_do_request(sc, &req, buf));
1249 }
1250
1251 static int
1252 mtw_write_2(struct mtw_softc *sc, uint16_t reg, uint16_t val)
1253 {
1254
1255 usb_device_request_t req;
1256 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1257 req.bRequest = MTW_WRITE_2;
1258 USETW(req.wValue, val);
1259 USETW(req.wIndex, reg);
1260 USETW(req.wLength, 0);
1261 return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, NULL));
1262 }
1263
1264 static int
1265 mtw_write(struct mtw_softc *sc, uint16_t reg, uint32_t val)
1266 {
1267
1268 int error;
1269
1270 if ((error = mtw_write_2(sc, reg, val & 0xffff)) == 0) {
1271
1272 error = mtw_write_2(sc, reg + 2, val >> 16);
1273 }
1274
1275 return (error);
1276 }
1277
1278 static int
1279 mtw_write_region_1(struct mtw_softc *sc, uint16_t reg, uint8_t *buf, int len)
1280 {
1281
1282 usb_device_request_t req;
1283 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1284 req.bRequest = MTW_WRITE_REGION_1;
1285 USETW(req.wValue, 0);
1286 USETW(req.wIndex, reg);
1287 USETW(req.wLength, len);
1288 return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, buf));
1289 }
1290
1291 static int
1292 mtw_set_region_4(struct mtw_softc *sc, uint16_t reg, uint32_t val, int count)
1293 {
1294 int i, error = 0;
1295
1296 KASSERT((count & 3) == 0, ("mtw_set_region_4: Invalid data length.\n"));
1297 for (i = 0; i < count && error == 0; i += 4)
1298 error = mtw_write(sc, reg + i, val);
1299 return (error);
1300 }
1301
1302 static int
1303 mtw_efuse_read_2(struct mtw_softc *sc, uint16_t addr, uint16_t *val)
1304 {
1305
1306 uint32_t tmp;
1307 uint16_t reg;
1308 int error, ntries;
1309
1310 if ((error = mtw_read(sc, MTW_EFUSE_CTRL, &tmp)) != 0)
1311 return (error);
1312
1313 addr *= 2;
1314 /*
1315 * Read one 16-byte block into registers EFUSE_DATA[0-3]:
1316 * DATA0: 3 2 1 0
1317 * DATA1: 7 6 5 4
1318 * DATA2: B A 9 8
1319 * DATA3: F E D C
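 * For example, the 16-bit word at byte offset 0xA (caller addr 0x5) is
 * returned from the upper half of EFUSE_DATA2.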
1320 */
1321 tmp &= ~(MTW_EFSROM_MODE_MASK | MTW_EFSROM_AIN_MASK);
1322 tmp |= (addr & ~0xf) << MTW_EFSROM_AIN_SHIFT | MTW_EFSROM_KICK;
1323 mtw_write(sc, MTW_EFUSE_CTRL, tmp);
1324 for (ntries = 0; ntries < 100; ntries++) {
1325 if ((error = mtw_read(sc, MTW_EFUSE_CTRL, &tmp)) != 0)
1326 return (error);
1327 if (!(tmp & MTW_EFSROM_KICK))
1328 break;
1329 DELAY(2);
1330 }
1331 if (ntries == 100)
1332 return (ETIMEDOUT);
1333
1334 if ((tmp & MTW_EFUSE_AOUT_MASK) == MTW_EFUSE_AOUT_MASK) {
1335 *val = 0xffff; // address not found
1336 return (0);
1337 }
1338 // determine to which 32-bit register our 16-bit word belongs
1339 reg = MTW_EFUSE_DATA0 + (addr & 0xc);
1340 if ((error = mtw_read(sc, reg, &tmp)) != 0)
1341 return (error);
1342
1343 *val = (addr & 2) ? tmp >> 16 : tmp & 0xffff;
1344 return (0);
1345 }
1346
1347 static __inline int
1348 mtw_srom_read(struct mtw_softc *sc, uint16_t addr, uint16_t *val)
1349 {
1350 /* either eFUSE ROM or EEPROM */
1351 return (sc->sc_srom_read(sc, addr, val));
1352 }
1353
1354 static int
1355 mtw_bbp_read(struct mtw_softc *sc, uint8_t reg, uint8_t *val)
1356 {
1357 uint32_t tmp;
1358 int ntries, error;
1359
1360 for (ntries = 0; ntries < 10; ntries++) {
1361 if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
1362 return (error);
1363 if (!(tmp & MTW_BBP_CSR_KICK))
1364 break;
1365 }
1366 if (ntries == 10)
1367 return (ETIMEDOUT);
1368
1369 tmp = MTW_BBP_CSR_READ | MTW_BBP_CSR_KICK | reg << 8;
1370 if ((error = mtw_write(sc, MTW_BBP_CSR, tmp)) != 0)
1371 return (error);
1372
1373 for (ntries = 0; ntries < 10; ntries++) {
1374 if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
1375 return (error);
1376 if (!(tmp & MTW_BBP_CSR_KICK))
1377 break;
1378 }
1379 if (ntries == 10)
1380 return (ETIMEDOUT);
1381
1382 *val = tmp & 0xff;
1383 return (0);
1384 }
1385
1386 static int
1387 mtw_bbp_write(struct mtw_softc *sc, uint8_t reg, uint8_t val)
1388 {
1389 uint32_t tmp;
1390 int ntries, error;
1391
1392 for (ntries = 0; ntries < 10; ntries++) {
1393 if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
1394 return (error);
1395 if (!(tmp & MTW_BBP_CSR_KICK))
1396 break;
1397 }
1398 if (ntries == 10)
1399 return (ETIMEDOUT);
1400
1401 tmp = MTW_BBP_CSR_KICK | reg << 8 | val;
1402 return (mtw_write(sc, MTW_BBP_CSR, tmp));
1403 }
1404
1405 static int
1406 mtw_mcu_cmd(struct mtw_softc *sc, u_int8_t cmd, void *buf, int len)
1407 {
1408 sc->sc_idx = 0;
1409 sc->txd_fw[sc->sc_idx]->len = htole16(
1410 len + 8);
1411 sc->txd_fw[sc->sc_idx]->flags = htole16(MTW_TXD_CMD | MTW_TXD_MCU |
1412 (cmd & 0x1f) << MTW_TXD_CMD_SHIFT | (0 & 0xf));
1413
1414 memset(&sc->txd_fw[sc->sc_idx]->fw, 0, 2004);
1415 memcpy(&sc->txd_fw[sc->sc_idx]->fw, buf, len);
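/* kick the firmware/MCU command bulk transfer (MTW_BULK_FW_CMD) */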
1416 usbd_transfer_start(sc->sc_xfer[7]);
1417 return (0);
1418 }
1419
1420 /*
1421 * Add `delta' (signed) to each 4-bit sub-word of a 32-bit word.
1422 * Used to adjust per-rate Tx power registers.
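 * For example (each nibble saturates at [0, 0xf]):
 *	b4inc(0x00000fff, 1)  == 0x11111fff
 *	b4inc(0x32100210, -1) == 0x21000100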
1423 */
1424 static __inline uint32_t
1425 b4inc(uint32_t b32, int8_t delta)
1426 {
1427 int8_t i, b4;
1428
1429 for (i = 0; i < 8; i++) {
1430 b4 = b32 & 0xf;
1431 b4 += delta;
1432 if (b4 < 0)
1433 b4 = 0;
1434 else if (b4 > 0xf)
1435 b4 = 0xf;
1436 b32 = b32 >> 4 | b4 << 28;
1437 }
1438 return (b32);
1439 }
1440 static void
1441 mtw_get_txpower(struct mtw_softc *sc)
1442 {
1443 uint16_t val;
1444 int i;
1445
1446 /* Read power settings for 2GHz channels. */
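/* Each 16-bit SROM word carries the power for two consecutive channels, low byte first. */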
1447 for (i = 0; i < 14; i += 2) {
1448 mtw_srom_read(sc, MTW_EEPROM_PWR2GHZ_BASE1 + i / 2, &val);
1449 sc->txpow1[i + 0] = (int8_t)(val & 0xff);
1450 sc->txpow1[i + 1] = (int8_t)(val >> 8);
1451 mtw_srom_read(sc, MTW_EEPROM_PWR2GHZ_BASE2 + i / 2, &val);
1452 sc->txpow2[i + 0] = (int8_t)(val & 0xff);
1453 sc->txpow2[i + 1] = (int8_t)(val >> 8);
1454 }
1455 /* Fix broken Tx power entries. */
1456 for (i = 0; i < 14; i++) {
1457 if (sc->txpow1[i] < 0 || sc->txpow1[i] > 27)
1458 sc->txpow1[i] = 5;
1459 if (sc->txpow2[i] < 0 || sc->txpow2[i] > 27)
1460 sc->txpow2[i] = 5;
1461 MTW_DPRINTF(sc, MTW_DEBUG_TXPWR,
1462 "chan %d: power1=%d, power2=%d\n", mt7601_rf_chan[i].chan,
1463 sc->txpow1[i], sc->txpow2[i]);
1464 }
1465 }
1466
1467 struct ieee80211_node *
1468 mtw_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
1469 {
1470 return (malloc(sizeof(struct mtw_node), M_80211_NODE,
1471 M_NOWAIT | M_ZERO));
1472 }
1473 static int
1474 mtw_read_eeprom(struct mtw_softc *sc)
1475 {
1476 struct ieee80211com *ic = &sc->sc_ic;
1477 int8_t delta_2ghz, delta_5ghz;
1478 uint16_t val;
1479 int ridx, ant;
1480
1481 sc->sc_srom_read = mtw_efuse_read_2;
1482
1483 /* read RF information */
1484 mtw_srom_read(sc, MTW_EEPROM_CHIPID, &val);
1485 sc->rf_rev = val;
1486 mtw_srom_read(sc, MTW_EEPROM_ANTENNA, &val);
1487 sc->ntxchains = (val >> 4) & 0xf;
1488 sc->nrxchains = val & 0xf;
1489 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "EEPROM RF rev=0x%02x chains=%dT%dR\n",
1490 sc->rf_rev, sc->ntxchains, sc->nrxchains);
1491
1492 /* read ROM version */
1493 mtw_srom_read(sc, MTW_EEPROM_VERSION, &val);
1494 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "EEPROM rev=%d, FAE=%d\n", val & 0xff,
1495 val >> 8);
1496
1497 /* read MAC address */
1498 mtw_srom_read(sc, MTW_EEPROM_MAC01, &val);
1499 ic->ic_macaddr[0] = val & 0xff;
1500 ic->ic_macaddr[1] = val >> 8;
1501 mtw_srom_read(sc, MTW_EEPROM_MAC23, &val);
1502 ic->ic_macaddr[2] = val & 0xff;
1503 ic->ic_macaddr[3] = val >> 8;
1504 mtw_srom_read(sc, MTW_EEPROM_MAC45, &val);
1505 ic->ic_macaddr[4] = val & 0xff;
1506 ic->ic_macaddr[5] = val >> 8;
1507 #if 0
1508 printf("eFUSE ROM\n00: ");
1509 for (int i = 0; i < 256; i++) {
1510 if (((i % 8) == 0) && i > 0)
1511 printf("\n%02x: ", i);
1512 mtw_srom_read(sc, i, &val);
1513 printf(" %04x", val);
1514 }
1515 printf("\n");
1516 #endif
1517 /* check if RF supports automatic Tx access gain control */
1518 mtw_srom_read(sc, MTW_EEPROM_CONFIG, &val);
1519 device_printf(sc->sc_dev, "EEPROM CFG 0x%04x\n", val);
1520 if ((val & 0xff) != 0xff) {
1521 sc->ext_5ghz_lna = (val >> 3) & 1;
1522 sc->ext_2ghz_lna = (val >> 2) & 1;
1523 /* check if RF supports automatic Tx access gain control */
1524 sc->calib_2ghz = sc->calib_5ghz = (val >> 1) & 1;
1525 /* check if we have a hardware radio switch */
1526 sc->rfswitch = val & 1;
1527 }
1528
1529 /* read RF frequency offset from EEPROM */
1530 mtw_srom_read(sc, MTW_EEPROM_FREQ_OFFSET, &val);
1531 if ((val & 0xff) != 0xff)
1532 sc->rf_freq_offset = val;
1533 else
1534 sc->rf_freq_offset = 0;
1535 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "frequency offset 0x%x\n",
1536 sc->rf_freq_offset);
1537
1538 /* Read Tx power settings. */
1539 mtw_get_txpower(sc);
1540
1541 /* read Tx power compensation for each Tx rate */
1542 mtw_srom_read(sc, MTW_EEPROM_DELTAPWR, &val);
1543 delta_2ghz = delta_5ghz = 0;
1544 if ((val & 0xff) != 0xff && (val & 0x80)) {
1545 delta_2ghz = val & 0xf;
1546 if (!(val & 0x40)) /* negative number */
1547 delta_2ghz = -delta_2ghz;
1548 }
1549 val >>= 8;
1550 if ((val & 0xff) != 0xff && (val & 0x80)) {
1551 delta_5ghz = val & 0xf;
1552 if (!(val & 0x40)) /* negative number */
1553 delta_5ghz = -delta_5ghz;
1554 }
1555 MTW_DPRINTF(sc, MTW_DEBUG_ROM | MTW_DEBUG_TXPWR,
1556 "power compensation=%d (2GHz), %d (5GHz)\n", delta_2ghz,
1557 delta_5ghz);
1558
1559 for (ridx = 0; ridx < 5; ridx++) {
1560 uint32_t reg;
1561
1562 mtw_srom_read(sc, MTW_EEPROM_RPWR + ridx * 2, &val);
1563 reg = val;
1564 mtw_srom_read(sc, MTW_EEPROM_RPWR + ridx * 2 + 1, &val);
1565 reg |= (uint32_t)val << 16;
1566
1567 sc->txpow20mhz[ridx] = reg;
1568 sc->txpow40mhz_2ghz[ridx] = b4inc(reg, delta_2ghz);
1569 sc->txpow40mhz_5ghz[ridx] = b4inc(reg, delta_5ghz);
1570
1571 MTW_DPRINTF(sc, MTW_DEBUG_ROM | MTW_DEBUG_TXPWR,
1572 "ridx %d: power 20MHz=0x%08x, 40MHz/2GHz=0x%08x, "
1573 "40MHz/5GHz=0x%08x\n",
1574 ridx, sc->txpow20mhz[ridx], sc->txpow40mhz_2ghz[ridx],
1575 sc->txpow40mhz_5ghz[ridx]);
1576 }
1577
1578 /* read RSSI offsets and LNA gains from EEPROM */
1579 val = 0;
1580 mtw_srom_read(sc, MTW_EEPROM_RSSI1_2GHZ, &val);
1581 sc->rssi_2ghz[0] = val & 0xff; /* Ant A */
1582 sc->rssi_2ghz[1] = val >> 8; /* Ant B */
1583 mtw_srom_read(sc, MTW_EEPROM_RSSI2_2GHZ, &val);
1584 /*
1585 * On RT3070 chips (limited to 2 Rx chains), this ROM
1586 * field contains the Tx mixer gain for the 2GHz band.
1587 */
1588 if ((val & 0xff) != 0xff)
1589 sc->txmixgain_2ghz = val & 0x7;
1590 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "tx mixer gain=%u (2GHz)\n",
1591 sc->txmixgain_2ghz);
1592 sc->lna[2] = val >> 8; /* channel group 2 */
1593 mtw_srom_read(sc, MTW_EEPROM_RSSI1_5GHZ, &val);
1594 sc->rssi_5ghz[0] = val & 0xff; /* Ant A */
1595 sc->rssi_5ghz[1] = val >> 8; /* Ant B */
1596 mtw_srom_read(sc, MTW_EEPROM_RSSI2_5GHZ, &val);
1597 sc->rssi_5ghz[2] = val & 0xff; /* Ant C */
1598
1599 sc->lna[3] = val >> 8; /* channel group 3 */
1600
1601 mtw_srom_read(sc, MTW_EEPROM_LNA, &val);
1602 sc->lna[0] = val & 0xff; /* channel group 0 */
1603 sc->lna[1] = val >> 8; /* channel group 1 */
1604 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "LNA0 0x%x\n", sc->lna[0]);
1605
1606 /* fix broken 5GHz LNA entries */
1607 if (sc->lna[2] == 0 || sc->lna[2] == 0xff) {
1608 MTW_DPRINTF(sc, MTW_DEBUG_ROM,
1609 "invalid LNA for channel group %d\n", 2);
1610 sc->lna[2] = sc->lna[1];
1611 }
1612 if (sc->lna[3] == 0 || sc->lna[3] == 0xff) {
1613 MTW_DPRINTF(sc, MTW_DEBUG_ROM,
1614 "invalid LNA for channel group %d\n", 3);
1615 sc->lna[3] = sc->lna[1];
1616 }
1617
1618 /* fix broken RSSI offset entries */
1619 for (ant = 0; ant < 3; ant++) {
1620 if (sc->rssi_2ghz[ant] < -10 || sc->rssi_2ghz[ant] > 10) {
1621 MTW_DPRINTF(sc, MTW_DEBUG_ROM,
1622 "invalid RSSI%d offset: %d (2GHz)\n", ant + 1,
1623 sc->rssi_2ghz[ant]);
1624 sc->rssi_2ghz[ant] = 0;
1625 }
1626 if (sc->rssi_5ghz[ant] < -10 || sc->rssi_5ghz[ant] > 10) {
1627 MTW_DPRINTF(sc, MTW_DEBUG_ROM,
1628 "invalid RSSI%d offset: %d (5GHz)\n", ant + 1,
1629 sc->rssi_5ghz[ant]);
1630 sc->rssi_5ghz[ant] = 0;
1631 }
1632 }
1633 return (0);
1634 }
1635 static int
1636 mtw_media_change(if_t ifp)
1637 {
1638 struct ieee80211vap *vap = if_getsoftc(ifp);
1639 struct ieee80211com *ic = vap->iv_ic;
1640 const struct ieee80211_txparam *tp;
1641 struct mtw_softc *sc = ic->ic_softc;
1642 uint8_t rate, ridx;
1643
1644 MTW_LOCK(sc);
1645 ieee80211_media_change(ifp);
1646 //tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
1647 tp = &vap->iv_txparms[ic->ic_curmode];
1648 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
1649 struct ieee80211_node *ni;
1650 struct mtw_node *rn;
1651 /* XXX TODO: methodize with MCS rates */
1652 rate =
1653 ic->ic_sup_rates[ic->ic_curmode].rs_rates[tp->ucastrate] &
1654 IEEE80211_RATE_VAL;
1655 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) {
1656 if (rt2860_rates[ridx].rate == rate)
1657 break;
1658 }
1659 ni = ieee80211_ref_node(vap->iv_bss);
1660 rn = MTW_NODE(ni);
1661 rn->fix_ridx = ridx;
1662
1663 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "rate=%d, fix_ridx=%d\n", rate,
1664 rn->fix_ridx);
1665 ieee80211_free_node(ni);
1666 }
1667 MTW_UNLOCK(sc);
1668
1669 return (0);
1670 }
1671
1672 void
1673 mtw_set_leds(struct mtw_softc *sc, uint16_t which)
1674 {
1675 struct mtw_mcu_cmd_8 cmd;
1676 cmd.func = htole32(0x1);
1677 cmd.val = htole32(which);
1678 mtw_mcu_cmd(sc, CMD_LED_MODE, &cmd, sizeof(struct mtw_mcu_cmd_8));
1679 }
1680 static void
1681 mtw_abort_tsf_sync(struct mtw_softc *sc)
1682 {
1683 uint32_t tmp;
1684
1685 mtw_read(sc, MTW_BCN_TIME_CFG, &tmp);
1686 tmp &= ~(MTW_BCN_TX_EN | MTW_TSF_TIMER_EN | MTW_TBTT_TIMER_EN);
1687 mtw_write(sc, MTW_BCN_TIME_CFG, tmp);
1688 }
1689 static int
1690 mtw_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
1691 {
1692 const struct ieee80211_txparam *tp;
1693 struct ieee80211com *ic = vap->iv_ic;
1694 struct mtw_softc *sc = ic->ic_softc;
1695 struct mtw_vap *rvp = MTW_VAP(vap);
1696 enum ieee80211_state ostate;
1697 uint32_t sta[3];
1698 uint8_t ratectl = 0;
1699 uint8_t restart_ratectl = 0;
1700 uint8_t bid = 1 << rvp->rvp_id;
1701
1702
1703 ostate = vap->iv_state;
1704 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "%s -> %s\n",
1705 ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
1706 IEEE80211_UNLOCK(ic);
1707 MTW_LOCK(sc);
1708 ratectl = sc->ratectl_run; /* remember current state */
1709 usb_callout_stop(&sc->ratectl_ch);
1710 sc->ratectl_run = MTW_RATECTL_OFF;
1711 if (ostate == IEEE80211_S_RUN) {
1712 /* turn link LED off */
1713 }
1714
1715 switch (nstate) {
1716 case IEEE80211_S_INIT:
1717 restart_ratectl = 1;
1718 if (ostate != IEEE80211_S_RUN)
1719 break;
1720
1721 ratectl &= ~bid;
1722 sc->runbmap &= ~bid;
1723
1724 /* abort TSF synchronization if there is no vap running */
1725 if (--sc->running == 0)
1726 mtw_abort_tsf_sync(sc);
1727 break;
1728
1729 case IEEE80211_S_RUN:
1730 if (!(sc->runbmap & bid)) {
1731 if (sc->running++)
1732 restart_ratectl = 1;
1733 sc->runbmap |= bid;
1734 }
1735
1736 m_freem(rvp->beacon_mbuf);
1737 rvp->beacon_mbuf = NULL;
1738
1739 switch (vap->iv_opmode) {
1740 case IEEE80211_M_HOSTAP:
1741 case IEEE80211_M_MBSS:
1742 sc->ap_running |= bid;
1743 ic->ic_opmode = vap->iv_opmode;
1744 mtw_update_beacon_cb(vap);
1745 break;
1746 case IEEE80211_M_IBSS:
1747 sc->adhoc_running |= bid;
1748 if (!sc->ap_running)
1749 ic->ic_opmode = vap->iv_opmode;
1750 mtw_update_beacon_cb(vap);
1751 break;
1752 case IEEE80211_M_STA:
1753 sc->sta_running |= bid;
1754 if (!sc->ap_running && !sc->adhoc_running)
1755 ic->ic_opmode = vap->iv_opmode;
1756
1757 /* read statistic counters (clear on read) */
1758 mtw_read_region_1(sc, MTW_TX_STA_CNT0, (uint8_t *)sta,
1759 sizeof sta);
1760
1761 break;
1762 default:
1763 ic->ic_opmode = vap->iv_opmode;
1764 break;
1765 }
1766
1767 if (vap->iv_opmode != IEEE80211_M_MONITOR) {
1768 struct ieee80211_node *ni;
1769
1770 if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) {
1771 MTW_UNLOCK(sc);
1772 IEEE80211_LOCK(ic);
1773 return (-1);
1774 }
1775 mtw_updateslot(ic);
1776 mtw_enable_mrr(sc);
1777 mtw_set_txpreamble(sc);
1778 mtw_set_basicrates(sc);
1779 ni = ieee80211_ref_node(vap->iv_bss);
1780 IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid);
1781 mtw_set_bssid(sc, sc->sc_bssid);
1782 ieee80211_free_node(ni);
1783 mtw_enable_tsf_sync(sc);
1784
1785 /* enable automatic rate adaptation */
1786 tp = &vap->iv_txparms[ieee80211_chan2mode(
1787 ic->ic_curchan)];
1788 if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE)
1789 ratectl |= bid;
1790 } else {
1791 mtw_enable_tsf_sync(sc);
1792 }
1793
1794 break;
1795 default:
1796 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "undefined state\n");
1797 break;
1798 }
1799
1800 /* restart amrr for running VAPs */
1801 if ((sc->ratectl_run = ratectl) && restart_ratectl) {
1802 usb_callout_reset(&sc->ratectl_ch, hz, mtw_ratectl_to, sc);
1803 }
1804 MTW_UNLOCK(sc);
1805 IEEE80211_LOCK(ic);
1806 return (rvp->newstate(vap, nstate, arg));
1807 }
1808
1809 static int
1810 mtw_wme_update(struct ieee80211com *ic)
1811 {
1812 struct chanAccParams chp;
1813 struct mtw_softc *sc = ic->ic_softc;
1814 const struct wmeParams *ac;
1815 int aci, error = 0;
1816 ieee80211_wme_ic_getparams(ic, &chp);
1817 ac = chp.cap_wmeParams;
1818
1819 MTW_LOCK(sc);
1820 /* update MAC TX configuration registers */
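/* MTW_EDCA_AC_CFG packs cwmax[19:16], cwmin[15:12], aifsn[11:8] and txop[7:0] */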
1821 for (aci = 0; aci < WME_NUM_AC; aci++) {
1822 error = mtw_write(sc, MTW_EDCA_AC_CFG(aci),
1823 ac[aci].wmep_logcwmax << 16 | ac[aci].wmep_logcwmin << 12 |
1824 ac[aci].wmep_aifsn << 8 | ac[aci].wmep_txopLimit);
1825 if (error)
1826 goto err;
1827 }
1828
1829 /* update SCH/DMA registers too */
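/*
 * The WMM AIFSN/CWMIN/CWMAX registers below pack one value per access
 * category at 4-bit offsets (BE, BK, VI, VO at shifts 0, 4, 8 and 12),
 * and each TXOP register packs two 16-bit limits (BK/BE and VO/VI).
 */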
1830 error = mtw_write(sc, MTW_WMM_AIFSN_CFG,
1831 ac[WME_AC_VO].wmep_aifsn << 12 | ac[WME_AC_VI].wmep_aifsn << 8 |
1832 ac[WME_AC_BK].wmep_aifsn << 4 | ac[WME_AC_BE].wmep_aifsn);
1833 if (error)
1834 goto err;
1835 error = mtw_write(sc, MTW_WMM_CWMIN_CFG,
1836 ac[WME_AC_VO].wmep_logcwmin << 12 |
1837 ac[WME_AC_VI].wmep_logcwmin << 8 |
1838 ac[WME_AC_BK].wmep_logcwmin << 4 | ac[WME_AC_BE].wmep_logcwmin);
1839 if (error)
1840 goto err;
1841 error = mtw_write(sc, MTW_WMM_CWMAX_CFG,
1842 ac[WME_AC_VO].wmep_logcwmax << 12 |
1843 ac[WME_AC_VI].wmep_logcwmax << 8 |
1844 ac[WME_AC_BK].wmep_logcwmax << 4 | ac[WME_AC_BE].wmep_logcwmax);
1845 if (error)
1846 goto err;
1847 error = mtw_write(sc, MTW_WMM_TXOP0_CFG,
1848 ac[WME_AC_BK].wmep_txopLimit << 16 | ac[WME_AC_BE].wmep_txopLimit);
1849 if (error)
1850 goto err;
1851 error = mtw_write(sc, MTW_WMM_TXOP1_CFG,
1852 ac[WME_AC_VO].wmep_txopLimit << 16 | ac[WME_AC_VI].wmep_txopLimit);
1853
1854 err:
1855 MTW_UNLOCK(sc);
1856 if (error)
1857 MTW_DPRINTF(sc, MTW_DEBUG_USB, "WME update failed\n");
1858
1859 return (error);
1860 }
1861
1862 static int
1863 mtw_key_set(struct ieee80211vap *vap, struct ieee80211_key *k)
1864 {
1865 struct ieee80211com *ic = vap->iv_ic;
1866 struct mtw_softc *sc = ic->ic_softc;
1867 uint32_t i;
1868
1869 i = MTW_CMDQ_GET(&sc->cmdq_store);
1870 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "cmdq_store=%d\n", i);
1871 sc->cmdq[i].func = mtw_key_set_cb;
1872 sc->cmdq[i].arg0 = NULL;
1873 sc->cmdq[i].arg1 = vap;
1874 sc->cmdq[i].k = k;
1875 IEEE80211_ADDR_COPY(sc->cmdq[i].mac, k->wk_macaddr);
1876 ieee80211_runtask(ic, &sc->cmdq_task);
1877
1878 /*
1879 * To make sure key will be set when hostapd
1880 * calls iv_key_set() before if_init().
1881 */
1882 if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
1883 MTW_LOCK(sc);
1884 sc->cmdq_key_set = MTW_CMDQ_GO;
1885 MTW_UNLOCK(sc);
1886 }
1887
1888 return (1);
1889 }
1890 static void
1891 mtw_key_set_cb(void *arg)
1892 {
1893 struct mtw_cmdq *cmdq = arg;
1894 struct ieee80211vap *vap = cmdq->arg1;
1895 struct ieee80211_key *k = cmdq->k;
1896 struct ieee80211com *ic = vap->iv_ic;
1897 struct mtw_softc *sc = ic->ic_softc;
1898 struct ieee80211_node *ni;
1899 u_int cipher = k->wk_cipher->ic_cipher;
1900 uint32_t attr;
1901 uint16_t base;
1902 uint8_t mode, wcid, iv[8];
1903 MTW_LOCK_ASSERT(sc, MA_OWNED);
1904
1905 if (vap->iv_opmode == IEEE80211_M_HOSTAP)
1906 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, cmdq->mac);
1907 else
1908 ni = vap->iv_bss;
1909
1910 /* map net80211 cipher to RT2860 security mode */
1911 switch (cipher) {
1912 case IEEE80211_CIPHER_WEP:
1913 if (k->wk_keylen < 8)
1914 mode = MTW_MODE_WEP40;
1915 else
1916 mode = MTW_MODE_WEP104;
1917 break;
1918 case IEEE80211_CIPHER_TKIP:
1919 mode = MTW_MODE_TKIP;
1920 break;
1921 case IEEE80211_CIPHER_AES_CCM:
1922 mode = MTW_MODE_AES_CCMP;
1923 break;
1924 default:
1925 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "undefined case\n");
1926 return;
1927 }
1928
1929 if (k->wk_flags & IEEE80211_KEY_GROUP) {
1930 wcid = 0; /* NB: update WCID0 for group keys */
1931 base = MTW_SKEY(0, k->wk_keyix);
1932 } else {
1933 wcid = (ni != NULL) ? MTW_AID2WCID(ni->ni_associd) : 0;
1934 base = MTW_PKEY(wcid);
1935 }
1936
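/*
 * net80211 stores a TKIP key as the 128-bit temporal key followed by the
 * 64-bit Tx MIC key and then the 64-bit Rx MIC key; the writes below swap
 * the two MIC keys, which is apparently the order the chip expects.
 */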
1937 if (cipher == IEEE80211_CIPHER_TKIP) {
1938 mtw_write_region_1(sc, base, k->wk_key, 16);
1939 mtw_write_region_1(sc, base + 16, &k->wk_key[24], 8);
1940 mtw_write_region_1(sc, base + 24, &k->wk_key[16], 8);
1941 } else {
1942 /* roundup len to 16-bit: XXX fix write_region_1() instead */
1943 mtw_write_region_1(sc, base, k->wk_key,
1944 (k->wk_keylen + 1) & ~1);
1945 }
1946
1947 if (!(k->wk_flags & IEEE80211_KEY_GROUP) ||
1948 (k->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))) {
1949 /* set initial packet number in IV+EIV */
1950 if (cipher == IEEE80211_CIPHER_WEP) {
1951 memset(iv, 0, sizeof iv);
1952 iv[3] = vap->iv_def_txkey << 6;
1953 } else {
1954 if (cipher == IEEE80211_CIPHER_TKIP) {
1955 iv[0] = k->wk_keytsc >> 8;
1956 iv[1] = (iv[0] | 0x20) & 0x7f;
1957 iv[2] = k->wk_keytsc;
1958 } else { //CCMP
1959 iv[0] = k->wk_keytsc;
1960 iv[1] = k->wk_keytsc >> 8;
1961 iv[2] = 0;
1962 }
1963 iv[3] = k->wk_keyix << 6 | IEEE80211_WEP_EXTIV;
1964 iv[4] = k->wk_keytsc >> 16;
1965 iv[5] = k->wk_keytsc >> 24;
1966 iv[6] = k->wk_keytsc >> 32;
1967 iv[7] = k->wk_keytsc >> 40;
1968 }
1969 mtw_write_region_1(sc, MTW_IVEIV(wcid), iv, 8);
1970 }
1971
1972 if (k->wk_flags & IEEE80211_KEY_GROUP) {
1973 /* install group key */
1974 mtw_read(sc, MTW_SKEY_MODE_0_7, &attr);
1975 attr &= ~(0xf << (k->wk_keyix * 4));
1976 attr |= mode << (k->wk_keyix * 4);
1977 mtw_write(sc, MTW_SKEY_MODE_0_7, attr);
1978
1979 if (cipher == IEEE80211_CIPHER_WEP) {
1980 mtw_read(sc, MTW_WCID_ATTR(wcid + 1), &attr);
1981 attr = (attr & ~0xf) | (mode << 1);
1982 mtw_write(sc, MTW_WCID_ATTR(wcid + 1), attr);
1983
1984 mtw_set_region_4(sc, MTW_IVEIV(0), 0, 4);
1985
1986 mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
1987 attr = (attr & ~0xf) | (mode << 1);
1988 mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
1989 }
1990 } else {
1991 /* install pairwise key */
1992 mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
1993 attr = (attr & ~0xf) | (mode << 1) | MTW_RX_PKEY_EN;
1994 mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
1995 }
1996 k->wk_pad = wcid;
1997 }
1998
1999 /*
2000 * If the wlan interface is destroyed without being brought down first,
2001 * i.e. without "wlan down" or wpa_cli terminate, this function may be
2002 * called after the vap is gone.  Don't dereference it.
2003 */
2004 static void
2005 mtw_key_delete_cb(void *arg)
2006 {
2007 struct mtw_cmdq *cmdq = arg;
2008 struct mtw_softc *sc = cmdq->arg1;
2009 struct ieee80211_key *k = &cmdq->key;
2010 uint32_t attr;
2011 uint8_t wcid;
2012
2013 MTW_LOCK_ASSERT(sc, MA_OWNED);
2014
2015 if (k->wk_flags & IEEE80211_KEY_GROUP) {
2016 /* remove group key */
2017 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "removing group key\n");
2018 mtw_read(sc, MTW_SKEY_MODE_0_7, &attr);
2019 attr &= ~(0xf << (k->wk_keyix * 4));
2020 mtw_write(sc, MTW_SKEY_MODE_0_7, attr);
2021 } else {
2022 /* remove pairwise key */
2023 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "removing key for wcid %x\n",
2024 k->wk_pad);
2025 /* matching wcid was written to wk_pad in mtw_key_set() */
2026 wcid = k->wk_pad;
2027 mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
2028 attr &= ~0xf;
2029 mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
2030 }
2031
2032 k->wk_pad = 0;
2033 }
2034
2035 /*
2036 * return 0 on error
2037 */
2038 static int
2039 mtw_key_delete(struct ieee80211vap *vap, struct ieee80211_key *k)
2040 {
2041 struct ieee80211com *ic = vap->iv_ic;
2042 struct mtw_softc *sc = ic->ic_softc;
2043 struct ieee80211_key *k0;
2044 uint32_t i;
2045 if (sc->sc_flags & MTW_RUNNING)
2046 return (1);
2047
2048 /*
2049 * When called back, the key might already be gone.  So, make a copy
2050 * of the values needed to delete the key before deferring.
2051 * But, because of a LOR with the node lock, we cannot take the
2052 * driver lock here; use atomics instead.
2053 */
2054 i = MTW_CMDQ_GET(&sc->cmdq_store);
2055 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "cmdq_store=%d\n", i);
2056 sc->cmdq[i].func = mtw_key_delete_cb;
2057 sc->cmdq[i].arg0 = NULL;
2058 sc->cmdq[i].arg1 = sc;
2059 k0 = &sc->cmdq[i].key;
2060 k0->wk_flags = k->wk_flags;
2061 k0->wk_keyix = k->wk_keyix;
2062 /* matching wcid was written to wk_pad in mtw_key_set() */
2063 k0->wk_pad = k->wk_pad;
2064 ieee80211_runtask(ic, &sc->cmdq_task);
2065 return (1); /* return fake success */
2066 }
2067
2068 static void
2069 mtw_ratectl_to(void *arg)
2070 {
2071 struct mtw_softc *sc = arg;
2072 /* do it in a process context, so it can go sleep */
2073 ieee80211_runtask(&sc->sc_ic, &sc->ratectl_task);
2074 /* next timeout will be rescheduled in the callback task */
2075 }
2076
2077 /* ARGSUSED */
2078 static void
2079 mtw_ratectl_cb(void *arg, int pending)
2080 {
2081
2082 struct mtw_softc *sc = arg;
2083 struct ieee80211com *ic = &sc->sc_ic;
2084 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2085
2086 if (vap == NULL)
2087 return;
2088
2089 ieee80211_iterate_nodes(&ic->ic_sta, mtw_iter_func, sc);
2090
2091 usb_callout_reset(&sc->ratectl_ch, hz, mtw_ratectl_to, sc);
2094 }
2095
2096 static void
2097 mtw_drain_fifo(void *arg)
2098 {
2099 struct mtw_softc *sc = arg;
2100 uint32_t stat;
2101 uint16_t(*wstat)[3];
2102 uint8_t wcid, mcs, pid;
2103 int8_t retry;
2104
2105 MTW_LOCK_ASSERT(sc, MA_OWNED);
2106
2107 for (;;) {
2108 /* drain Tx status FIFO (maxsize = 16) */
2109 mtw_read(sc, MTW_TX_STAT_FIFO, &stat);
2110 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "tx stat 0x%08x\n", stat);
2111 if (!(stat & MTW_TXQ_VLD))
2112 break;
2113
2114 wcid = (stat >> MTW_TXQ_WCID_SHIFT) & 0xff;
2115
2116 /* if no ACK was requested, no feedback is available */
2117 if (!(stat & MTW_TXQ_ACKREQ) || wcid > MTW_WCID_MAX ||
2118 wcid == 0)
2119 continue;
2120
2121 /*
2122 * Even though each stat has a Tx-complete-status-like format,
2123 * the device can only be polled for stats, and there is no
2124 * guarantee that the node they refer to is still around when
2125 * we read them.  If we called ieee80211_ratectl_tx_update()
2126 * here, we could easily end up referencing an already freed
2127 * node.
2128 *
2129 * To avoid that, accumulate the stats in the softc and update
2130 * the rates later with ieee80211_ratectl_tx_update().
2131 */
2132 wstat = &(sc->wcid_stats[wcid]);
2133 (*wstat)[MTW_TXCNT]++;
2134 if (stat & MTW_TXQ_OK)
2135 (*wstat)[MTW_SUCCESS]++;
2136 else
2137 counter_u64_add(sc->sc_ic.ic_oerrors, 1);
2138 /*
2139 * Check if there were retries, ie if the Tx success rate is
2140 * different from the requested rate. Note that it works only
2141 * because we do not allow rate fallback from OFDM to CCK.
2142 */
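/*
 * The PID field is set to the requested MCS + 1 at transmit time
 * (see the comment in mtw_tx()); for example, a frame requested at
 * MCS 3 (pid 4) but completed at MCS 1 counts as 2 retries.
 */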
2143 mcs = (stat >> MTW_TXQ_MCS_SHIFT) & 0x7f;
2144 pid = (stat >> MTW_TXQ_PID_SHIFT) & 0xf;
2145 if ((retry = pid - 1 - mcs) > 0) {
2146 (*wstat)[MTW_TXCNT] += retry;
2147 (*wstat)[MTW_RETRY] += retry;
2148 }
2149 }
2150 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "count=%d\n", sc->fifo_cnt);
2151
2152 sc->fifo_cnt = 0;
2153 }
2154
2155 static void
2156 mtw_iter_func(void *arg, struct ieee80211_node *ni)
2157 {
2158 struct mtw_softc *sc = arg;
2159 MTW_LOCK(sc);
2160 struct ieee80211_ratectl_tx_stats *txs = &sc->sc_txs;
2161 struct ieee80211vap *vap = ni->ni_vap;
2162 struct mtw_node *rn = MTW_NODE(ni);
2163 uint32_t sta[3];
2164 uint16_t(*wstat)[3];
2165 int error, ridx;
2166 uint8_t txrate = 0;
2167
2168 /* Check for special case */
2169 if (sc->rvp_cnt <= 1 && vap->iv_opmode == IEEE80211_M_STA &&
2170 ni != vap->iv_bss)
2171 goto fail;
2172
2173 txs->flags = IEEE80211_RATECTL_TX_STATS_NODE |
2174 IEEE80211_RATECTL_TX_STATS_RETRIES;
2175 txs->ni = ni;
2176 if (sc->rvp_cnt <= 1 &&
2177 (vap->iv_opmode == IEEE80211_M_IBSS ||
2178 vap->iv_opmode == IEEE80211_M_STA)) {
2179 /*
2180 * read statistic counters (clear on read) and update AMRR state
2181 */
2182 error = mtw_read_region_1(sc, MTW_TX_STA_CNT0, (uint8_t *)sta,
2183 sizeof sta);
2184 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "error:%d\n", error);
2185 if (error != 0)
2186 goto fail;
2187
2188 /* count failed TX as errors */
2189 if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS,
2190 le32toh(sta[0]) & 0xffff);
2191
2192 txs->nretries = (le32toh(sta[1]) >> 16);
2193 txs->nsuccess = (le32toh(sta[1]) & 0xffff);
2194 /* nretries??? */
2195 txs->nframes = txs->nsuccess + (le32toh(sta[0]) & 0xffff);
2196
2197 MTW_DPRINTF(sc, MTW_DEBUG_RATE,
2198 "retrycnt=%d success=%d failcnt=%d\n", txs->nretries,
2199 txs->nsuccess, le32toh(sta[0]) & 0xffff);
2200 } else {
2201 wstat = &(sc->wcid_stats[MTW_AID2WCID(ni->ni_associd)]);
2202
2203 if (wstat == &(sc->wcid_stats[0]) ||
2204 wstat > &(sc->wcid_stats[MTW_WCID_MAX]))
2205 goto fail;
2206
2207 txs->nretries = (*wstat)[MTW_RETRY];
2208 txs->nsuccess = (*wstat)[MTW_SUCCESS];
2209 txs->nframes = (*wstat)[MTW_TXCNT];
2210 MTW_DPRINTF(sc, MTW_DEBUG_RATE,
2211 "wstat retrycnt=%d txcnt=%d success=%d\n", txs->nretries,
2212 txs->nframes, txs->nsuccess);
2213
2214 memset(wstat, 0, sizeof(*wstat));
2215 }
2216
2217 ieee80211_ratectl_tx_update(vap, txs);
2218 ieee80211_ratectl_rate(ni, NULL, 0);
2219 txrate = ieee80211_node_get_txrate_dot11rate(ni);
2220
2221 /* XXX TODO: methodize with MCS rates */
2222 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) {
2223 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "ni_txrate=0x%x\n",
2224 txrate);
2225 if (rt2860_rates[ridx].rate == txrate) {
2226 break;
2227 }
2228 }
2229 rn->amrr_ridx = ridx;
2230 fail:
2231 MTW_UNLOCK(sc);
2232
2233 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "rate=%d, ridx=%d\n",
2234 txrate, rn->amrr_ridx);
2235 }
2236
2237 static void
2238 mtw_newassoc_cb(void *arg)
2239 {
2240 struct mtw_cmdq *cmdq = arg;
2241 struct ieee80211_node *ni = cmdq->arg1;
2242 struct mtw_softc *sc = ni->ni_vap->iv_ic->ic_softc;
2243
2244 uint8_t wcid = cmdq->wcid;
2245
2246 MTW_LOCK_ASSERT(sc, MA_OWNED);
2247
2248 mtw_write_region_1(sc, MTW_WCID_ENTRY(wcid), ni->ni_macaddr,
2249 IEEE80211_ADDR_LEN);
2250
2251 memset(&(sc->wcid_stats[wcid]), 0, sizeof(sc->wcid_stats[wcid]));
2252 }
2253
2254 static void
2255 mtw_newassoc(struct ieee80211_node *ni, int isnew)
2256 {
2257
2258 struct mtw_node *mn = MTW_NODE(ni);
2259 struct ieee80211vap *vap = ni->ni_vap;
2260 struct ieee80211com *ic = vap->iv_ic;
2261 struct mtw_softc *sc = ic->ic_softc;
2262
2263 uint8_t rate;
2264 uint8_t ridx;
2265 uint8_t wcid;
2268 wcid = MTW_AID2WCID(ni->ni_associd);
2269
2270 if (wcid > MTW_WCID_MAX) {
2271 device_printf(sc->sc_dev, "wcid=%d out of range\n", wcid);
2272 return;
2273 }
2274
2275 /* only interested in true associations */
2276 if (isnew && ni->ni_associd != 0) {
2277 /*
2278 * This function can be called from a timeout handler.
2279 * Need to defer.
2280 */
2281
2282 uint32_t cnt = MTW_CMDQ_GET(&sc->cmdq_store);
2283 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "cmdq_store=%d\n", cnt);
2284 sc->cmdq[cnt].func = mtw_newassoc_cb;
2285 sc->cmdq[cnt].arg0 = NULL;
2286 sc->cmdq[cnt].arg1 = ni;
2287 sc->cmdq[cnt].wcid = wcid;
2288 ieee80211_runtask(ic, &sc->cmdq_task);
2289 }
2290
2291 MTW_DPRINTF(sc, MTW_DEBUG_STATE,
2292 "new assoc isnew=%d associd=%x addr=%s\n", isnew, ni->ni_associd,
2293 ether_sprintf(ni->ni_macaddr));
2294 rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate;
2295 /* XXX TODO: methodize with MCS rates */
2296 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
2297 if (rt2860_rates[ridx].rate == rate)
2298 break;
2299 mn->mgt_ridx = ridx;
2300 MTW_DPRINTF(sc, MTW_DEBUG_STATE | MTW_DEBUG_RATE,
2301 "rate=%d, ctl_ridx=%d\n", rate, ridx);
2302 MTW_LOCK(sc);
2303 if (sc->ratectl_run != MTW_RATECTL_OFF) {
2304 usb_callout_reset(&sc->ratectl_ch, hz, &mtw_ratectl_to, sc);
2305 }
2306 MTW_UNLOCK(sc);
2307
2308 }
2309
2310 /*
2311 * Return the Rx chain with the highest RSSI for a given frame.
2312 */
2313 static __inline uint8_t
2314 mtw_maxrssi_chain(struct mtw_softc *sc, const struct mtw_rxwi *rxwi)
2315 {
2316 uint8_t rxchain = 0;
2317
2318 if (sc->nrxchains > 1) {
2319 if (rxwi->rssi[1] > rxwi->rssi[rxchain])
2320 rxchain = 1;
2321 if (sc->nrxchains > 2)
2322 if (rxwi->rssi[2] > rxwi->rssi[rxchain])
2323 rxchain = 2;
2324 }
2325 return (rxchain);
2326 }
2327 static void
2328 mtw_get_tsf(struct mtw_softc *sc, uint64_t *buf)
2329 {
2330 mtw_read_region_1(sc, MTW_TSF_TIMER_DW0, (uint8_t *)buf, sizeof(*buf));
2331 }
2332
2333 static void
2334 mtw_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype,
2335 const struct ieee80211_rx_stats *rxs, int rssi, int nf)
2336 {
2337 struct ieee80211vap *vap = ni->ni_vap;
2338 struct mtw_softc *sc = vap->iv_ic->ic_softc;
2339 struct mtw_vap *rvp = MTW_VAP(vap);
2340 uint64_t ni_tstamp, rx_tstamp;
2341
2342 rvp->recv_mgmt(ni, m, subtype, rxs, rssi, nf);
2343
2344 if (vap->iv_state == IEEE80211_S_RUN &&
2345 (subtype == IEEE80211_FC0_SUBTYPE_BEACON ||
2346 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) {
2347 ni_tstamp = le64toh(ni->ni_tstamp.tsf);
2348 MTW_LOCK(sc);
2349 mtw_get_tsf(sc, &rx_tstamp);
2350 MTW_UNLOCK(sc);
2351 rx_tstamp = le64toh(rx_tstamp);
2352
2353 if (ni_tstamp >= rx_tstamp) {
2354 MTW_DPRINTF(sc, MTW_DEBUG_RECV | MTW_DEBUG_BEACON,
2355 "ibss merge, tsf %ju tstamp %ju\n",
2356 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp);
2357 (void)ieee80211_ibss_merge(ni);
2358 }
2359 }
2360 }
2361 static void
2362 mtw_rx_frame(struct mtw_softc *sc, struct mbuf *m, uint32_t dmalen)
2363 {
2364 struct ieee80211com *ic = &sc->sc_ic;
2365 struct ieee80211_frame *wh;
2366 struct ieee80211_node *ni;
2367 struct epoch_tracker et;
2368
2369 struct mtw_rxwi *rxwi;
2370 uint32_t flags;
2371 uint16_t len, rxwisize;
2372 uint8_t ant, rssi;
2373 int8_t nf;
2374
2375 rxwisize = sizeof(struct mtw_rxwi);
2376
2377 if (__predict_false(
2378 dmalen < rxwisize + sizeof(struct ieee80211_frame_ack))) {
2379 MTW_DPRINTF(sc, MTW_DEBUG_RECV,
2380 "payload is too short: dma length %u < %zu\n", dmalen,
2381 rxwisize + sizeof(struct ieee80211_frame_ack));
2382 goto fail;
2383 }
2384
2385 rxwi = mtod(m, struct mtw_rxwi *);
2386 len = le16toh(rxwi->len) & 0xfff;
2387 flags = le32toh(rxwi->flags);
2388 if (__predict_false(len > dmalen - rxwisize)) {
2389 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "bad RXWI length %u > %u\n",
2390 len, dmalen);
2391 goto fail;
2392 }
2393
2394 if (__predict_false(flags & (MTW_RX_CRCERR | MTW_RX_ICVERR))) {
2395 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "%s error.\n",
2396 (flags & MTW_RX_CRCERR) ? "CRC" : "ICV");
2397 goto fail;
2398 }
2399
2400 if (flags & MTW_RX_L2PAD) {
2401 MTW_DPRINTF(sc, MTW_DEBUG_RECV,
2402 "received RT2860_RX_L2PAD frame\n");
2403 len += 2;
2404 }
2405
2406 m->m_data += rxwisize;
2407 m->m_pkthdr.len = m->m_len = len;
2408
2409 wh = mtod(m, struct ieee80211_frame *);
2410 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
2411 wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
2412 m->m_flags |= M_WEP;
2413 }
2414
2415 if (len >= sizeof(struct ieee80211_frame_min)) {
2416 ni = ieee80211_find_rxnode(ic,
2417 mtod(m, struct ieee80211_frame_min *));
2418 } else
2419 ni = NULL;
2420
2421 if (ni && ni->ni_flags & IEEE80211_NODE_HT) {
2422 m->m_flags |= M_AMPDU;
2423 }
2424
2425 if (__predict_false(flags & MTW_RX_MICERR)) {
2426 /* report MIC failures to net80211 for TKIP */
2427 if (ni != NULL)
2428 ieee80211_notify_michael_failure(ni->ni_vap, wh,
2429 rxwi->keyidx);
2430 MTW_DPRINTF(sc, MTW_DEBUG_RECV,
2431 "MIC error. Someone is lying.\n");
2432 goto fail;
2433 }
2434
2435 ant = mtw_maxrssi_chain(sc, rxwi);
2436 rssi = rxwi->rssi[ant];
2437 nf = mtw_rssi2dbm(sc, rssi, ant);
2438
2439 if (__predict_false(ieee80211_radiotap_active(ic))) {
2440 struct mtw_rx_radiotap_header *tap = &sc->sc_rxtap;
2441 uint16_t phy;
2442
2443 tap->wr_flags = 0;
2444 if (flags & MTW_RX_L2PAD)
2445 tap->wr_flags |= IEEE80211_RADIOTAP_F_DATAPAD;
2446 tap->wr_antsignal = rssi;
2447 tap->wr_antenna = ant;
2448 tap->wr_dbm_antsignal = mtw_rssi2dbm(sc, rssi, ant);
2449 tap->wr_rate = 2; /* in case it can't be found below */
2453 phy = le16toh(rxwi->phy);
2454 switch (phy >> MT7601_PHY_SHIFT) {
2455 case MTW_PHY_CCK:
2456 switch ((phy & MTW_PHY_MCS) & ~MTW_PHY_SHPRE) {
2457 case 0:
2458 tap->wr_rate = 2;
2459 break;
2460 case 1:
2461 tap->wr_rate = 4;
2462 break;
2463 case 2:
2464 tap->wr_rate = 11;
2465 break;
2466 case 3:
2467 tap->wr_rate = 22;
2468 break;
2469 }
2470 if (phy & MTW_PHY_SHPRE)
2471 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2472 break;
2473 case MTW_PHY_OFDM:
2474 switch (phy & MTW_PHY_MCS) {
2475 case 0:
2476 tap->wr_rate = 12;
2477 break;
2478 case 1:
2479 tap->wr_rate = 18;
2480 break;
2481 case 2:
2482 tap->wr_rate = 24;
2483 break;
2484 case 3:
2485 tap->wr_rate = 36;
2486 break;
2487 case 4:
2488 tap->wr_rate = 48;
2489 break;
2490 case 5:
2491 tap->wr_rate = 72;
2492 break;
2493 case 6:
2494 tap->wr_rate = 96;
2495 break;
2496 case 7:
2497 tap->wr_rate = 108;
2498 break;
2499 }
2500 break;
2501 }
2502 }
2503
2504 NET_EPOCH_ENTER(et);
2505 if (ni != NULL) {
2506 (void)ieee80211_input(ni, m, rssi, nf);
2507 ieee80211_free_node(ni);
2508 } else {
2509 (void)ieee80211_input_all(ic, m, rssi, nf);
2510 }
2511 NET_EPOCH_EXIT(et);
2512
2513 return;
2514
2515 fail:
2516 m_freem(m);
2517 counter_u64_add(ic->ic_ierrors, 1);
2518 }
2519
2520 static void
2521 mtw_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
2522 {
2523 struct mtw_softc *sc = usbd_xfer_softc(xfer);
2524 struct ieee80211com *ic = &sc->sc_ic;
2525 struct mbuf *m = NULL;
2526 struct mbuf *m0;
2527 uint32_t dmalen, mbuf_len;
2528 uint16_t rxwisize;
2529 int xferlen;
2530
2531 rxwisize = sizeof(struct mtw_rxwi);
2532
2533 usbd_xfer_status(xfer, &xferlen, NULL, NULL, NULL);
2534
2535 switch (USB_GET_STATE(xfer)) {
2536 case USB_ST_TRANSFERRED:
2537 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "rx done, actlen=%d\n",
2538 xferlen);
2539 if (xferlen < (int)(sizeof(uint32_t) + rxwisize +
2540 sizeof(struct mtw_rxd))) {
2541 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
2542 "xfer too short %d %d\n", xferlen,
2543 (int)(sizeof(uint32_t) + rxwisize +
2544 sizeof(struct mtw_rxd)));
2545 goto tr_setup;
2546 }
2547
2548 m = sc->rx_m;
2549 sc->rx_m = NULL;
2550
2551 /* FALLTHROUGH */
2552 case USB_ST_SETUP:
2553 tr_setup:
2554
2555 if (sc->rx_m == NULL) {
2556 sc->rx_m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
2557 MTW_MAX_RXSZ);
2558 }
2559 if (sc->rx_m == NULL) {
2560 MTW_DPRINTF(sc,
2561 MTW_DEBUG_RECV | MTW_DEBUG_RECV_DESC |
2562 MTW_DEBUG_USB,
2563 "could not allocate mbuf - idle with stall\n");
2564 counter_u64_add(ic->ic_ierrors, 1);
2565 usbd_xfer_set_stall(xfer);
2566 usbd_xfer_set_frames(xfer, 0);
2567 } else {
2568 /*
2569 * Directly loading a mbuf cluster into DMA to
2570 * save some data copying. This works because
2571 * there is only one cluster.
2572 */
2573 usbd_xfer_set_frame_data(xfer, 0,
2574 mtod(sc->rx_m, caddr_t), MTW_MAX_RXSZ);
2575 usbd_xfer_set_frames(xfer, 1);
2576 }
2577 usbd_transfer_submit(xfer);
2578 break;
2579
2580 default: /* Error */
2581 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2582 "USB transfer error, %s\n", usbd_errstr(error));
2583
2584 if (error != USB_ERR_CANCELLED) {
2585 /* try to clear stall first */
2586 usbd_xfer_set_stall(xfer);
2587 if (error == USB_ERR_TIMEOUT)
2588 device_printf(sc->sc_dev, "device timeout %s\n",
2589 __func__);
2590 counter_u64_add(ic->ic_ierrors, 1);
2591 goto tr_setup;
2592 }
2593 if (sc->rx_m != NULL) {
2594 m_freem(sc->rx_m);
2595 sc->rx_m = NULL;
2596 }
2597 break;
2598 }
2599
2600 if (m == NULL)
2601 return;
2602
2603 /* inputting all the frames must be last */
2604
2605 MTW_UNLOCK(sc);
2606
2607 m->m_pkthdr.len = m->m_len = xferlen;
2608
2609 /* HW can aggregate multiple 802.11 frames in a single USB xfer */
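/*
 * Each frame in the transfer appears to consist of a 32-bit DMA length
 * word, dmalen bytes of RXWI + 802.11 frame, and a trailing Rx
 * descriptor/padding, so the loop below consumes dmalen + 8 bytes per
 * aggregated frame.
 */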
2610 for (;;) {
2611 dmalen = le32toh(*mtod(m, uint32_t *)) & 0xffff;
2612
2613 if ((dmalen >= (uint32_t)-8) || (dmalen == 0) ||
2614 ((dmalen & 3) != 0)) {
2615 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
2616 "bad DMA length %u\n", dmalen);
2617 break;
2618 }
2619 if ((dmalen + 8) > (uint32_t)xferlen) {
2620 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
2621 "bad DMA length %u > %d\n", dmalen + 8, xferlen);
2622 break;
2623 }
2624
2625 /* If it is the last one or a single frame, we won't copy. */
2626 if ((xferlen -= dmalen + 8) <= 8) {
2627 /* trim 32-bit DMA-len header */
2628 m->m_data += 4;
2629 m->m_pkthdr.len = m->m_len -= 4;
2630 mtw_rx_frame(sc, m, dmalen);
2631 m = NULL; /* don't free source buffer */
2632 break;
2633 }
2634
2635 mbuf_len = dmalen + sizeof(struct mtw_rxd);
2636 if (__predict_false(mbuf_len > MCLBYTES)) {
2637 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
2638 "payload is too big: mbuf_len %u\n", mbuf_len);
2639 counter_u64_add(ic->ic_ierrors, 1);
2640 break;
2641 }
2642
2643 /* copy aggregated frames to another mbuf */
2644 m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2645 if (__predict_false(m0 == NULL)) {
2646 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC,
2647 "could not allocate mbuf\n");
2648 counter_u64_add(ic->ic_ierrors, 1);
2649 break;
2650 }
2651 m_copydata(m, 4 /* skip 32-bit DMA-len header */, mbuf_len,
2652 mtod(m0, caddr_t));
2653 m0->m_pkthdr.len = m0->m_len = mbuf_len;
2654 mtw_rx_frame(sc, m0, dmalen);
2655
2656 /* update data ptr */
2657 m->m_data += mbuf_len + 4;
2658 m->m_pkthdr.len = m->m_len -= mbuf_len + 4;
2659 }
2660
2661 /* make sure we free the source buffer, if any */
2662 m_freem(m);
2663
2664 #ifdef IEEE80211_SUPPORT_SUPERG
2665 ieee80211_ff_age_all(ic, 100);
2666 #endif
2667 MTW_LOCK(sc);
2668 }
2669
2670 static void
2671 mtw_tx_free(struct mtw_endpoint_queue *pq, struct mtw_tx_data *data, int txerr)
2672 {
2673
2674 ieee80211_tx_complete(data->ni, data->m, txerr);
2675 data->m = NULL;
2676 data->ni = NULL;
2677
2678 STAILQ_INSERT_TAIL(&pq->tx_fh, data, next);
2679 pq->tx_nfree++;
2680 }
2681 static void
2682 mtw_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error, u_int index)
2683 {
2684 struct mtw_softc *sc = usbd_xfer_softc(xfer);
2685 struct ieee80211com *ic = &sc->sc_ic;
2686 struct mtw_tx_data *data;
2687 struct ieee80211vap *vap = NULL;
2688 struct usb_page_cache *pc;
2689 struct mtw_endpoint_queue *pq = &sc->sc_epq[index];
2690 struct mbuf *m;
2691 usb_frlength_t size;
2692 int actlen;
2693 int sumlen;
2694 usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
2695
2696 switch (USB_GET_STATE(xfer)) {
2697 case USB_ST_TRANSFERRED:
2698 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2699 "transfer complete: %d bytes @ index %d\n", actlen, index);
2700
2701 data = usbd_xfer_get_priv(xfer);
2702 mtw_tx_free(pq, data, 0);
2703 usbd_xfer_set_priv(xfer, NULL);
2704
2705 /* FALLTHROUGH */
2706 case USB_ST_SETUP:
2707 tr_setup:
2708 data = STAILQ_FIRST(&pq->tx_qh);
2709 if (data == NULL)
2710 break;
2711
2712 STAILQ_REMOVE_HEAD(&pq->tx_qh, next);
2713
2714 m = data->m;
2715
2716 size = sizeof(data->desc);
2717 if ((m->m_pkthdr.len + size + 3 + 8) > MTW_MAX_TXSZ) {
2718 MTW_DPRINTF(sc, MTW_DEBUG_XMIT_DESC | MTW_DEBUG_USB,
2719 "data overflow, %u bytes\n", m->m_pkthdr.len);
2720 mtw_tx_free(pq, data, 1);
2721 goto tr_setup;
2722 }
2723
2724 pc = usbd_xfer_get_frame(xfer, 0);
2725 usbd_copy_in(pc, 0, &data->desc, size);
2726 usbd_m_copy_in(pc, size, m, 0, m->m_pkthdr.len);
2727 size += m->m_pkthdr.len;
2728 /*
2729 * Align end on a 4-byte boundary, pad 8 bytes (CRC +
2730 * 4-byte padding), and be sure to zero those trailing
2731 * bytes:
2732 */
2733 usbd_frame_zero(pc, size, ((-size) & 3) + MTW_DMA_PAD);
2734 size += ((-size) & 3) + MTW_DMA_PAD;
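/*
 * For example, a 62-byte frame gets 2 alignment bytes plus MTW_DMA_PAD
 * trailer bytes, since ((-62) & 3) == 2.
 */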
2735
2736 vap = data->ni->ni_vap;
2737 if (ieee80211_radiotap_active_vap(vap)) {
2738 const struct ieee80211_frame *wh;
2739 struct mtw_tx_radiotap_header *tap = &sc->sc_txtap;
2740 struct mtw_txwi *txwi =
2741 (struct mtw_txwi *)((uint8_t *)&data->desc +
2742 sizeof(struct mtw_txd));
2743 int has_l2pad;
2744
2745 wh = mtod(m, struct ieee80211_frame *);
2746 has_l2pad = IEEE80211_HAS_ADDR4(wh) !=
2747 IEEE80211_QOS_HAS_SEQ(wh);
2748
2749 tap->wt_flags = 0;
2750 tap->wt_rate = rt2860_rates[data->ridx].rate;
2751 tap->wt_hwqueue = index;
2752 if (le16toh(txwi->phy) & MTW_PHY_SHPRE)
2753 tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2754 if (has_l2pad)
2755 tap->wt_flags |= IEEE80211_RADIOTAP_F_DATAPAD;
2756
2757 ieee80211_radiotap_tx(vap, m);
2758 }
2759
2760 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2761 "sending frame len=%u/%u @ index %d\n", m->m_pkthdr.len,
2762 size, index);
2763
2764 usbd_xfer_set_frame_len(xfer, 0, size);
2765 usbd_xfer_set_priv(xfer, data);
2766 usbd_transfer_submit(xfer);
2767 mtw_start(sc);
2768
2769 break;
2770
2771 default:
2772 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2773 "USB transfer error, %s\n", usbd_errstr(error));
2774
2775 data = usbd_xfer_get_priv(xfer);
2776
2777 if (data != NULL) {
2778 if (data->ni != NULL)
2779 vap = data->ni->ni_vap;
2780 mtw_tx_free(pq, data, error);
2781 usbd_xfer_set_priv(xfer, NULL);
2782 }
2783
2784 if (vap == NULL)
2785 vap = TAILQ_FIRST(&ic->ic_vaps);
2786
2787 if (error != USB_ERR_CANCELLED) {
2788 if (error == USB_ERR_TIMEOUT) {
2789 device_printf(sc->sc_dev, "device timeout %s\n",
2790 __func__);
2791 uint32_t i = MTW_CMDQ_GET(&sc->cmdq_store);
2792 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2793 "cmdq_store=%d\n", i);
2794 sc->cmdq[i].func = mtw_usb_timeout_cb;
2795 sc->cmdq[i].arg0 = vap;
2796 ieee80211_runtask(ic, &sc->cmdq_task);
2797 }
2798
2799 /*
2800 * Try to clear stall first, also if other
2801 * errors occur, hence clearing stall
2802 * introduces a 50 ms delay:
2803 */
2804 usbd_xfer_set_stall(xfer);
2805 goto tr_setup;
2806 }
2807 break;
2808 }
2809 #ifdef IEEE80211_SUPPORT_SUPERG
2810 /* XXX TODO: make this deferred rather than unlock/relock */
2811 /* XXX TODO: should only do the QoS AC this belongs to */
2812 if (pq->tx_nfree >= MTW_TX_RING_COUNT) {
2813 MTW_UNLOCK(sc);
2814 ieee80211_ff_flush_all(ic);
2815 MTW_LOCK(sc);
2816 }
2817 #endif
2818 }
2819
2820 static void
2821 mtw_fw_callback(struct usb_xfer *xfer, usb_error_t error)
2822 {
2823 struct mtw_softc *sc = usbd_xfer_softc(xfer);
2824
2825 int actlen;
2826 int ntries; uint32_t tmp;
2827 // struct mtw_txd *data;
2828
2829 usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
2830 // data = usbd_xfer_get_priv(xfer);
2831 usbd_xfer_set_priv(xfer, NULL);
2832 switch (USB_GET_STATE(xfer)) {
2833
2834 case USB_ST_TRANSFERRED:
2835 sc->sc_sent += actlen;
2836 memset(sc->txd_fw[sc->sc_idx], 0, actlen);
2837
2838 if (actlen < 0x2c44 && sc->sc_idx == 0) {
2839 return;
2840 }
2841 if (sc->sc_idx == 3) {
2842
2843 if ((error = mtw_write_ivb(sc, sc->sc_ivb_1,
2844 MTW_MCU_IVB_LEN)) != 0) {
2845 device_printf(sc->sc_dev,
2846 "Could not write ivb error: %d\n", error);
2847 }
2848
2849 mtw_delay(sc, 10);
2850 for (ntries = 0; ntries < 100; ntries++) {
2851 if ((error = mtw_read_cfg(sc, MTW_MCU_DMA_ADDR,
2852 &tmp)) != 0) {
2853 device_printf(sc->sc_dev,
2854 "Could not read cfg error: %d\n", error);
2855
2856 }
2857 if (tmp == MTW_MCU_READY) {
2858 MTW_DPRINTF(sc, MTW_DEBUG_FIRMWARE,
2859 "mcu reaady %d\n", tmp);
2860 sc->fwloading = 1;
2861 break;
2862 }
2863
2864 mtw_delay(sc, 10);
2865 }
2866 if (ntries == 100)
2867 sc->fwloading = 0;
2868 wakeup(&sc->fwloading);
2869 return;
2870 }
2871
2872 if (actlen == 0x2c44) {
2873 sc->sc_idx++;
2874 DELAY(1000);
2875 }
2876
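/* FALLTHROUGH */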
2877 case USB_ST_SETUP: {
2878 int dlen = 0;
2879 dlen = sc->txd_fw[sc->sc_idx]->len;
2880
2881 mtw_write_cfg(sc, MTW_MCU_DMA_ADDR, 0x40 + sc->sc_sent);
2882 mtw_write_cfg(sc, MTW_MCU_DMA_LEN, (dlen << 16));
2883
2884 usbd_xfer_set_frame_len(xfer, 0, dlen);
2885 usbd_xfer_set_frame_data(xfer, 0, sc->txd_fw[sc->sc_idx], dlen);
2886
2887 // usbd_xfer_set_priv(xfer,sc->txd[sc->sc_idx]);
2888 usbd_transfer_submit(xfer);
2889 break;
2890
2891 default: /* Error */
2892 device_printf(sc->sc_dev, "%s:%d %s\n", __FILE__, __LINE__,
2893 usbd_errstr(error));
2894 sc->fwloading = 0;
2895 wakeup(&sc->fwloading);
2896 /*
2897 * Print error message and clear stall
2898 * for example.
2899 */
2900 break;
2901 }
2902 /*
2903 * Here it is safe to do something without the private
2904 * USB mutex locked.
2905 */
2906 }
2907 return;
2908 }
2909 static void
2910 mtw_bulk_tx_callback0(struct usb_xfer *xfer, usb_error_t error)
2911 {
2912 mtw_bulk_tx_callbackN(xfer, error, 0);
2913 }
2914
2915 static void
2916 mtw_bulk_tx_callback1(struct usb_xfer *xfer, usb_error_t error)
2917 {
2920 mtw_bulk_tx_callbackN(xfer, error, 1);
2921 }
2922
2923 static void
2924 mtw_bulk_tx_callback2(struct usb_xfer *xfer, usb_error_t error)
2925 {
2926 mtw_bulk_tx_callbackN(xfer, error, 2);
2927 }
2928
2929 static void
2930 mtw_bulk_tx_callback3(struct usb_xfer *xfer, usb_error_t error)
2931 {
2932 mtw_bulk_tx_callbackN(xfer, error, 3);
2933 }
2934
2935 static void
2936 mtw_bulk_tx_callback4(struct usb_xfer *xfer, usb_error_t error)
2937 {
2938 mtw_bulk_tx_callbackN(xfer, error, 4);
2939 }
2940
2941 static void
2942 mtw_bulk_tx_callback5(struct usb_xfer *xfer, usb_error_t error)
2943 {
2944 mtw_bulk_tx_callbackN(xfer, error, 5);
2945 }
2946
2947 static void
2948 mtw_set_tx_desc(struct mtw_softc *sc, struct mtw_tx_data *data)
2949 {
2950 struct mbuf *m = data->m;
2951 struct ieee80211com *ic = &sc->sc_ic;
2952 struct ieee80211vap *vap = data->ni->ni_vap;
2953 struct ieee80211_frame *wh;
2954 struct mtw_txd *txd;
2955 struct mtw_txwi *txwi;
2956 uint16_t xferlen, txwisize;
2957 uint16_t mcs;
2958 uint8_t ridx = data->ridx;
2959 uint8_t pad;
2960
2961 /* get MCS code from rate index */
2962 mcs = rt2860_rates[ridx].mcs;
2963
2964 txwisize = sizeof(*txwi);
2965 xferlen = txwisize + m->m_pkthdr.len;
2966
2967 /* roundup to 32-bit alignment */
2968 xferlen = (xferlen + 3) & ~3;
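/* e.g. a 61-byte TXWI + frame becomes a 64-byte transfer unit */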
2969
2970 txd = (struct mtw_txd *)&data->desc;
2971 txd->len = htole16(xferlen);
2972
2973 wh = mtod(m, struct ieee80211_frame *);
2974
2975 /*
2976 * If either both are true or both are false, the header is already
2977 * nicely aligned to 32-bit, so no L2 padding is needed.
2978 */
2979 if (IEEE80211_HAS_ADDR4(wh) == IEEE80211_QOS_HAS_SEQ(wh))
2980 pad = 0;
2981 else
2982 pad = 2;
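/*
 * For example, a QoS data frame without a fourth address has a 26-byte
 * 802.11 header, which is not 32-bit aligned and therefore needs the
 * 2 bytes of L2 padding accounted for here.
 */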
2983
2984 /* setup TX Wireless Information */
2985 txwi = (struct mtw_txwi *)(txd + 1);
2986 txwi->len = htole16(m->m_pkthdr.len - pad);
2987 if (rt2860_rates[ridx].phy == IEEE80211_T_DS) {
2988 mcs |= MTW_PHY_CCK;
2989 if (ridx != MTW_RIDX_CCK1 &&
2990 (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
2991 mcs |= MTW_PHY_SHPRE;
2992 } else if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM) {
2993 mcs |= MTW_PHY_OFDM;
2994 } else if (rt2860_rates[ridx].phy == IEEE80211_T_HT) {
2995 /* XXX TODO: [adrian] set short preamble for MCS? */
2996 mcs |= MTW_PHY_HT; /* Mixed, not greenfield */
2997 }
2998 txwi->phy = htole16(mcs);
2999
3000 /* check if RTS/CTS or CTS-to-self protection is required */
3001 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3002 ((m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) ||
3003 ((ic->ic_flags & IEEE80211_F_USEPROT) &&
3004 rt2860_rates[ridx].phy == IEEE80211_T_OFDM) ||
3005 ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
3006 rt2860_rates[ridx].phy == IEEE80211_T_HT)))
3007 txwi->txop |= MTW_TX_TXOP_HT;
3008 else
3009 txwi->txop |= MTW_TX_TXOP_BACKOFF;
3010
3011 }
3012
3013 /* This function must be called locked */
3014 static int
3015 mtw_tx(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3016 {
3017 struct ieee80211com *ic = &sc->sc_ic;
3018 struct ieee80211vap *vap = ni->ni_vap;
3019 struct ieee80211_frame *wh;
3023 struct mtw_node *rn = MTW_NODE(ni);
3024 struct mtw_tx_data *data;
3025 struct mtw_txd *txd;
3026 struct mtw_txwi *txwi;
3027 uint16_t qos;
3028 uint16_t dur;
3029 uint16_t qid;
3030 uint8_t type;
3031 uint8_t tid;
3032 uint16_t ridx;
3033 uint8_t ctl_ridx;
3034 uint16_t qflags;
3035 uint8_t xflags = 0;
3036
3037 int hasqos;
3038
3039 MTW_LOCK_ASSERT(sc, MA_OWNED);
3040
3041 wh = mtod(m, struct ieee80211_frame *);
3042 const struct ieee80211_txparam *tp = ni->ni_txparms;
3043 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3044
3045 qflags = htole16(MTW_TXD_DATA | MTW_TXD_80211 |
3046 MTW_TXD_WLAN | MTW_TXD_QSEL_HCCA);
3047
3048 if ((hasqos = IEEE80211_QOS_HAS_SEQ(wh))) {
3049 uint8_t *frm;
3050 frm = ieee80211_getqos(wh);
3054 qos = le16toh(*(const uint16_t *)frm);
3055 tid = ieee80211_gettid(wh);
3056 qid = TID_TO_WME_AC(tid);
3057 qflags |= htole16(MTW_TXD_QSEL_EDCA);
3058 } else {
3059 qos = 0;
3060 tid = 0;
3061 qid = WME_AC_BE;
3062 }
3063 if (type == IEEE80211_FC0_TYPE_MGT) {
3064 qid = 0;
3065 }
3066
3067 if (type != IEEE80211_FC0_TYPE_DATA)
3068 qflags |= htole16(MTW_TXD_WIV);
3069
3070 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3071 type != IEEE80211_FC0_TYPE_DATA || m->m_flags & M_EAPOL) {
3072 /* XXX TODO: methodize for 11n; use MCS0 for 11NA/11NG */
3073 ridx = (ic->ic_curmode == IEEE80211_MODE_11A
3074 || ic->ic_curmode == IEEE80211_MODE_11NA) ?
3075 MTW_RIDX_OFDM6 : MTW_RIDX_CCK1;
3076 if (ic->ic_curmode == IEEE80211_MODE_11NG) {
3077 ridx = 12;
3078 }
3079 ctl_ridx = rt2860_rates[ridx].ctl_ridx;
3080 } else {
3081 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3082 ridx = rn->fix_ridx;
3083 } else {
3084 ridx = rn->amrr_ridx;
3085 }
3086 /* needed below for the ACK duration in both cases */
3087 ctl_ridx = rt2860_rates[ridx].ctl_ridx;
3088 }
3089
3090 if (hasqos)
3091 xflags = 0;
3092 else
3093 xflags = MTW_TX_NSEQ;
3094
3095 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3096 (!hasqos ||
3097 (qos & IEEE80211_QOS_ACKPOLICY) !=
3098 IEEE80211_QOS_ACKPOLICY_NOACK)) {
3099 xflags |= MTW_TX_ACK;
3100 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
3101 dur = rt2860_rates[ctl_ridx].sp_ack_dur;
3102 else
3103 dur = rt2860_rates[ctl_ridx].lp_ack_dur;
3104 USETW(wh->i_dur, dur);
3105 }
3106 /* reserve slots for mgmt packets, just in case */
3107 if (sc->sc_epq[qid].tx_nfree < 3) {
3108 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "tx ring %d is full\n", qid);
3109 return (-1);
3110 }
3111
3112 data = STAILQ_FIRST(&sc->sc_epq[qid].tx_fh);
3113 STAILQ_REMOVE_HEAD(&sc->sc_epq[qid].tx_fh, next);
3114 sc->sc_epq[qid].tx_nfree--;
3115
3116 txd = (struct mtw_txd *)&data->desc;
3117 txd->flags = qflags;
3118
3119 txwi = (struct mtw_txwi *)(txd + 1);
3120 txwi->xflags = xflags;
3121 txwi->wcid = (type == IEEE80211_FC0_TYPE_DATA) ?
3122     MTW_AID2WCID(ni->ni_associd) : 0xff;
3125
3126 /* clear leftover garbage bits */
3127 txwi->flags = 0;
3128 txwi->txop = 0;
3129
3130 data->m = m;
3131 data->ni = ni;
3132 data->ridx = ridx;
3133
3134 mtw_set_tx_desc(sc, data);
3135
3136 /*
3137 * The chip keeps track of two kinds of Tx stats:
3138 * * TX_STAT_FIFO, for per-WCID stats, and
3139 * * TX_STA_CNT0, for all-TX-in-one stats.
3140 *
3141 * To use the FIFO stats, we need to store the MCS in the
3142 * driver-private PacketID field, so that we can tell whose stats
3143 * they are when we read them.  We add 1 to the MCS because setting
3144 * the PacketID field to 0 means that we don't want feedback in
3145 * TX_STAT_FIFO.  And that's what we want for STA mode, since
3146 * TX_STA_CNT0 does the job.
3147 *
3148 * FIFO stats don't count Tx with WCID 0xff, so we do this here in
3149 * mtw_tx().
3150 */
3151
3152 if (sc->rvp_cnt > 1 || vap->iv_opmode == IEEE80211_M_HOSTAP ||
3153 vap->iv_opmode == IEEE80211_M_MBSS) {
3154
3155 /*
3156 * Unlike PCI based devices, we don't get any interrupts from
3157 * USB devices, so we simulate a FIFO-is-full interrupt here.
3158 * Ralink recommends draining the FIFO stats every 100 ms, but
3159 * the 16 slots fill up quickly.  To prevent an overflow, count
3160 * the FIFO stat requests so we know how many slots are left.
3161 * We do this only in HOSTAP or multi-vap mode since FIFO stats
3162 * are used only in those modes.  Here we just drain the stats;
3163 * AMRR gets updated every second by mtw_ratectl_cb() via the
3164 * callout, so call it early, otherwise the FIFO overflows.
3165 */
3166 if (sc->fifo_cnt++ == 10) {
3167 /*
3168 * With multiple vaps or if_bridge, if_start() is called
3169 * with a non-sleepable lock, tcpinp. So, need to defer.
3170 */
3171 uint32_t i = MTW_CMDQ_GET(&sc->cmdq_store);
3172 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "cmdq_store=%d\n", i);
3173 sc->cmdq[i].func = mtw_drain_fifo;
3174 sc->cmdq[i].arg0 = sc;
3175 ieee80211_runtask(ic, &sc->cmdq_task);
3176 }
3177 }
3178
3179 STAILQ_INSERT_TAIL(&sc->sc_epq[qid].tx_qh, data, next);
3180 usbd_transfer_start(sc->sc_xfer[mtw_wme_ac_xfer_map[qid]]);
3181
3182 MTW_DPRINTF(sc, MTW_DEBUG_XMIT,
3183 "sending data frame len=%d rate=%d qid=%d\n",
3184 m->m_pkthdr.len +
3185 (int)(sizeof(struct mtw_txd) + sizeof(struct mtw_txwi)),
3186 rt2860_rates[ridx].rate, qid);
3187
3188 return (0);
3189 }
3190
3191 static int
3192 mtw_tx_mgt(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3193 {
3194 struct ieee80211com *ic = &sc->sc_ic;
3195 struct mtw_node *rn = MTW_NODE(ni);
3196 struct mtw_tx_data *data;
3197 struct ieee80211_frame *wh;
3198 struct mtw_txd *txd;
3199 struct mtw_txwi *txwi;
3200 uint8_t type;
3201 uint16_t dur;
3202 uint8_t ridx = rn->mgt_ridx;
3203 uint8_t xflags = 0;
3204 uint8_t wflags = 0;
3205
3206 MTW_LOCK_ASSERT(sc, MA_OWNED);
3207
3208 wh = mtod(m, struct ieee80211_frame *);
3209
3210 /* tell hardware to add timestamp for probe responses */
3211 if ((wh->i_fc[0] &
3212 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
3213 (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
3214 wflags |= MTW_TX_TS;
3215 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3216 xflags |= MTW_TX_ACK;
3217
3218 dur = ieee80211_ack_duration(ic->ic_rt, rt2860_rates[ridx].rate,
3219 ic->ic_flags & IEEE80211_F_SHPREAMBLE);
3220 USETW(wh->i_dur, dur);
3221 }
3222 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3223 if (sc->sc_epq[0].tx_nfree == 0)
3224 /* let caller free mbuf */
3225 return (EIO);
3226 data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
3227 STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
3228 sc->sc_epq[0].tx_nfree--;
3229
3230 txd = (struct mtw_txd *)&data->desc;
3231 txd->flags = htole16(
3232 MTW_TXD_DATA | MTW_TXD_80211 | MTW_TXD_WLAN | MTW_TXD_QSEL_EDCA);
3233 if (type != IEEE80211_FC0_TYPE_DATA)
3234 txd->flags |= htole16(MTW_TXD_WIV);
3235
3236 txwi = (struct mtw_txwi *)(txd + 1);
3237 txwi->wcid = 0xff;
3238 txwi->xflags = xflags;
3239 txwi->flags = wflags;
3240
3241 txwi->txop = 0; /* clear leftover garbage bits */
3242
3243 data->m = m;
3244 data->ni = ni;
3245 data->ridx = ridx;
3246
3247 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending mgt frame len=%d rate=%d\n",
3248 m->m_pkthdr.len +
3249 (int)(sizeof(struct mtw_txd) + sizeof(struct mtw_txwi)),
3250 rt2860_rates[ridx].rate);
3251
3252 STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);
3253
3254 usbd_transfer_start(sc->sc_xfer[MTW_BULK_TX_BE]);
3255
3256 return (0);
3257 }
3258
3259 static int
3260 mtw_sendprot(struct mtw_softc *sc, const struct mbuf *m,
3261 struct ieee80211_node *ni, int prot, int rate)
3262 {
3263 struct ieee80211com *ic = ni->ni_ic;
3264 struct mtw_tx_data *data;
3265 struct mtw_txd *txd;
3266 struct mtw_txwi *txwi;
3267 struct mbuf *mprot;
3268 int ridx;
3269 int protrate;
3270 uint8_t wflags = 0;
3271 uint8_t xflags = 0;
3272
3273 MTW_LOCK_ASSERT(sc, MA_OWNED);
3274
3275 /* check that there are free slots before allocating the mbuf */
3276 if (sc->sc_epq[0].tx_nfree == 0)
3277 /* let caller free mbuf */
3278 return (ENOBUFS);
3279
3280 mprot = ieee80211_alloc_prot(ni, m, rate, prot);
3281 if (mprot == NULL) {
3282 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
3283 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "could not allocate mbuf\n");
3284 return (ENOBUFS);
3285 }
3286
3287 protrate = ieee80211_ctl_rate(ic->ic_rt, rate);
3288 wflags = MTW_TX_FRAG;
3289 xflags = 0;
3290 if (prot == IEEE80211_PROT_RTSCTS)
3291 xflags |= MTW_TX_ACK;
3292
3293 data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
3294 STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
3295 sc->sc_epq[0].tx_nfree--;
3296
3297 txd = (struct mtw_txd *)&data->desc;
3298 txd->flags = RT2860_TX_QSEL_EDCA;
3299 txwi = (struct mtw_txwi *)(txd + 1);
3300 txwi->wcid = 0xff;
3301 txwi->flags = wflags;
3302 txwi->xflags = xflags;
3303 txwi->txop = 0; /* clear leftover garbage bits */
3304
3305 data->m = mprot;
3306 data->ni = ieee80211_ref_node(ni);
3307
3308 /* XXX TODO: methodize with MCS rates */
3309 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
3310 if (rt2860_rates[ridx].rate == protrate)
3311 break;
3312 data->ridx = ridx;
3313
3314 mtw_set_tx_desc(sc, data);
3315 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending prot len=%u rate=%u\n",
3316 m->m_pkthdr.len, rate);
3317
3318 STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);
3319
3320 usbd_transfer_start(sc->sc_xfer[0]);
3321
3322 return (0);
3323 }
3324
3325 static int
3326 mtw_tx_param(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
3327 const struct ieee80211_bpf_params *params)
3328 {
3329 struct ieee80211com *ic = ni->ni_ic;
3330 struct mtw_tx_data *data;
3331 struct mtw_txd *txd;
3332 struct mtw_txwi *txwi;
3333 uint8_t ridx;
3334 uint8_t rate;
3335 uint8_t opflags = 0;
3336 uint8_t xflags = 0;
3337 int error;
3338
3339 MTW_LOCK_ASSERT(sc, MA_OWNED);
3340
3341 KASSERT(params != NULL, ("no raw xmit params"));
3342
3343 rate = params->ibp_rate0;
3344 if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
3345 /* let caller free mbuf */
3346 return (EINVAL);
3347 }
3348
3349 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
3350 xflags |= MTW_TX_ACK;
3351 if (params->ibp_flags & (IEEE80211_BPF_RTS | IEEE80211_BPF_CTS)) {
3352 error = mtw_sendprot(sc, m, ni,
3353 params->ibp_flags & IEEE80211_BPF_RTS ?
3354 IEEE80211_PROT_RTSCTS :
3355 IEEE80211_PROT_CTSONLY,
3356 rate);
3357 if (error) {
3358 device_printf(sc->sc_dev, "%s:%d %d\n", __FILE__,
3359 __LINE__, error);
3360 return (error);
3361 }
3362 opflags |= MTW_TX_TXOP_SIFS;
3363 }
3364
3365 if (sc->sc_epq[0].tx_nfree == 0) {
3366 /* let caller free mbuf */
3367 MTW_DPRINTF(sc, MTW_DEBUG_XMIT,
3368 "sending raw frame, but tx ring is full\n");
3369 return (EIO);
3370 }
3371 data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
3372 STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
3373 sc->sc_epq[0].tx_nfree--;
3374
3375 txd = (struct mtw_txd *)&data->desc;
3376 txd->flags = htole16(
3377 MTW_TXD_DATA | MTW_TXD_80211 | MTW_TXD_WLAN | MTW_TXD_QSEL_EDCA);
3378 // txd->flags = htole16(MTW_TXD_QSEL_EDCA);
3379 txwi = (struct mtw_txwi *)(txd + 1);
3380 txwi->wcid = 0xff;
3381 txwi->xflags = xflags;
3382 txwi->txop = opflags;
3383 txwi->flags = 0; /* clear leftover garbage bits */
3384
3385 data->m = m;
3386 data->ni = ni;
3387 /* XXX TODO: methodize with MCS rates */
3388 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
3389 if (rt2860_rates[ridx].rate == rate)
3390 break;
3391 data->ridx = ridx;
3392
3393 mtw_set_tx_desc(sc, data);
3394
3395 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending raw frame len=%u rate=%u\n",
3396 m->m_pkthdr.len, rate);
3397
3398 STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);
3399
3400 usbd_transfer_start(sc->sc_xfer[MTW_BULK_RAW_TX]);
3401
3402 return (0);
3403 }
3404
3405 static int
3406 mtw_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3407 const struct ieee80211_bpf_params *params)
3408 {
3409 struct mtw_softc *sc = ni->ni_ic->ic_softc;
3410 int error = 0;
3411 MTW_LOCK(sc);
3412 /* prevent management frames from being sent if we're not ready */
3413 if (!(sc->sc_flags & MTW_RUNNING)) {
3414 error = ENETDOWN;
3415 goto done;
3416 }
3417
3418 if (params == NULL) {
3419 /* tx mgt packet */
3420 if ((error = mtw_tx_mgt(sc, m, ni)) != 0) {
3421 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "mgt tx failed\n");
3422 goto done;
3423 }
3424 } else {
3425 /* tx raw packet with param */
3426 if ((error = mtw_tx_param(sc, m, ni, params)) != 0) {
3427 MTW_DPRINTF(sc, MTW_DEBUG_XMIT,
3428 "tx with param failed\n");
3429 goto done;
3430 }
3431 }
3432
3433 done:
3434
3435 MTW_UNLOCK(sc);
3436
3437 if (error != 0) {
3438 if (m != NULL)
3439 m_freem(m);
3440 }
3441
3442 return (error);
3443 }
3444
3445 static int
3446 mtw_transmit(struct ieee80211com *ic, struct mbuf *m)
3447 {
3448 struct mtw_softc *sc = ic->ic_softc;
3449 int error;
3450 MTW_LOCK(sc);
3451 if ((sc->sc_flags & MTW_RUNNING) == 0) {
3452 MTW_UNLOCK(sc);
3453 return (ENXIO);
3454 }
3455 error = mbufq_enqueue(&sc->sc_snd, m);
3456 if (error) {
3457 MTW_UNLOCK(sc);
3458 return (error);
3459 }
3460 mtw_start(sc);
3461 MTW_UNLOCK(sc);
3462
3463 return (0);
3464 }
3465
3466 static void
3467 mtw_start(struct mtw_softc *sc)
3468 {
3469 struct ieee80211_node *ni;
3470 struct mbuf *m;
3471
3472 MTW_LOCK_ASSERT(sc, MA_OWNED);
3473
3474 if ((sc->sc_flags & MTW_RUNNING) == 0) {
3475
3476 return;
3477 }
3478 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
3479 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3480 if (mtw_tx(sc, m, ni) != 0) {
3481 mbufq_prepend(&sc->sc_snd, m);
3482 break;
3483 }
3484 }
3485 }
3486
3487 static void
3488 mtw_parent(struct ieee80211com *ic)
3489 {
3490
3491 struct mtw_softc *sc = ic->ic_softc;
3492
3493 MTW_LOCK(sc);
3494 if (sc->sc_detached) {
3495 MTW_UNLOCK(sc);
3496 return;
3497 }
3498
3499 if (!(sc->sc_flags & MTW_RUNNING) && ic->ic_nrunning > 0) {
3500 mtw_init_locked(sc);
3501 MTW_UNLOCK(sc);
3502 ieee80211_start_all(ic);
3503 return;
3504 }
3505 if ((sc->sc_flags & MTW_RUNNING) && ic->ic_nrunning > 0) {
3506 mtw_update_promisc_locked(sc);
3507 MTW_UNLOCK(sc);
3508 return;
3509 }
3510 if ((sc->sc_flags & MTW_RUNNING) && sc->rvp_cnt <= 1 &&
3511 ic->ic_nrunning == 0) {
3512 mtw_stop(sc);
3513 MTW_UNLOCK(sc);
3514 return;
3515 }
3516 return;
3517 }
3518
3519 static void
3520 mt7601_set_agc(struct mtw_softc *sc, uint8_t agc)
3521 {
3522 uint8_t bbp;
3523
3524 mtw_bbp_write(sc, 66, agc);
3525 mtw_bbp_write(sc, 195, 0x87);
3526 bbp = (agc & 0xf0) | 0x08;
3527 mtw_bbp_write(sc, 196, bbp);
3528 }
3529
3530 static int
3531 mtw_mcu_calibrate(struct mtw_softc *sc, int func, uint32_t val)
3532 {
3533 struct mtw_mcu_cmd_8 cmd;
3534
3535 cmd.func = htole32(func);
3536 cmd.val = htole32(val);
3537 return (mtw_mcu_cmd(sc, 31, &cmd, sizeof(struct mtw_mcu_cmd_8)));
3538 }
3539
3540 static int
3541 mtw_rf_write(struct mtw_softc *sc, uint8_t bank, uint8_t reg, uint8_t val)
3542 {
3543 uint32_t tmp;
3544 int error, ntries, shift;
3545
3546 for (ntries = 0; ntries < 10; ntries++) {
3547 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
3548 return (error);
3549 if (!(tmp & MTW_RF_CSR_KICK))
3550 break;
3551 }
3552 if (ntries == 10)
3553 return (ETIMEDOUT);
3554
3555 if (sc->asic_ver == 0x7601)
3556 shift = MT7601_BANK_SHIFT;
3557 else
3558 shift = MT7610_BANK_SHIFT;
3559
3560 tmp = MTW_RF_CSR_WRITE | MTW_RF_CSR_KICK | (bank & 0xf) << shift |
3561 reg << 8 | val;
3562 return (mtw_write(sc, MTW_RF_CSR, tmp));
3563 }
3564
3565 void
3566 mtw_select_chan_group(struct mtw_softc *sc, int group)
3567 {
3568 uint32_t tmp;
3569 uint8_t bbp;
3570
3571 /* Tx band 20MHz 2G */
3572 mtw_read(sc, MTW_TX_BAND_CFG, &tmp);
3573 tmp &= ~(
3574 MTW_TX_BAND_SEL_2G | MTW_TX_BAND_SEL_5G | MTW_TX_BAND_UPPER_40M);
3575 tmp |= (group == 0) ? MTW_TX_BAND_SEL_2G : MTW_TX_BAND_SEL_5G;
3576 mtw_write(sc, MTW_TX_BAND_CFG, tmp);
3577
3578 /* select 20 MHz bandwidth */
3579 mtw_bbp_read(sc, 4, &bbp);
3580 bbp &= ~0x18;
3581 bbp |= 0x40;
3582 mtw_bbp_write(sc, 4, bbp);
3583
3584 /* calibrate BBP */
3585 mtw_bbp_write(sc, 69, 0x12);
3586 mtw_bbp_write(sc, 91, 0x07);
3587 mtw_bbp_write(sc, 195, 0x23);
3588 mtw_bbp_write(sc, 196, 0x17);
3589 mtw_bbp_write(sc, 195, 0x24);
3590 mtw_bbp_write(sc, 196, 0x06);
3591 mtw_bbp_write(sc, 195, 0x81);
3592 mtw_bbp_write(sc, 196, 0x12);
3593 mtw_bbp_write(sc, 195, 0x83);
3594 mtw_bbp_write(sc, 196, 0x17);
3595 mtw_rf_write(sc, 5, 8, 0x00);
3596 // mtw_mcu_calibrate(sc, 0x6, 0x10001);
3597
3598 /* set initial AGC value */
3599 mt7601_set_agc(sc, 0x14);
3600 }
3601
3602 static int
3603 mtw_rf_read(struct mtw_softc *sc, uint8_t bank, uint8_t reg, uint8_t *val)
3604 {
3605 uint32_t tmp;
3606 int error, ntries, shift;
3607
3608 for (ntries = 0; ntries < 100; ntries++) {
3609 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
3610 return (error);
3611 if (!(tmp & MTW_RF_CSR_KICK))
3612 break;
3613 }
3614 if (ntries == 100)
3615 return (ETIMEDOUT);
3616
3617 if (sc->asic_ver == 0x7601)
3618 shift = MT7601_BANK_SHIFT;
3619 else
3620 shift = MT7610_BANK_SHIFT;
3621
3622 tmp = MTW_RF_CSR_KICK | (bank & 0xf) << shift | reg << 8;
3623 if ((error = mtw_write(sc, MTW_RF_CSR, tmp)) != 0)
3624 return (error);
3625
3626 for (ntries = 0; ntries < 100; ntries++) {
3627 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
3628 return (error);
3629 if (!(tmp & MTW_RF_CSR_KICK))
3630 break;
3631 }
3632 if (ntries == 100)
3633 return (ETIMEDOUT);
3634
3635 *val = tmp & 0xff;
3636 return (0);
3637 }
3638 static void
3639 mt7601_set_chan(struct mtw_softc *sc, u_int chan)
3640 {
3641 uint32_t tmp;
3642 uint8_t bbp, rf, txpow1;
3643 int i;
3644 /* find the settings for this channel */
3645 for (i = 0; mt7601_rf_chan[i].chan != chan; i++)
3646 ;
3647
3648 mtw_rf_write(sc, 0, 17, mt7601_rf_chan[i].r17);
3649 mtw_rf_write(sc, 0, 18, mt7601_rf_chan[i].r18);
3650 mtw_rf_write(sc, 0, 19, mt7601_rf_chan[i].r19);
3651 mtw_rf_write(sc, 0, 20, mt7601_rf_chan[i].r20);
3652
3653 /* use Tx power values from EEPROM */
3654 txpow1 = sc->txpow1[i];
3655
3656 /* Tx automatic level control */
3657 mtw_read(sc, MTW_TX_ALC_CFG0, &tmp);
3658 tmp &= ~0x3f3f;
3659 tmp |= (txpow1 & 0x3f);
3660 mtw_write(sc, MTW_TX_ALC_CFG0, tmp);
3661
3662 /* LNA */
3663 mtw_bbp_write(sc, 62, 0x37 - sc->lna[0]);
3664 mtw_bbp_write(sc, 63, 0x37 - sc->lna[0]);
3665 mtw_bbp_write(sc, 64, 0x37 - sc->lna[0]);
3666
3667 /* VCO calibration */
3668 mtw_rf_write(sc, 0, 4, 0x0a);
3669 mtw_rf_write(sc, 0, 5, 0x20);
3670 mtw_rf_read(sc, 0, 4, &rf);
3671 mtw_rf_write(sc, 0, 4, rf | 0x80);
3672
3673 /* select 20 MHz bandwidth */
3674 mtw_bbp_read(sc, 4, &bbp);
3675 bbp &= ~0x18;
3676 bbp |= 0x40;
3677 mtw_bbp_write(sc, 4, bbp);
3678 mtw_bbp_write(sc, 178, 0xff);
3679 }
3680
3681 static int
3682 mtw_set_chan(struct mtw_softc *sc, struct ieee80211_channel *c)
3683 {
3684 struct ieee80211com *ic = &sc->sc_ic;
3685 u_int chan, group;
3686
3687 chan = ieee80211_chan2ieee(ic, c);
3688 if (chan == 0 || chan == IEEE80211_CHAN_ANY)
3689 return (EINVAL);
3690
3691 /* determine channel group */
3692 if (chan <= 14)
3693 group = 0;
3694 else if (chan <= 64)
3695 group = 1;
3696 else if (chan <= 128)
3697 group = 2;
3698 else
3699 group = 3;
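/*
 * Group 0 covers the 2 GHz channels (1-14); groups 1-3 split the 5 GHz
 * range.  The MT7601 is a 2.4 GHz-only part (see mtw_getradiocaps()),
 * so only group 0 is used here.
 */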
3700
3701 if (group != sc->sc_chan_group || !sc->sc_bw_calibrated)
3702 mtw_select_chan_group(sc, group);
3703
3704 sc->sc_chan_group = group;
3705
3706 /* chipset specific */
3707 if (sc->asic_ver == 0x7601)
3708 mt7601_set_chan(sc, chan);
3709
3710 DELAY(1000);
3711 return (0);
3712 }
3713
3714 static void
3715 mtw_set_channel(struct ieee80211com *ic)
3716 {
3717 struct mtw_softc *sc = ic->ic_softc;
3718
3719 MTW_LOCK(sc);
3720 mtw_set_chan(sc, ic->ic_curchan);
3721 MTW_UNLOCK(sc);
3722
3723 return;
3724 }
3725
3726 static void
3727 mtw_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans,
3728 struct ieee80211_channel chans[])
3729 {
3730 // struct mtw_softc *sc = ic->ic_softc;
3731 uint8_t bands[IEEE80211_MODE_BYTES];
3732
3733 memset(bands, 0, sizeof(bands));
3734 setbit(bands, IEEE80211_MODE_11B);
3735 setbit(bands, IEEE80211_MODE_11G);
3736 setbit(bands, IEEE80211_MODE_11NG);
3737
3738 /* Note: for now, only support HT20 channels */
3739 ieee80211_add_channels_default_2ghz(chans, maxchans, nchans, bands, 0);
3740 }
3741
3742 static void
3743 mtw_scan_start(struct ieee80211com *ic)
3744 {
3745 struct mtw_softc *sc = ic->ic_softc;
3746 MTW_LOCK(sc);
3747 /* abort TSF synchronization */
3748 mtw_abort_tsf_sync(sc);
3749 mtw_set_bssid(sc, ieee80211broadcastaddr);
3750
3751 MTW_UNLOCK(sc);
3752
3753 return;
3754 }
3755
3756 static void
3757 mtw_scan_end(struct ieee80211com *ic)
3758 {
3759 struct mtw_softc *sc = ic->ic_softc;
3760
3761 MTW_LOCK(sc);
3762
3763 mtw_enable_tsf_sync(sc);
3764 mtw_set_bssid(sc, sc->sc_bssid);
3765
3766 MTW_UNLOCK(sc);
3767
3768 return;
3769 }
3770
3771 /*
3772 * Could be called from ieee80211_node_timeout()
3773 * (non-sleepable thread)
3774 */
3775 static void
3776 mtw_update_beacon(struct ieee80211vap *vap, int item)
3777 {
3778 struct ieee80211com *ic = vap->iv_ic;
3779 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off;
3780 struct ieee80211_node *ni = vap->iv_bss;
3781 struct mtw_softc *sc = ic->ic_softc;
3782 struct mtw_vap *rvp = MTW_VAP(vap);
3783 int mcast = 0;
3784 uint32_t i;
3785
3786 switch (item) {
3787 case IEEE80211_BEACON_ERP:
3788 mtw_updateslot(ic);
3789 break;
3790 case IEEE80211_BEACON_HTINFO:
3791 mtw_updateprot(ic);
3792 break;
3793 case IEEE80211_BEACON_TIM:
3794 mcast = 1; /*TODO*/
3795 break;
3796 default:
3797 break;
3798 }
3799
3800 setbit(bo->bo_flags, item);
3801 if (rvp->beacon_mbuf == NULL) {
3802 rvp->beacon_mbuf = ieee80211_beacon_alloc(ni);
3803 if (rvp->beacon_mbuf == NULL)
3804 return;
3805 }
3806 ieee80211_beacon_update(ni, rvp->beacon_mbuf, mcast);
3807
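/* Defer the actual beacon upload to the command queue task; as noted above, this function can run from a non-sleepable context. */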
3808 i = MTW_CMDQ_GET(&sc->cmdq_store);
3809 MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i);
3810 sc->cmdq[i].func = mtw_update_beacon_cb;
3811 sc->cmdq[i].arg0 = vap;
3812 ieee80211_runtask(ic, &sc->cmdq_task);
3813
3814 return;
3815 }
3816
3817 static void
3818 mtw_update_beacon_cb(void *arg)
3819 {
3820
3821 struct ieee80211vap *vap = arg;
3822 struct ieee80211_node *ni = vap->iv_bss;
3823 struct mtw_vap *rvp = MTW_VAP(vap);
3824 struct ieee80211com *ic = vap->iv_ic;
3825 struct mtw_softc *sc = ic->ic_softc;
3826 struct mtw_txwi txwi;
3827 struct mbuf *m;
3828 uint16_t txwisize;
3829 uint8_t ridx;
3830 if (ni->ni_chan == IEEE80211_CHAN_ANYC)
3831 return;
3832 if (ic->ic_bsschan == IEEE80211_CHAN_ANYC)
3833 return;
3834
3835 /*
3836 * No need to call ieee80211_beacon_update(); mtw_update_beacon()
3837 * already takes care of the appropriate calls.
3838 */
3839 if (rvp->beacon_mbuf == NULL) {
3840 rvp->beacon_mbuf = ieee80211_beacon_alloc(ni);
3841 if (rvp->beacon_mbuf == NULL)
3842 return;
3843 }
3844 m = rvp->beacon_mbuf;
3845
3846 memset(&txwi, 0, sizeof(txwi));
3847 txwi.wcid = 0xff;
3848 txwi.len = htole16(m->m_pkthdr.len);
3849
3850 /* send beacons at the lowest available rate */
3851 ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ? MTW_RIDX_OFDM6 :
3852 MTW_RIDX_CCK1;
3853 txwi.phy = htole16(rt2860_rates[ridx].mcs);
3854 if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM)
3855 txwi.phy |= htole16(MTW_PHY_OFDM);
3856 txwi.txop = MTW_TX_TXOP_HT;
3857 txwi.flags = MTW_TX_TS;
3858 txwi.xflags = MTW_TX_NSEQ;
3859
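/* Copy the TXWI followed by the beacon frame into the on-chip beacon area; the frame length is rounded up to an even number of bytes. */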
3860 txwisize = sizeof(txwi);
3861 mtw_write_region_1(sc, MTW_BCN_BASE, (uint8_t *)&txwi, txwisize);
3862 mtw_write_region_1(sc, MTW_BCN_BASE + txwisize, mtod(m, uint8_t *),
3863 (m->m_pkthdr.len + 1) & ~1);
3864 }
3865
3866 static void
3867 mtw_updateprot(struct ieee80211com *ic)
3868 {
3869 struct mtw_softc *sc = ic->ic_softc;
3870 uint32_t i;
3871
3872 i = MTW_CMDQ_GET(&sc->cmdq_store);
3873 MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "test cmdq_store=%d\n", i);
3874 sc->cmdq[i].func = mtw_updateprot_cb;
3875 sc->cmdq[i].arg0 = ic;
3876 ieee80211_runtask(ic, &sc->cmdq_task);
3877 }
3878
3879 static void
3880 mtw_updateprot_cb(void *arg)
3881 {
3882
3883 struct ieee80211com *ic = arg;
3884 struct mtw_softc *sc = ic->ic_softc;
3885 uint32_t tmp;
3886
3887 tmp = RT2860_RTSTH_EN | RT2860_PROT_NAV_SHORT | RT2860_TXOP_ALLOW_ALL;
3888 /* setup protection frame rate (MCS code) */
3889 tmp |= (ic->ic_curmode == IEEE80211_MODE_11A) ?
3890 rt2860_rates[MTW_RIDX_OFDM6].mcs | MTW_PHY_OFDM :
3891 rt2860_rates[MTW_RIDX_CCK11].mcs;
3892
3893 /* CCK frames don't require protection */
3894 mtw_write(sc, MTW_CCK_PROT_CFG, tmp);
3895 if (ic->ic_flags & IEEE80211_F_USEPROT) {
3896 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
3897 tmp |= RT2860_PROT_CTRL_RTS_CTS;
3898 else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
3899 tmp |= RT2860_PROT_CTRL_CTS;
3900 }
3901 mtw_write(sc, MTW_OFDM_PROT_CFG, tmp);
3902 }
3903
3904 static void
3905 mtw_usb_timeout_cb(void *arg)
3906 {
3907 struct ieee80211vap *vap = arg;
3908 struct mtw_softc *sc = vap->iv_ic->ic_softc;
3909
3910 MTW_LOCK_ASSERT(sc, MA_OWNED);
3911
3912 if (vap->iv_state == IEEE80211_S_SCAN) {
3913 MTW_DPRINTF(sc, MTW_DEBUG_USB | MTW_DEBUG_STATE,
3914 "timeout caused by scan\n");
3915 /* cancel bgscan */
3916 ieee80211_cancel_scan(vap);
3917 } else {
3918 MTW_DPRINTF(sc, MTW_DEBUG_USB | MTW_DEBUG_STATE,
3919 "timeout by unknown cause\n");
3920 }
3921 }
3922 static int mtw_reset(struct mtw_softc *sc)
3923 {
3924
3925 usb_device_request_t req;
3926 uint16_t tmp;
3927 uint16_t actlen;
3928
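/* Reset the device with a vendor-specific control request; usbd_do_request_flags() is synchronous and bounded by the 1000 ms timeout. */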
3929 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
3930 req.bRequest = MTW_RESET;
3931 USETW(req.wValue, 1);
3932 USETW(req.wIndex, 0);
3933 USETW(req.wLength, 0);
3934 return (usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx,
3935 &req, &tmp, 0, &actlen, 1000));
3936
3937 }
3938
3939
3940 static void
3941 mtw_update_promisc_locked(struct mtw_softc *sc)
3942 {
3943
3944 uint32_t tmp;
3945
3946 mtw_read(sc, MTW_RX_FILTR_CFG, &tmp);
3947
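/* MTW_DROP_UC_NOME makes the MAC drop unicast frames not addressed to us; it is cleared while promiscuous mode is active. */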
3948 tmp |= MTW_DROP_UC_NOME;
3949 if (sc->sc_ic.ic_promisc > 0)
3950 tmp &= ~MTW_DROP_UC_NOME;
3951
3952 mtw_write(sc, MTW_RX_FILTR_CFG, tmp);
3953
3954 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "%s promiscuous mode\n",
3955 (sc->sc_ic.ic_promisc > 0) ? "entering" : "leaving");
3956 }
3957
3958 static void
3959 mtw_update_promisc(struct ieee80211com *ic)
3960 {
3961 struct mtw_softc *sc = ic->ic_softc;
3962
3963 if ((sc->sc_flags & MTW_RUNNING) == 0)
3964 return;
3965
3966 MTW_LOCK(sc);
3967 mtw_update_promisc_locked(sc);
3968 MTW_UNLOCK(sc);
3969 }
3970
3971 static void
3972 mtw_enable_tsf_sync(struct mtw_softc *sc)
3973 {
3974 struct ieee80211com *ic = &sc->sc_ic;
3975 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3976 uint32_t tmp;
3977 int error;
3978 mtw_read(sc, MTW_BCN_TIME_CFG, &tmp);
3979 tmp &= ~0x1fffff;
3980 tmp |= vap->iv_bss->ni_intval * 16;
3981 tmp |= MTW_TSF_TIMER_EN | MTW_TBTT_TIMER_EN;
3982
3983 /* local TSF is always updated with remote TSF on beacon reception */
3984 tmp |= 1 << MTW_TSF_SYNC_MODE_SHIFT;
3985 error = mtw_write(sc, MTW_BCN_TIME_CFG, tmp);
3986 if (error != 0) {
3987 device_printf(sc->sc_dev, "enable_tsf_sync failed error:%d\n",
3988 error);
3989 }
3990 return;
3991 }
3992
3993 static void
3994 mtw_enable_mrr(struct mtw_softc *sc)
3995 {
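/* Program the legacy rate fallback chains used for multi-rate retry: each 4-bit field selects the rate to fall back to, with bit 3 set for OFDM entries and clear for CCK. */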
3996 #define CCK(mcs) (mcs)
3997
3998 #define OFDM(mcs) (1 << 3 | (mcs))
3999 mtw_write(sc, MTW_LG_FBK_CFG0,
4000 OFDM(6) << 28 | /* 54->48 */
4001 OFDM(5) << 24 | /* 48->36 */
4002 OFDM(4) << 20 | /* 36->24 */
4003 OFDM(3) << 16 | /* 24->18 */
4004 OFDM(2) << 12 | /* 18->12 */
4005 OFDM(1) << 8 | /* 12-> 9 */
4006 OFDM(0) << 4 | /* 9-> 6 */
4007 OFDM(0)); /* 6-> 6 */
4008
4009 mtw_write(sc, MTW_LG_FBK_CFG1,
4010 CCK(2) << 12 | /* 11->5.5 */
4011 CCK(1) << 8 | /* 5.5-> 2 */
4012 CCK(0) << 4 | /* 2-> 1 */
4013 CCK(0)); /* 1-> 1 */
4014 #undef OFDM
4015 #undef CCK
4016 }
4017
4018 static void
4019 mtw_set_txpreamble(struct mtw_softc *sc)
4020 {
4021 struct ieee80211com *ic = &sc->sc_ic;
4022 uint32_t tmp;
4023
4024 mtw_read(sc, MTW_AUTO_RSP_CFG, &tmp);
4025 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4026 tmp |= MTW_CCK_SHORT_EN;
4027 else
4028 tmp &= ~MTW_CCK_SHORT_EN;
4029 mtw_write(sc, MTW_AUTO_RSP_CFG, tmp);
4030 }
4031
4032 static void
4033 mtw_set_basicrates(struct mtw_softc *sc)
4034 {
4035 struct ieee80211com *ic = &sc->sc_ic;
4036
4037 /* set basic rates mask */
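/* The register appears to hold one bit per legacy rate: 0x003 enables 1/2 Mbps, 0x150 the mandatory OFDM rates 6/12/24 Mbps, and 0x17f the CCK rates together with the lower OFDM rates. */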
4038 if (ic->ic_curmode == IEEE80211_MODE_11B)
4039 mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x003);
4040 else if (ic->ic_curmode == IEEE80211_MODE_11A)
4041 mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x150);
4042 else /* 11g */
4043 mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x17f);
4044 }
4045
4046 static void
4047 mtw_set_bssid(struct mtw_softc *sc, const uint8_t *bssid)
4048 {
4049 mtw_write(sc, MTW_MAC_BSSID_DW0,
4050 bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24);
4051 mtw_write(sc, MTW_MAC_BSSID_DW1, bssid[4] | bssid[5] << 8);
4052 }
4053
4054 static void
4055 mtw_set_macaddr(struct mtw_softc *sc, const uint8_t *addr)
4056 {
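/* The MAC address is packed little-endian into two registers; the 0xff written to bits 16-23 of DW1 is presumably the unicast-to-me byte mask, as on RT2860 chips. */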
4057 mtw_write(sc, MTW_MAC_ADDR_DW0,
4058 addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
4059 mtw_write(sc, MTW_MAC_ADDR_DW1, addr[4] | addr[5] << 8 | 0xff << 16);
4060 }
4061
4062 static void
4063 mtw_updateslot(struct ieee80211com *ic)
4064 {
4065
4066 struct mtw_softc *sc = ic->ic_softc;
4067 uint32_t i;
4068
4069 i = MTW_CMDQ_GET(&sc->cmdq_store);
4070 MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i);
4071 sc->cmdq[i].func = mtw_updateslot_cb;
4072 sc->cmdq[i].arg0 = ic;
4073 ieee80211_runtask(ic, &sc->cmdq_task);
4074
4075 return;
4076 }
4077
4078 /* ARGSUSED */
4079 static void
4080 mtw_updateslot_cb(void *arg)
4081 {
4082 struct ieee80211com *ic = arg;
4083 struct mtw_softc *sc = ic->ic_softc;
4084 uint32_t tmp;
4085 mtw_read(sc, MTW_BKOFF_SLOT_CFG, &tmp);
4086 tmp &= ~0xff;
4087 tmp |= IEEE80211_GET_SLOTTIME(ic);
4088 mtw_write(sc, MTW_BKOFF_SLOT_CFG, tmp);
4089 }
4090
4091 static void
4092 mtw_update_mcast(struct ieee80211com *ic)
4093 {
4094 }
4095
4096 static int8_t
4097 mtw_rssi2dbm(struct mtw_softc *sc, uint8_t rssi, uint8_t rxchain)
4098 {
4099 struct ieee80211com *ic = &sc->sc_ic;
4100 struct ieee80211_channel *c = ic->ic_curchan;
4101 int delta;
4102
4103 if (IEEE80211_IS_CHAN_5GHZ(c)) {
4104 u_int chan = ieee80211_chan2ieee(ic, c);
4105 delta = sc->rssi_5ghz[rxchain];
4106
4107 /* determine channel group */
4108 if (chan <= 64)
4109 delta -= sc->lna[1];
4110 else if (chan <= 128)
4111 delta -= sc->lna[2];
4112 else
4113 delta -= sc->lna[3];
4114 } else
4115 delta = sc->rssi_2ghz[rxchain] - sc->lna[0];
4116
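/* Convert the raw RSSI to an approximate dBm value: the per-chain EEPROM offset plus the LNA gain for the current band (delta) and the raw reading are subtracted from a fixed -12 dBm reference. */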
4117 return (-12 - delta - rssi);
4118 }
4119 static int
4120 mt7601_bbp_init(struct mtw_softc *sc)
4121 {
4122 uint8_t bbp;
4123 int i, error, ntries;
4124
4125 /* wait for BBP to wake up */
4126 for (ntries = 0; ntries < 20; ntries++) {
4127 if ((error = mtw_bbp_read(sc, 0, &bbp)) != 0)
4128 return (error);
4129 if (bbp != 0 && bbp != 0xff)
4130 break;
4131 }
4132
4133 if (ntries == 20)
4134 return (ETIMEDOUT);
4135
4136 mtw_bbp_read(sc, 3, &bbp);
4137 mtw_bbp_write(sc, 3, 0);
4138 mtw_bbp_read(sc, 105, &bbp);
4139 mtw_bbp_write(sc, 105, 0);
4140
4141 /* initialize BBP registers to default values */
4142 for (i = 0; i < nitems(mt7601_def_bbp); i++) {
4143 if ((error = mtw_bbp_write(sc, mt7601_def_bbp[i].reg,
4144 mt7601_def_bbp[i].val)) != 0)
4145 return (error);
4146 }
4147
4148 sc->sc_bw_calibrated = 0;
4149
4150 return (0);
4151 }
4152
4153 static int
4154 mt7601_rf_init(struct mtw_softc *sc)
4155 {
4156 int i, error;
4157
4158 /* RF bank 0 */
4159 for (i = 0; i < nitems(mt7601_rf_bank0); i++) {
4160 error = mtw_rf_write(sc, 0, mt7601_rf_bank0[i].reg,
4161 mt7601_rf_bank0[i].val);
4162 if (error != 0)
4163 return (error);
4164 }
4165 /* RF bank 4 */
4166 for (i = 0; i < nitems(mt7601_rf_bank4); i++) {
4167 error = mtw_rf_write(sc, 4, mt7601_rf_bank4[i].reg,
4168 mt7601_rf_bank4[i].val);
4169 if (error != 0)
4170 return (error);
4171 }
4172 /* RF bank 5 */
4173 for (i = 0; i < nitems(mt7601_rf_bank5); i++) {
4174 error = mtw_rf_write(sc, 5, mt7601_rf_bank5[i].reg,
4175 mt7601_rf_bank5[i].val);
4176 if (error != 0)
4177 return (error);
4178 }
4179 return (0);
4180 }
4181
4182 static int
4183 mtw_txrx_enable(struct mtw_softc *sc)
4184 {
4185 struct ieee80211com *ic = &sc->sc_ic;
4186 uint32_t tmp;
4187 int error, ntries;
4188 mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_TX_EN);
4189 for (ntries = 0; ntries < 200; ntries++) {
4190 if ((error = mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp)) != 0) {
4191 return (error);
4192 }
4193 if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
4194 break;
4195 mtw_delay(sc, 50);
4196 }
4197 if (ntries == 200) {
4198 return (ETIMEDOUT);
4199 }
4200
4201 DELAY(50);
4202
4203 tmp |= MTW_RX_DMA_EN | MTW_TX_DMA_EN | MTW_TX_WB_DDONE;
4204 mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);
4205
4206 /* enable Rx bulk aggregation (set timeout and limit) */
4207 tmp = MTW_USB_TX_EN | MTW_USB_RX_EN | MTW_USB_RX_AGG_EN |
4208 MTW_USB_RX_AGG_TO(128) | MTW_USB_RX_AGG_LMT(2);
4209 mtw_write(sc, MTW_USB_DMA_CFG, tmp);
4210
4211 /* set Rx filter */
4212 tmp = MTW_DROP_CRC_ERR | MTW_DROP_PHY_ERR;
4213 if (ic->ic_opmode != IEEE80211_M_MONITOR) {
4214 tmp |= MTW_DROP_UC_NOME | MTW_DROP_DUPL | MTW_DROP_CTS |
4215 MTW_DROP_BA | MTW_DROP_ACK | MTW_DROP_VER_ERR |
4216 MTW_DROP_CTRL_RSV | MTW_DROP_CFACK | MTW_DROP_CFEND;
4217 if (ic->ic_opmode == IEEE80211_M_STA)
4218 tmp |= MTW_DROP_RTS | MTW_DROP_PSPOLL;
4219 }
4220 mtw_write(sc, MTW_RX_FILTR_CFG, tmp);
4221
4222 mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_RX_EN | MTW_MAC_TX_EN);
4223 return (0);
4224 }
4225 static int
4226 mt7601_rxdc_cal(struct mtw_softc *sc)
4227 {
4228 uint32_t tmp;
4229 uint8_t bbp;
4230 int ntries;
4231
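/* RX DC offset calibration: with only MAC RX enabled, BBP registers 158/159 (apparently an index/data pair) are used to kick the calibration, and register 159 is polled until it reads back 0x0c. */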
4232 mtw_read(sc, MTW_MAC_SYS_CTRL, &tmp);
4233 mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_RX_EN);
4234 mtw_bbp_write(sc, 158, 0x8d);
4235 mtw_bbp_write(sc, 159, 0xfc);
4236 mtw_bbp_write(sc, 158, 0x8c);
4237 mtw_bbp_write(sc, 159, 0x4c);
4238
4239 for (ntries = 0; ntries < 20; ntries++) {
4240 DELAY(300);
4241 mtw_bbp_write(sc, 158, 0x8c);
4242 mtw_bbp_read(sc, 159, &bbp);
4243 if (bbp == 0x0c)
4244 break;
4245 }
4246
4247 if (ntries == 20)
4248 return (ETIMEDOUT);
4249
4250 mtw_write(sc, MTW_MAC_SYS_CTRL, 0);
4251 mtw_bbp_write(sc, 158, 0x8d);
4252 mtw_bbp_write(sc, 159, 0xe0);
4253 mtw_write(sc, MTW_MAC_SYS_CTRL, tmp);
4254 return (0);
4255 }
4256
4257 static int
4258 mt7601_r49_read(struct mtw_softc *sc, uint8_t flag, int8_t *val)
4259 {
4260 uint8_t bbp;
4261
4262 mtw_bbp_read(sc, 47, &bbp);
4263 bbp = 0x90;
4264 mtw_bbp_write(sc, 47, bbp);
4265 bbp &= ~0x0f;
4266 bbp |= flag;
4267 mtw_bbp_write(sc, 47, bbp);
4268 return (mtw_bbp_read(sc, 49, val));
4269 }
4270
4271 static int
4272 mt7601_rf_temperature(struct mtw_softc *sc, int8_t *val)
4273 {
4274 uint32_t rfb, rfs;
4275 uint8_t bbp;
4276 int ntries;
4277
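/* Read the on-die temperature sensor: bypass the RF front end, trigger a measurement through BBP registers 47/22, poll for completion, fetch the result through the R49 window and finally restore the saved RF settings. */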
4278 mtw_read(sc, MTW_RF_BYPASS0, &rfb);
4279 mtw_read(sc, MTW_RF_SETTING0, &rfs);
4280 mtw_write(sc, MTW_RF_BYPASS0, 0);
4281 mtw_write(sc, MTW_RF_SETTING0, 0x10);
4282 mtw_write(sc, MTW_RF_BYPASS0, 0x10);
4283
4284 mtw_bbp_read(sc, 47, &bbp);
4285 bbp &= ~0x7f;
4286 bbp |= 0x10;
4287 mtw_bbp_write(sc, 47, bbp);
4288
4289 mtw_bbp_write(sc, 22, 0x40);
4290
4291 for (ntries = 0; ntries < 10; ntries++) {
4292 mtw_bbp_read(sc, 47, &bbp);
4293 if ((bbp & 0x10) == 0)
4294 break;
4295 }
4296 if (ntries == 10)
4297 return (ETIMEDOUT);
4298
4299 mt7601_r49_read(sc, MT7601_R47_TEMP, val);
4300
4301 mtw_bbp_write(sc, 22, 0);
4302
4303 mtw_bbp_read(sc, 21, &bbp);
4304 bbp |= 0x02;
4305 mtw_bbp_write(sc, 21, bbp);
4306 bbp &= ~0x02;
4307 mtw_bbp_write(sc, 21, bbp);
4308
4309 mtw_write(sc, MTW_RF_BYPASS0, 0);
4310 mtw_write(sc, MTW_RF_SETTING0, rfs);
4311 mtw_write(sc, MTW_RF_BYPASS0, rfb);
4312 return (0);
4313 }
4314
4315 static int
4316 mt7601_rf_setup(struct mtw_softc *sc)
4317 {
4318 uint32_t tmp;
4319 uint8_t rf;
4320 int error;
4321
4322 if (sc->sc_rf_calibrated)
4323 return (0);
4324
4325 /* init RF registers */
4326 if ((error = mt7601_rf_init(sc)) != 0)
4327 return (error);
4328
4329 /* init frequency offset */
4330 mtw_rf_write(sc, 0, 12, sc->rf_freq_offset);
4331 mtw_rf_read(sc, 0, 12, &rf);
4332
4333 /* read temperature */
4334 mt7601_rf_temperature(sc, &rf);
4335 sc->bbp_temp = rf;
4336 device_printf(sc->sc_dev, "BBP temp 0x%x\n", rf);
4337
4338 mtw_rf_read(sc, 0, 7, &rf);
4339 if ((error = mtw_mcu_calibrate(sc, 0x1, 0)) != 0)
4340 return (error);
4341 mtw_delay(sc, 100);
4342 mtw_rf_read(sc, 0, 7, &rf);
4343
4344 /* Calibrate VCO RF 0/4 */
4345 mtw_rf_write(sc, 0, 4, 0x0a);
4346 mtw_rf_write(sc, 0, 4, 0x20);
4347 mtw_rf_read(sc, 0, 4, &rf);
4348 mtw_rf_write(sc, 0, 4, rf | 0x80);
4349
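/* Firmware-assisted calibration steps; the command codes and their ordering follow the driver this port is based on and are not otherwise documented here. */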
4350 if ((error = mtw_mcu_calibrate(sc, 0x9, 0)) != 0)
4351 return (error);
4352 if ((error = mt7601_rxdc_cal(sc)) != 0)
4353 return (error);
4354 if ((error = mtw_mcu_calibrate(sc, 0x6, 1)) != 0)
4355 return (error);
4356 if ((error = mtw_mcu_calibrate(sc, 0x6, 0)) != 0)
4357 return (error);
4358 if ((error = mtw_mcu_calibrate(sc, 0x4, 0)) != 0)
4359 return (error);
4360 if ((error = mtw_mcu_calibrate(sc, 0x5, 0)) != 0)
4361 return (error);
4362
4363 mtw_read(sc, MTW_LDO_CFG0, &tmp);
4364 tmp &= ~(1 << 4);
4365 tmp |= (1 << 2);
4366 mtw_write(sc, MTW_LDO_CFG0, tmp);
4367
4368 if ((error = mtw_mcu_calibrate(sc, 0x8, 0)) != 0)
4369 return (error);
4370 if ((error = mt7601_rxdc_cal(sc)) != 0)
4371 return (error);
4372
4373 sc->sc_rf_calibrated = 1;
4374 return (0);
4375 }
4376
4377 static void
4378 mtw_set_txrts(struct mtw_softc *sc)
4379 {
4380 uint32_t tmp;
4381
4382 /* set RTS threshold */
4383 mtw_read(sc, MTW_TX_RTS_CFG, &tmp);
4384 tmp &= ~0xffff00;
4385 tmp |= 0x1000 << MTW_RTS_THRES_SHIFT;
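/* 0x1000 bytes is larger than any MPDU this driver transmits, so the threshold effectively never forces RTS on its own. */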
4386 mtw_write(sc, MTW_TX_RTS_CFG, tmp);
4387 }
4388 static int
4389 mtw_mcu_radio(struct mtw_softc *sc, int func, uint32_t val)
4390 {
4391 struct mtw_mcu_cmd_16 cmd;
4392
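/* MCU command 20 switches the radio state; mtw_init_locked() uses func 0x31 to power the radio on. */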
4393 cmd.r1 = htole32(func);
4394 cmd.r2 = htole32(val);
4395 cmd.r3 = 0;
4396 cmd.r4 = 0;
4397 return (mtw_mcu_cmd(sc, 20, &cmd, sizeof(struct mtw_mcu_cmd_16)));
4398 }
4399 static void
4400 mtw_init_locked(struct mtw_softc *sc)
4401 {
4402
4403 struct ieee80211com *ic = &sc->sc_ic;
4404 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4405 uint32_t tmp;
4406 int i, error, ridx, ntries;
4407 if (ic->ic_nrunning > 1)
4408 return;
4409 mtw_stop(sc);
4410
4411 for (i = 0; i != MTW_EP_QUEUES; i++)
4412 mtw_setup_tx_list(sc, &sc->sc_epq[i]);
4413
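/* wait for the WPDMA engine to go idle before reprogramming it */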
4414 for (ntries = 0; ntries < 100; ntries++) {
4415 if ((error = mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp)) != 0)
4416 goto fail;
4417 if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
4418 break;
4419 DELAY(1000);
4420 }
4421 if (ntries == 100) {
4422 device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
4423 error = ETIMEDOUT;
4424 goto fail;
4425 }
4426 tmp &= 0xff0;
4427 tmp |= MTW_TX_WB_DDONE;
4428 mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);
4429
4430 mtw_set_leds(sc, MTW_LED_MODE_ON);
4431 /* reset MAC and baseband */
4432 mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_BBP_HRST | MTW_MAC_SRST);
4433 mtw_write(sc, MTW_USB_DMA_CFG, 0);
4434 mtw_write(sc, MTW_MAC_SYS_CTRL, 0);
4435
4436 /* init MAC values */
4437 if (sc->asic_ver == 0x7601) {
4438 for (i = 0; i < nitems(mt7601_def_mac); i++)
4439 mtw_write(sc, mt7601_def_mac[i].reg,
4440 mt7601_def_mac[i].val);
4441 }
4442
4443 /* wait while MAC is busy */
4444 for (ntries = 0; ntries < 100; ntries++) {
4445 if ((error = mtw_read(sc, MTW_MAC_STATUS_REG, &tmp)) != 0)
4446 goto fail;
4447 if (!(tmp & (MTW_RX_STATUS_BUSY | MTW_TX_STATUS_BUSY)))
4448 break;
4449 DELAY(1000);
4450 }
4451 if (ntries == 100) {
4452 error = ETIMEDOUT;
4453 goto fail;
4454 }
4455
4456 /* set MAC address */
4457
4458 mtw_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);
4459
4460 /* clear WCID attribute table */
4461 mtw_set_region_4(sc, MTW_WCID_ATTR(0), 1, 8 * 32);
4462
4463 mtw_write(sc, 0x1648, 0x00830083);
4464 mtw_read(sc, MTW_FCE_L2_STUFF, &tmp);
4465 tmp &= ~MTW_L2S_WR_MPDU_LEN_EN;
4466 mtw_write(sc, MTW_FCE_L2_STUFF, tmp);
4467
4468 /* RTS config */
4469 mtw_set_txrts(sc);
4470
4471 /* clear Host to MCU mailbox */
4472 mtw_write(sc, MTW_BBP_CSR, 0);
4473 mtw_write(sc, MTW_H2M_MAILBOX, 0);
4474
4475 /* clear RX WCID search table */
4476 mtw_set_region_4(sc, MTW_WCID_ENTRY(0), 0xffffffff, 512);
4477
4478 /* abort TSF synchronization */
4479 mtw_abort_tsf_sync(sc);
4480
4481 mtw_read(sc, MTW_US_CYC_CNT, &tmp);
4482 tmp = (tmp & ~0xff);
4483 if (sc->asic_ver == 0x7601)
4484 tmp |= 0x1e;
4485 mtw_write(sc, MTW_US_CYC_CNT, tmp);
4486
4487 /* clear shared key table */
4488 mtw_set_region_4(sc, MTW_SKEY(0, 0), 0, 8 * 32);
4489
4490 /* clear IV/EIV table */
4491 mtw_set_region_4(sc, MTW_IVEIV(0), 0, 8 * 32);
4492
4493 /* clear shared key mode */
4494 mtw_write(sc, MTW_SKEY_MODE_0_7, 0);
4495 mtw_write(sc, MTW_SKEY_MODE_8_15, 0);
4496
4497 /* txop truncation */
4498 mtw_write(sc, MTW_TXOP_CTRL_CFG, 0x0000583f);
4499
4500 /* init Tx power for all Tx rates */
4501 for (ridx = 0; ridx < 5; ridx++) {
4502 if (sc->txpow20mhz[ridx] == 0xffffffff)
4503 continue;
4504 mtw_write(sc, MTW_TX_PWR_CFG(ridx), sc->txpow20mhz[ridx]);
4505 }
4506 mtw_write(sc, MTW_TX_PWR_CFG7, 0);
4507 mtw_write(sc, MTW_TX_PWR_CFG9, 0);
4508
4509 mtw_read(sc, MTW_CMB_CTRL, &tmp);
4510 tmp &= ~(1 << 18 | 1 << 14);
4511 mtw_write(sc, MTW_CMB_CTRL, tmp);
4512
4513 /* clear USB DMA */
4514 mtw_write(sc, MTW_USB_DMA_CFG,
4515 MTW_USB_TX_EN | MTW_USB_RX_EN | MTW_USB_RX_AGG_EN |
4516 MTW_USB_TX_CLEAR | MTW_USB_TXOP_HALT | MTW_USB_RX_WL_DROP);
4517 mtw_delay(sc, 50);
4518 mtw_read(sc, MTW_USB_DMA_CFG, &tmp);
4519 tmp &= ~(MTW_USB_TX_CLEAR | MTW_USB_TXOP_HALT | MTW_USB_RX_WL_DROP);
4520 mtw_write(sc, MTW_USB_DMA_CFG, tmp);
4521
4522 /* enable radio */
4523 mtw_mcu_radio(sc, 0x31, 0);
4524
4525 /* init RF registers */
4526 if (sc->asic_ver == 0x7601)
4527 mt7601_rf_init(sc);
4528
4529 /* init baseband registers */
4530 if (sc->asic_ver == 0x7601)
4531 error = mt7601_bbp_init(sc);
4532
4533 if (error != 0) {
4534 device_printf(sc->sc_dev, "could not initialize BBP\n");
4535 goto fail;
4536 }
4537
4538 /* setup and calibrate RF */
4539 error = mt7601_rf_setup(sc);
4540
4541 if (error != 0) {
4542 device_printf(sc->sc_dev, "could not initialize RF\n");
4543 goto fail;
4544 }
4545
4546 /* select default channel */
4547 mtw_set_chan(sc, ic->ic_curchan);
4548
4549 /* setup initial protection mode */
4550 mtw_updateprot_cb(ic);
4551
4552 sc->sc_flags |= MTW_RUNNING;
4553 sc->cmdq_run = MTW_CMDQ_GO;
4554 for (i = 0; i != MTW_N_XFER; i++)
4555 usbd_xfer_set_stall(sc->sc_xfer[i]);
4556
4557 usbd_transfer_start(sc->sc_xfer[MTW_BULK_RX]);
4558
4559 error = mtw_txrx_enable(sc);
4560 if (error != 0) {
4561 goto fail;
4562 }
4563
4564 return;
4565
4566 fail:
4567
4568 mtw_stop(sc);
4569 return;
4570 }
4571
4572 static void
4573 mtw_stop(void *arg)
4574 {
4575 struct mtw_softc *sc = (struct mtw_softc *)arg;
4576 uint32_t tmp;
4577 int i, ntries, error;
4578
4579 MTW_LOCK_ASSERT(sc, MA_OWNED);
4580
4581 sc->sc_flags &= ~MTW_RUNNING;
4582
4583 sc->ratectl_run = MTW_RATECTL_OFF;
4584 sc->cmdq_run = sc->cmdq_key_set;
4585
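/* Drop the driver lock while draining: usbd_transfer_drain() acquires the transfer mutex itself and sleeps until all pending callbacks have finished. */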
4586 MTW_UNLOCK(sc);
4587
4588 for (i = 0; i < MTW_N_XFER; i++)
4589 usbd_transfer_drain(sc->sc_xfer[i]);
4590
4591 MTW_LOCK(sc);
4592
4593 mtw_drain_mbufq(sc);
4594
4595 if (sc->rx_m != NULL) {
4596 m_free(sc->rx_m);
4597 sc->rx_m = NULL;
4598 }
4599
4600 /* Disable Tx/Rx DMA. */
4601 mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp);
4602 tmp &= ~(MTW_RX_DMA_EN | MTW_TX_DMA_EN);
4603 mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);
4604 // mtw_usb_dma_write(sc, 0);
4605
4606 for (ntries = 0; ntries < 100; ntries++) {
4607 if (mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp) != 0)
4608 break;
4609 if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
4610 break;
4611 DELAY(10);
4612 }
4613 if (ntries == 100) {
4614 device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
4615 }
4616
4617 /* stop MAC Tx/Rx */
4618 mtw_read(sc, MTW_MAC_SYS_CTRL, &tmp);
4619 tmp &= ~(MTW_MAC_RX_EN | MTW_MAC_TX_EN);
4620 mtw_write(sc, MTW_MAC_SYS_CTRL, tmp);
4621
4622 /* disable RTS retry */
4623 mtw_read(sc, MTW_TX_RTS_CFG, &tmp);
4624 tmp &= ~0xff;
4625 mtw_write(sc, MTW_TX_RTS_CFG, tmp);
4626
4627 /* US_CYC_CFG */
4628 mtw_read(sc, MTW_US_CYC_CNT, &tmp);
4629 tmp = (tmp & ~0xff);
4630 mtw_write(sc, MTW_US_CYC_CNT, tmp);
4631
4632 /* stop PBF */
4633 mtw_read(sc, MTW_PBF_CFG, &tmp);
4634 tmp &= ~0x3;
4635 mtw_write(sc, MTW_PBF_CFG, tmp);
4636
4637 /* wait for pending Tx to complete */
4638 for (ntries = 0; ntries < 100; ntries++) {
4639 if ((error = mtw_read(sc, MTW_TXRXQ_PCNT, &tmp)) != 0)
4640 break;
4641 if ((tmp & MTW_TX2Q_PCNT_MASK) == 0)
4642 break;
4643 }
4644
4645 }
4646
4647 static void
4648 mtw_delay(struct mtw_softc *sc, u_int ms)
4649 {
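/* Sleep for the given number of milliseconds; when the driver mutex is held, usb_pause_mtx() releases it while sleeping and reacquires it afterwards. */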
4650 usb_pause_mtx(mtx_owned(&sc->sc_mtx) ? &sc->sc_mtx : NULL,
4651 USB_MS_TO_TICKS(ms));
4652 }
4653
4654 static void
4655 mtw_update_chw(struct ieee80211com *ic)
4656 {
4657
4658 printf("%s: TODO\n", __func__);
4659 }
4660
4661 static int
4662 mtw_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
4663 {
4664
4665 /* For now, no A-MPDU TX support in the driver */
4666 return (0);
4667 }
4668
4669 static device_method_t mtw_methods[] = {
4670 /* Device interface */
4671 DEVMETHOD(device_probe, mtw_match),
4672 DEVMETHOD(device_attach, mtw_attach),
4673 DEVMETHOD(device_detach, mtw_detach), DEVMETHOD_END
4674 };
4675
4676 static driver_t mtw_driver = { .name = "mtw",
4677 .methods = mtw_methods,
4678 .size = sizeof(struct mtw_softc) };
4679
4680 DRIVER_MODULE(mtw, uhub, mtw_driver, mtw_driver_loaded, NULL);
4681 MODULE_DEPEND(mtw, wlan, 1, 1, 1);
4682 MODULE_DEPEND(mtw, usb, 1, 1, 1);
4683 MODULE_DEPEND(mtw, firmware, 1, 1, 1);
4684 MODULE_VERSION(mtw, 1);
4685 USB_PNP_HOST_INFO(mtw_devs);
4686