1 /*-
2 * Copyright (c) 2008-2010 Damien Bergamini <damien.bergamini@free.fr>
3 * Copyright (c) 2013-2014 Kevin Lo
4 * Copyright (c) 2021 James Hastings
5  * Ported to FreeBSD by Jesper Schmitz Mouridsen <jsm@FreeBSD.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * MediaTek MT7601U 802.11b/g/n WLAN.
22 */
23
24 #include "opt_wlan.h"
25
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/bus.h>
29 #include <sys/endian.h>
30 #include <sys/eventhandler.h>
31 #include <sys/firmware.h>
32 #include <sys/kdb.h>
33 #include <sys/kernel.h>
34 #include <sys/linker.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/module.h>
39 #include <sys/mutex.h>
40 #include <sys/socket.h>
41 #include <sys/sockio.h>
42 #include <sys/sysctl.h>
43
44 #include <net/bpf.h>
45 #include <net/ethernet.h>
46 #include <net/if.h>
47 #include <net/if_arp.h>
48 #include <net/if_dl.h>
49 #include <net/if_media.h>
50 #include <net/if_types.h>
51 #include <net/if_var.h>
52 #include <net80211/ieee80211_var.h>
53 #include <net80211/ieee80211_radiotap.h>
54 #include <net80211/ieee80211_ratectl.h>
55 #include <net80211/ieee80211_regdomain.h>
56 #ifdef IEEE80211_SUPPORT_SUPERG
57 #include <net80211/ieee80211_superg.h>
58 #endif
59 #include <netinet/if_ether.h>
60 #include <netinet/in.h>
61 #include <netinet/in_systm.h>
62 #include <netinet/in_var.h>
63 #include <netinet/ip.h>
64
65 #include <dev/usb/usb.h>
66 #include <dev/usb/usbdi.h>
67
68 #include "usbdevs.h"
69
70 #define USB_DEBUG_VAR mtw_debug
71 #include <dev/usb/usb_debug.h>
72 #include <dev/usb/usb_msctest.h>
73
74 #include "if_mtwreg.h"
75 #include "if_mtwvar.h"
76
77 #define MTW_DEBUG
78
79 #ifdef MTW_DEBUG
80 int mtw_debug;
81 static SYSCTL_NODE(_hw_usb, OID_AUTO, mtw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
82 "USB mtw");
83 SYSCTL_INT(_hw_usb_mtw, OID_AUTO, debug, CTLFLAG_RWTUN, &mtw_debug, 0,
84 "mtw debug level");
85
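/*
 * The debug flags below may be OR'ed together and set through the
 * hw.usb.mtw.debug sysctl/tunable, e.g.
 *   sysctl hw.usb.mtw.debug=0x5	# basic xmit + recv operation
 */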
86 enum {
87 MTW_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
88 MTW_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
89 MTW_DEBUG_RECV = 0x00000004, /* basic recv operation */
90 MTW_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */
91 MTW_DEBUG_STATE = 0x00000010, /* 802.11 state transitions */
92 MTW_DEBUG_RATE = 0x00000020, /* rate adaptation */
93 MTW_DEBUG_USB = 0x00000040, /* usb requests */
94 MTW_DEBUG_FIRMWARE = 0x00000080, /* firmware(9) loading debug */
95 MTW_DEBUG_BEACON = 0x00000100, /* beacon handling */
96 MTW_DEBUG_INTR = 0x00000200, /* ISR */
97 MTW_DEBUG_TEMP = 0x00000400, /* temperature calibration */
98 MTW_DEBUG_ROM = 0x00000800, /* various ROM info */
99 MTW_DEBUG_KEY = 0x00001000, /* crypto keys management */
100 MTW_DEBUG_TXPWR = 0x00002000, /* dump Tx power values */
101 MTW_DEBUG_RSSI = 0x00004000, /* dump RSSI lookups */
102 MTW_DEBUG_RESET = 0x00008000, /* initialization progress */
103 MTW_DEBUG_CALIB = 0x00010000, /* calibration progress */
104 MTW_DEBUG_CMD = 0x00020000, /* command queue */
105 MTW_DEBUG_ANY = 0xffffffff
106 };
107
108 #define MTW_DPRINTF(_sc, _m, ...) \
109 do { \
110 if (mtw_debug & (_m)) \
111 device_printf((_sc)->sc_dev, __VA_ARGS__); \
112 } while (0)
113
114 #else
115 #define MTW_DPRINTF(_sc, _m, ...) \
116 do { \
117 (void)_sc; \
118 } while (0)
119 #endif
120
121 #define IEEE80211_HAS_ADDR4(wh) IEEE80211_IS_DSTODS(wh)
122
123 /* NB: "11" is the maximum number of padding bytes needed for Tx */
124 #define MTW_MAX_TXSZ \
125 (sizeof(struct mtw_txd) + sizeof(struct mtw_txwi) + MCLBYTES + 11)
126
127 /*
128  * Because of a LOR in mtw_key_delete(), use atomics instead.
129  * '& MTW_CMDQ_MASQ' wraps the index around cmdq[].
130  */
131 #define MTW_CMDQ_GET(c) (atomic_fetchadd_32((c), 1) & MTW_CMDQ_MASQ)
132
133 static const STRUCT_USB_HOST_ID mtw_devs[] = {
134 #define MTW_DEV(v, p) \
135 { \
136 USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) \
137 }
138 MTW_DEV(EDIMAX, MT7601U),
139 MTW_DEV(RALINK, MT7601U),
140 MTW_DEV(XIAOMI, MT7601U)
141 };
142 #undef MTW_DEV
143
144 static device_probe_t mtw_match;
145 static device_attach_t mtw_attach;
146 static device_detach_t mtw_detach;
147
148 static usb_callback_t mtw_bulk_rx_callback;
149 static usb_callback_t mtw_bulk_tx_callback0;
150 static usb_callback_t mtw_bulk_tx_callback1;
151 static usb_callback_t mtw_bulk_tx_callback2;
152 static usb_callback_t mtw_bulk_tx_callback3;
153 static usb_callback_t mtw_bulk_tx_callback4;
154 static usb_callback_t mtw_bulk_tx_callback5;
155 static usb_callback_t mtw_fw_callback;
156
157 static void mtw_autoinst(void *, struct usb_device *, struct usb_attach_arg *);
158 static int mtw_driver_loaded(struct module *, int, void *);
159 static void mtw_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error,
160 u_int index);
161 static struct ieee80211vap *mtw_vap_create(struct ieee80211com *,
162 const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
163 const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
164 static void mtw_vap_delete(struct ieee80211vap *);
165 static void mtw_cmdq_cb(void *, int);
166 static void mtw_setup_tx_list(struct mtw_softc *, struct mtw_endpoint_queue *);
167 static void mtw_unsetup_tx_list(struct mtw_softc *,
168 struct mtw_endpoint_queue *);
169 static void mtw_load_microcode(void *arg);
170
171 static usb_error_t mtw_do_request(struct mtw_softc *,
172 struct usb_device_request *, void *);
173 static int mtw_read(struct mtw_softc *, uint16_t, uint32_t *);
174 static int mtw_read_region_1(struct mtw_softc *, uint16_t, uint8_t *, int);
175 static int mtw_write_2(struct mtw_softc *, uint16_t, uint16_t);
176 static int mtw_write(struct mtw_softc *, uint16_t, uint32_t);
177 static int mtw_write_region_1(struct mtw_softc *, uint16_t, uint8_t *, int);
178 static int mtw_set_region_4(struct mtw_softc *, uint16_t, uint32_t, int);
179 static int mtw_efuse_read_2(struct mtw_softc *, uint16_t, uint16_t *);
180 static int mtw_bbp_read(struct mtw_softc *, uint8_t, uint8_t *);
181 static int mtw_bbp_write(struct mtw_softc *, uint8_t, uint8_t);
182 static int mtw_mcu_cmd(struct mtw_softc *sc, uint8_t cmd, void *buf, int len);
183 static void mtw_get_txpower(struct mtw_softc *);
184 static int mtw_read_eeprom(struct mtw_softc *);
185 static struct ieee80211_node *mtw_node_alloc(struct ieee80211vap *,
186 const uint8_t mac[IEEE80211_ADDR_LEN]);
187 static int mtw_media_change(if_t);
188 static int mtw_newstate(struct ieee80211vap *, enum ieee80211_state, int);
189 static int mtw_wme_update(struct ieee80211com *);
190 static void mtw_key_set_cb(void *);
191 static int mtw_key_set(struct ieee80211vap *, struct ieee80211_key *);
192 static void mtw_key_delete_cb(void *);
193 static int mtw_key_delete(struct ieee80211vap *, struct ieee80211_key *);
194 static void mtw_ratectl_to(void *);
195 static void mtw_ratectl_cb(void *, int);
196 static void mtw_drain_fifo(void *);
197 static void mtw_iter_func(void *, struct ieee80211_node *);
198 static void mtw_newassoc_cb(void *);
199 static void mtw_newassoc(struct ieee80211_node *, int);
200 static int mtw_mcu_radio(struct mtw_softc *sc, int func, uint32_t val);
201 static void mtw_recv_mgmt(struct ieee80211_node *, struct mbuf *, int,
202 const struct ieee80211_rx_stats *, int, int);
203 static void mtw_rx_frame(struct mtw_softc *, struct mbuf *, uint32_t);
204 static void mtw_tx_free(struct mtw_endpoint_queue *pq, struct mtw_tx_data *,
205 int);
206 static void mtw_set_tx_desc(struct mtw_softc *, struct mtw_tx_data *);
207 static int mtw_tx(struct mtw_softc *, struct mbuf *, struct ieee80211_node *);
208 static int mtw_tx_mgt(struct mtw_softc *, struct mbuf *,
209 struct ieee80211_node *);
210 static int mtw_sendprot(struct mtw_softc *, const struct mbuf *,
211 struct ieee80211_node *, int, int);
212 static int mtw_tx_param(struct mtw_softc *, struct mbuf *,
213 struct ieee80211_node *, const struct ieee80211_bpf_params *);
214 static int mtw_raw_xmit(struct ieee80211_node *, struct mbuf *,
215 const struct ieee80211_bpf_params *);
216 static int mtw_transmit(struct ieee80211com *, struct mbuf *);
217 static void mtw_start(struct mtw_softc *);
218 static void mtw_parent(struct ieee80211com *);
219 static void mtw_select_chan_group(struct mtw_softc *, int);
220
221 static int mtw_set_chan(struct mtw_softc *, struct ieee80211_channel *);
222 static void mtw_set_channel(struct ieee80211com *);
223 static void mtw_getradiocaps(struct ieee80211com *, int, int *,
224 struct ieee80211_channel[]);
225 static void mtw_scan_start(struct ieee80211com *);
226 static void mtw_scan_end(struct ieee80211com *);
227 static void mtw_update_beacon(struct ieee80211vap *, int);
228 static void mtw_update_beacon_cb(void *);
229 static void mtw_updateprot(struct ieee80211com *);
230 static void mtw_updateprot_cb(void *);
231 static void mtw_usb_timeout_cb(void *);
232 static int mtw_reset(struct mtw_softc *sc);
233 static void mtw_enable_tsf_sync(struct mtw_softc *);
234
235
236 static void mtw_enable_mrr(struct mtw_softc *);
237 static void mtw_set_txpreamble(struct mtw_softc *);
238 static void mtw_set_basicrates(struct mtw_softc *);
239 static void mtw_set_leds(struct mtw_softc *, uint16_t);
240 static void mtw_set_bssid(struct mtw_softc *, const uint8_t *);
241 static void mtw_set_macaddr(struct mtw_softc *, const uint8_t *);
242 static void mtw_updateslot(struct ieee80211com *);
243 static void mtw_updateslot_cb(void *);
244 static void mtw_update_mcast(struct ieee80211com *);
245 static int8_t mtw_rssi2dbm(struct mtw_softc *, uint8_t, uint8_t);
246 static void mtw_update_promisc_locked(struct mtw_softc *);
247 static void mtw_update_promisc(struct ieee80211com *);
248 static int mtw_txrx_enable(struct mtw_softc *);
249 static void mtw_init_locked(struct mtw_softc *);
250 static void mtw_stop(void *);
251 static void mtw_delay(struct mtw_softc *, u_int);
252 static void mtw_update_chw(struct ieee80211com *ic);
253 static int mtw_ampdu_enable(struct ieee80211_node *ni,
254 struct ieee80211_tx_ampdu *tap);
255
256 static eventhandler_tag mtw_etag;
257
258 static const struct {
259 uint8_t reg;
260 uint8_t val;
261 } mt7601_rf_bank0[] = { MT7601_BANK0_RF },
262 mt7601_rf_bank4[] = { MT7601_BANK4_RF },
263 mt7601_rf_bank5[] = { MT7601_BANK5_RF };
264 static const struct {
265 uint32_t reg;
266 uint32_t val;
267 } mt7601_def_mac[] = { MT7601_DEF_MAC };
268 static const struct {
269 uint8_t reg;
270 uint8_t val;
271 } mt7601_def_bbp[] = { MT7601_DEF_BBP };
272
273
274 static const struct {
275 u_int chan;
276 uint8_t r17, r18, r19, r20;
277 } mt7601_rf_chan[] = { MT7601_RF_CHAN };
278
279
280 static const struct usb_config mtw_config[MTW_N_XFER] = {
281 [MTW_BULK_RX] = {
282 .type = UE_BULK,
283 .endpoint = UE_ADDR_ANY,
284 .direction = UE_DIR_IN,
285 .bufsize = MTW_MAX_RXSZ,
286 .flags = {.pipe_bof = 1,
287 .short_xfer_ok = 1,},
288 .callback = mtw_bulk_rx_callback,
289 },
290 [MTW_BULK_TX_BE] = {
291 .type = UE_BULK,
292 .endpoint = UE_ADDR_ANY,
293 .direction = UE_DIR_OUT,
294 .bufsize = MTW_MAX_TXSZ,
295 .flags = {.pipe_bof = 1,
296 .force_short_xfer = 0,},
297 .callback = mtw_bulk_tx_callback0,
298 .timeout = 5000, /* ms */
299 },
300 [MTW_BULK_TX_BK] = {
301 .type = UE_BULK,
302 .endpoint = UE_ADDR_ANY,
303 .direction = UE_DIR_OUT,
304 .bufsize = MTW_MAX_TXSZ,
305 .flags = {.pipe_bof = 1,
306 .force_short_xfer = 1,},
307 .callback = mtw_bulk_tx_callback1,
308 .timeout = 5000, /* ms */
309 },
310 [MTW_BULK_TX_VI] = {
311 .type = UE_BULK,
312 .endpoint = UE_ADDR_ANY,
313 .direction = UE_DIR_OUT,
314 .bufsize = MTW_MAX_TXSZ,
315 .flags = {.pipe_bof = 1,
316 .force_short_xfer = 1,},
317 .callback = mtw_bulk_tx_callback2,
318 .timeout = 5000, /* ms */
319 },
320 [MTW_BULK_TX_VO] = {
321 .type = UE_BULK,
322 .endpoint = UE_ADDR_ANY,
323 .direction = UE_DIR_OUT,
324 .bufsize = MTW_MAX_TXSZ,
325 .flags = {.pipe_bof = 1,
326 .force_short_xfer = 1,},
327 .callback = mtw_bulk_tx_callback3,
328 .timeout = 5000, /* ms */
329 },
330 [MTW_BULK_TX_HCCA] = {
331 .type = UE_BULK,
332 .endpoint = UE_ADDR_ANY,
333 .direction = UE_DIR_OUT,
334 .bufsize = MTW_MAX_TXSZ,
335 .flags = {.pipe_bof = 1,
336 .force_short_xfer = 1, .no_pipe_ok = 1,},
337 .callback = mtw_bulk_tx_callback4,
338 .timeout = 5000, /* ms */
339 },
340 [MTW_BULK_TX_PRIO] = {
341 .type = UE_BULK,
342 .endpoint = UE_ADDR_ANY,
343 .direction = UE_DIR_OUT,
344 .bufsize = MTW_MAX_TXSZ,
345 .flags = {.pipe_bof = 1,
346 .force_short_xfer = 1, .no_pipe_ok = 1,},
347 .callback = mtw_bulk_tx_callback5,
348 .timeout = 5000, /* ms */
349 },
350
351 [MTW_BULK_FW_CMD] = {
352 .type = UE_BULK,
353 .endpoint = UE_ADDR_ANY,
354 .direction = UE_DIR_OUT,
355 .bufsize = 0x2c44,
356 .flags = {.pipe_bof = 1,
357 .force_short_xfer = 1, .no_pipe_ok = 1,},
358 .callback = mtw_fw_callback,
359
360 },
361
362 [MTW_BULK_RAW_TX] = {
363 .type = UE_BULK,
364 .ep_index = 0,
365 .endpoint = UE_ADDR_ANY,
366 .direction = UE_DIR_OUT,
367 .bufsize = MTW_MAX_TXSZ,
368 .flags = {.pipe_bof = 1,
369 .force_short_xfer = 1, .no_pipe_ok = 1,},
370 .callback = mtw_bulk_tx_callback0,
371 .timeout = 5000, /* ms */
372 },
373
374 };
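
/* Map WME access categories to their bulk Tx endpoint indices. */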
375 static uint8_t mtw_wme_ac_xfer_map[4] = {
376 [WME_AC_BE] = MTW_BULK_TX_BE,
377 [WME_AC_BK] = MTW_BULK_TX_BK,
378 [WME_AC_VI] = MTW_BULK_TX_VI,
379 [WME_AC_VO] = MTW_BULK_TX_VO,
380 };
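
/*
 * Devices that first attach as a USB mass storage interface (a virtual
 * driver CD) are ejected here so that they re-enumerate as a WLAN device.
 */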
381 static void
382 mtw_autoinst(void *arg, struct usb_device *udev, struct usb_attach_arg *uaa)
383 {
384 struct usb_interface *iface;
385 struct usb_interface_descriptor *id;
386
387 if (uaa->dev_state != UAA_DEV_READY)
388 return;
389
390 iface = usbd_get_iface(udev, 0);
391 if (iface == NULL)
392 return;
393 id = iface->idesc;
394 if (id == NULL || id->bInterfaceClass != UICLASS_MASS)
395 return;
396 if (usbd_lookup_id_by_uaa(mtw_devs, sizeof(mtw_devs), uaa))
397 return;
398
399 if (usb_msc_eject(udev, 0, MSC_EJECT_STOPUNIT) == 0)
400 uaa->dev_state = UAA_DEV_EJECTING;
401 }
402
403 static int
404 mtw_driver_loaded(struct module *mod, int what, void *arg)
405 {
406 switch (what) {
407 case MOD_LOAD:
408 mtw_etag = EVENTHANDLER_REGISTER(usb_dev_configured,
409 mtw_autoinst, NULL, EVENTHANDLER_PRI_ANY);
410 break;
411 case MOD_UNLOAD:
412 EVENTHANDLER_DEREGISTER(usb_dev_configured, mtw_etag);
413 break;
414 default:
415 return (EOPNOTSUPP);
416 }
417 return (0);
418 }
419
420 static const char *
421 mtw_get_rf(int rev)
422 {
423 switch (rev) {
424 case MT7601_RF_7601:
425 return ("MT7601");
426 case MT7610_RF_7610:
427 return ("MT7610");
428 case MT7612_RF_7612:
429 return ("MT7612");
430 }
431 return ("unknown");
432 }
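/* Power the WLAN core and its clock/oscillator up or down. */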
433 static int
434 mtw_wlan_enable(struct mtw_softc *sc, int enable)
435 {
436 uint32_t tmp;
437 int error = 0;
438
439 if (enable) {
440 mtw_read(sc, MTW_WLAN_CTRL, &tmp);
441 if (sc->asic_ver == 0x7612)
442 tmp &= ~0xfffff000;
443
444 tmp &= ~MTW_WLAN_CLK_EN;
445 tmp |= MTW_WLAN_EN;
446 mtw_write(sc, MTW_WLAN_CTRL, tmp);
447 mtw_delay(sc, 2);
448
449 tmp |= MTW_WLAN_CLK_EN;
450 if (sc->asic_ver == 0x7612) {
451 tmp |= (MTW_WLAN_RESET | MTW_WLAN_RESET_RF);
452 }
453 mtw_write(sc, MTW_WLAN_CTRL, tmp);
454 mtw_delay(sc, 2);
455
456 mtw_read(sc, MTW_OSC_CTRL, &tmp);
457 tmp |= MTW_OSC_EN;
458 mtw_write(sc, MTW_OSC_CTRL, tmp);
459 tmp |= MTW_OSC_CAL_REQ;
460 mtw_write(sc, MTW_OSC_CTRL, tmp);
461 } else {
462 mtw_read(sc, MTW_WLAN_CTRL, &tmp);
463 tmp &= ~(MTW_WLAN_CLK_EN | MTW_WLAN_EN);
464 mtw_write(sc, MTW_WLAN_CTRL, tmp);
465
466 mtw_read(sc, MTW_OSC_CTRL, &tmp);
467 tmp &= ~MTW_OSC_EN;
468 mtw_write(sc, MTW_OSC_CTRL, tmp);
469 }
470 return (error);
471 }
472
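/* Read a 32-bit configuration register through a vendor control request. */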
473 static int
474 mtw_read_cfg(struct mtw_softc *sc, uint16_t reg, uint32_t *val)
475 {
476 usb_device_request_t req;
477 uint32_t tmp;
478 uint16_t actlen;
479 int error;
480
481 req.bmRequestType = UT_READ_VENDOR_DEVICE;
482 req.bRequest = MTW_READ_CFG;
483 USETW(req.wValue, 0);
484 USETW(req.wIndex, reg);
485 USETW(req.wLength, 4);
486 error = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, &tmp, 0,
487 &actlen, 1000);
488
489 if (error == 0)
490 *val = le32toh(tmp);
491 else
492 *val = 0xffffffff;
493 return (error);
494 }
495
496 static int
497 mtw_match(device_t self)
498 {
499 struct usb_attach_arg *uaa = device_get_ivars(self);
500
501 if (uaa->usb_mode != USB_MODE_HOST)
502 return (ENXIO);
503 if (uaa->info.bConfigIndex != 0)
504 return (ENXIO);
505 if (uaa->info.bIfaceIndex != 0)
506 return (ENXIO);
507
508 return (usbd_lookup_id_by_uaa(mtw_devs, sizeof(mtw_devs), uaa));
509 }
510
511 static int
512 mtw_attach(device_t self)
513 {
514 struct mtw_softc *sc = device_get_softc(self);
515 struct usb_attach_arg *uaa = device_get_ivars(self);
516 struct ieee80211com *ic = &sc->sc_ic;
517 uint32_t ver;
518 int i, ret;
519 uint32_t tmp;
520 uint8_t iface_index;
521 int ntries, error;
522
523 device_set_usb_desc(self);
524 sc->sc_udev = uaa->device;
525 sc->sc_dev = self;
526 sc->sc_sent = 0;
527
528 mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev),
529 MTX_NETWORK_LOCK, MTX_DEF);
530
531 iface_index = 0;
532
533 error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
534 mtw_config, MTW_N_XFER, sc, &sc->sc_mtx);
535 if (error) {
536 device_printf(sc->sc_dev,
537 "could not allocate USB transfers, "
538 "err=%s\n",
539 usbd_errstr(error));
540 goto detach;
541 }
542 for (i = 0; i < 4; i++) {
543 sc->txd_fw[i] = (struct mtw_txd_fw *)
544 malloc(sizeof(struct mtw_txd_fw),
545 M_USBDEV, M_NOWAIT | M_ZERO);
546 }
547 MTW_LOCK(sc);
548 sc->sc_idx = 0;
549 mbufq_init(&sc->sc_snd, ifqmaxlen);
550
551 	/* enable WLAN core */
552 	if ((error = mtw_wlan_enable(sc, 1)) != 0) {
553 		device_printf(sc->sc_dev, "could not enable WLAN core\n");
554 		goto detach;
555 	}
556
557 /* wait for the chip to settle */
558 DELAY(100);
559 for (ntries = 0; ntries < 100; ntries++) {
560 if (mtw_read(sc, MTW_ASIC_VER, &ver) != 0) {
561 goto detach;
562 }
563 if (ver != 0 && ver != 0xffffffff)
564 break;
565 DELAY(10);
566 }
567 if (ntries == 100) {
568 device_printf(sc->sc_dev,
569 "timeout waiting for NIC to initialize\n");
570 goto detach;
571 }
572 sc->asic_ver = ver >> 16;
573 sc->asic_rev = ver & 0xffff;
574 DELAY(100);
575 if (sc->asic_ver != 0x7601) {
576 device_printf(sc->sc_dev,
577 		    "Your revision 0x%04x is not supported yet\n",
578 sc->asic_rev);
579 goto detach;
580 }
581
582
583 if (mtw_read(sc, MTW_MAC_VER_ID, &tmp) != 0)
584 goto detach;
585 sc->mac_rev = tmp & 0xffff;
586
587 mtw_load_microcode(sc);
588 ret = msleep(&sc->fwloading, &sc->sc_mtx, 0, "fwload", 3 * hz);
589 if (ret == EWOULDBLOCK || sc->fwloading != 1) {
590 device_printf(sc->sc_dev,
591 "timeout waiting for MCU to initialize\n");
592 goto detach;
593 }
594
595 sc->sc_srom_read = mtw_efuse_read_2;
596 /* retrieve RF rev. no and various other things from EEPROM */
597 mtw_read_eeprom(sc);
598
599 device_printf(sc->sc_dev,
600 "MAC/BBP RT%04X (rev 0x%04X), RF %s (MIMO %dT%dR), address %s\n",
601 sc->asic_ver, sc->mac_rev, mtw_get_rf(sc->rf_rev), sc->ntxchains,
602 sc->nrxchains, ether_sprintf(ic->ic_macaddr));
603 DELAY(100);
604
605 //mtw_set_leds(sc,5);
606 // mtw_mcu_radio(sc,0x31,0);
607 MTW_UNLOCK(sc);
608
609
610 ic->ic_softc = sc;
611 ic->ic_name = device_get_nameunit(self);
612 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
613 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
614
615 ic->ic_caps = IEEE80211_C_STA | /* station mode supported */
616 IEEE80211_C_MONITOR | /* monitor mode supported */
617 IEEE80211_C_IBSS |
618 IEEE80211_C_HOSTAP |
619 IEEE80211_C_WDS | /* 4-address traffic works */
620 IEEE80211_C_MBSS |
621 IEEE80211_C_SHPREAMBLE | /* short preamble supported */
622 IEEE80211_C_SHSLOT | /* short slot time supported */
623 IEEE80211_C_WME | /* WME */
624 IEEE80211_C_WPA; /* WPA1|WPA2(RSN) */
625 device_printf(sc->sc_dev, "[HT] Enabling 802.11n\n");
626 ic->ic_htcaps = IEEE80211_HTC_HT
627 | IEEE80211_HTC_AMPDU
628 | IEEE80211_HTC_AMSDU
629 | IEEE80211_HTCAP_MAXAMSDU_3839
630 | IEEE80211_HTCAP_SMPS_OFF;
631
632 ic->ic_rxstream = sc->nrxchains;
633 ic->ic_txstream = sc->ntxchains;
634
635 ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_AES_CCM |
636 IEEE80211_CRYPTO_AES_OCB | IEEE80211_CRYPTO_TKIP |
637 IEEE80211_CRYPTO_TKIPMIC;
638
639 ic->ic_flags |= IEEE80211_F_DATAPAD;
640 ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS;
641 ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
642
643 mtw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
644 ic->ic_channels);
645
646 ieee80211_ifattach(ic);
647
648 ic->ic_scan_start = mtw_scan_start;
649 ic->ic_scan_end = mtw_scan_end;
650 ic->ic_set_channel = mtw_set_channel;
651 ic->ic_getradiocaps = mtw_getradiocaps;
652 ic->ic_node_alloc = mtw_node_alloc;
653 ic->ic_newassoc = mtw_newassoc;
654 ic->ic_update_mcast = mtw_update_mcast;
655 ic->ic_updateslot = mtw_updateslot;
656 ic->ic_wme.wme_update = mtw_wme_update;
657 ic->ic_raw_xmit = mtw_raw_xmit;
658 ic->ic_update_promisc = mtw_update_promisc;
659 ic->ic_vap_create = mtw_vap_create;
660 ic->ic_vap_delete = mtw_vap_delete;
661 ic->ic_transmit = mtw_transmit;
662 ic->ic_parent = mtw_parent;
663
664 ic->ic_update_chw = mtw_update_chw;
665 ic->ic_ampdu_enable = mtw_ampdu_enable;
666
667 ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr,
668 sizeof(sc->sc_txtap), MTW_TX_RADIOTAP_PRESENT,
669 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
670 MTW_RX_RADIOTAP_PRESENT);
671 TASK_INIT(&sc->cmdq_task, 0, mtw_cmdq_cb, sc);
672 TASK_INIT(&sc->ratectl_task, 0, mtw_ratectl_cb, sc);
673 usb_callout_init_mtx(&sc->ratectl_ch, &sc->sc_mtx, 0);
674
675 if (bootverbose)
676 ieee80211_announce(ic);
677
678 return (0);
679
680 detach:
681 MTW_UNLOCK(sc);
682 mtw_detach(self);
683 return (ENXIO);
684 }
685
686 static void
687 mtw_drain_mbufq(struct mtw_softc *sc)
688 {
689 struct mbuf *m;
690 struct ieee80211_node *ni;
691
692 MTW_LOCK_ASSERT(sc, MA_OWNED);
693 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
694 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
695 m->m_pkthdr.rcvif = NULL;
696 ieee80211_free_node(ni);
697 m_freem(m);
698 }
699 }
700
701 static int
702 mtw_detach(device_t self)
703 {
704 struct mtw_softc *sc = device_get_softc(self);
705 struct ieee80211com *ic = &sc->sc_ic;
706 int i;
707 MTW_LOCK(sc);
708 mtw_reset(sc);
709 DELAY(10000);
710 sc->sc_detached = 1;
711 MTW_UNLOCK(sc);
712
713
714 /* stop all USB transfers */
715 for (i = 0; i < MTW_N_XFER; i++)
716 usbd_transfer_drain(sc->sc_xfer[i]);
717
718 MTW_LOCK(sc);
719 sc->ratectl_run = MTW_RATECTL_OFF;
720 sc->cmdq_run = sc->cmdq_key_set = MTW_CMDQ_ABORT;
721
722 /* free TX list, if any */
723 if (ic->ic_nrunning > 0)
724 for (i = 0; i < MTW_EP_QUEUES; i++)
725 mtw_unsetup_tx_list(sc, &sc->sc_epq[i]);
726
727 /* Free TX queue */
728 mtw_drain_mbufq(sc);
729 MTW_UNLOCK(sc);
730 if (sc->sc_ic.ic_softc == sc) {
731 /* drain tasks */
732 usb_callout_drain(&sc->ratectl_ch);
733 ieee80211_draintask(ic, &sc->cmdq_task);
734 ieee80211_draintask(ic, &sc->ratectl_task);
735 ieee80211_ifdetach(ic);
736 }
737 for (i = 0; i < 4; i++) {
738 free(sc->txd_fw[i], M_USBDEV);
739 }
740 firmware_unregister("/mediatek/mt7601u");
741 mtx_destroy(&sc->sc_mtx);
742
743 return (0);
744 }
745
746 static struct ieee80211vap *
747 mtw_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
748 enum ieee80211_opmode opmode, int flags,
749 const uint8_t bssid[IEEE80211_ADDR_LEN],
750 const uint8_t mac[IEEE80211_ADDR_LEN])
751 {
752 struct mtw_softc *sc = ic->ic_softc;
753 struct mtw_vap *rvp;
754 struct ieee80211vap *vap;
755 int i;
756
757 if (sc->rvp_cnt >= MTW_VAP_MAX) {
758 device_printf(sc->sc_dev, "number of VAPs maxed out\n");
759 return (NULL);
760 }
761
762 switch (opmode) {
763 case IEEE80211_M_STA:
764 /* enable s/w bmiss handling for sta mode */
765 flags |= IEEE80211_CLONE_NOBEACONS;
766 		/* fall through */
767 case IEEE80211_M_IBSS:
768 case IEEE80211_M_MONITOR:
769 case IEEE80211_M_HOSTAP:
770 case IEEE80211_M_MBSS:
771 /* other than WDS vaps, only one at a time */
772 if (!TAILQ_EMPTY(&ic->ic_vaps))
773 return (NULL);
774 break;
775 case IEEE80211_M_WDS:
776 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
777 if (vap->iv_opmode != IEEE80211_M_HOSTAP)
778 continue;
779 			/* WDS vaps always share the local mac address. */
780 flags &= ~IEEE80211_CLONE_BSSID;
781 break;
782 }
783 if (vap == NULL) {
784 device_printf(sc->sc_dev,
785 "wds only supported in ap mode\n");
786 return (NULL);
787 }
788 break;
789 default:
790 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
791 return (NULL);
792 }
793
794 rvp = malloc(sizeof(struct mtw_vap), M_80211_VAP, M_WAITOK | M_ZERO);
795 vap = &rvp->vap;
796
797 if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid) !=
798 0) {
799 /* out of memory */
800 free(rvp, M_80211_VAP);
801 return (NULL);
802 }
803
804 vap->iv_update_beacon = mtw_update_beacon;
805 vap->iv_max_aid = MTW_WCID_MAX;
806
807 /*
808 * The linux rt2800 driver limits 1 stream devices to a 32KB
809 * RX AMPDU.
810 */
811 if (ic->ic_rxstream > 1)
812 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
813 else
814 		vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_32K;
815 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_2; /* 2uS */
816
817 /*
818 * To delete the right key from h/w, we need wcid.
819 * Luckily, there is unused space in ieee80211_key{}, wk_pad,
820 * and matching wcid will be written into there. So, cast
821 * some spells to remove 'const' from ieee80211_key{}
822 */
823 vap->iv_key_delete = (void *)mtw_key_delete;
824 vap->iv_key_set = (void *)mtw_key_set;
825
826 // override state transition machine
827 rvp->newstate = vap->iv_newstate;
828 vap->iv_newstate = mtw_newstate;
829 if (opmode == IEEE80211_M_IBSS) {
830 rvp->recv_mgmt = vap->iv_recv_mgmt;
831 vap->iv_recv_mgmt = mtw_recv_mgmt;
832 }
833
834 ieee80211_ratectl_init(vap);
835 ieee80211_ratectl_setinterval(vap, 1000); // 1 second
836
837 /* complete setup */
838 ieee80211_vap_attach(vap, mtw_media_change, ieee80211_media_status,
839 mac);
840
841 /* make sure id is always unique */
842 for (i = 0; i < MTW_VAP_MAX; i++) {
843 if ((sc->rvp_bmap & 1 << i) == 0) {
844 sc->rvp_bmap |= 1 << i;
845 rvp->rvp_id = i;
846 break;
847 }
848 }
849 if (sc->rvp_cnt++ == 0)
850 ic->ic_opmode = opmode;
851
852 if (opmode == IEEE80211_M_HOSTAP)
853 sc->cmdq_run = MTW_CMDQ_GO;
854
855 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "rvp_id=%d bmap=%x rvp_cnt=%d\n",
856 rvp->rvp_id, sc->rvp_bmap, sc->rvp_cnt);
857
858 return (vap);
859 }
860
861 static void
862 mtw_vap_delete(struct ieee80211vap *vap)
863 {
864 struct mtw_vap *rvp = MTW_VAP(vap);
865 struct ieee80211com *ic;
866 struct mtw_softc *sc;
867 uint8_t rvp_id;
868
869 if (vap == NULL)
870 return;
871
872 ic = vap->iv_ic;
873 sc = ic->ic_softc;
874
875 MTW_LOCK(sc);
876 m_freem(rvp->beacon_mbuf);
877 rvp->beacon_mbuf = NULL;
878
879 rvp_id = rvp->rvp_id;
880 sc->ratectl_run &= ~(1 << rvp_id);
881 sc->rvp_bmap &= ~(1 << rvp_id);
882 mtw_set_region_4(sc, MTW_SKEY(rvp_id, 0), 0, 256);
883 mtw_set_region_4(sc, (0x7800 + (rvp_id) * 512), 0, 512);
884 --sc->rvp_cnt;
885
886 MTW_DPRINTF(sc, MTW_DEBUG_STATE,
887 "vap=%p rvp_id=%d bmap=%x rvp_cnt=%d\n", vap, rvp_id, sc->rvp_bmap,
888 sc->rvp_cnt);
889
890 MTW_UNLOCK(sc);
891
892 ieee80211_ratectl_deinit(vap);
893 ieee80211_vap_detach(vap);
894 free(rvp, M_80211_VAP);
895 }
896
897 /*
898  * A number of functions need to be called from a process-context thread.
899  * Rather than creating a taskqueue event for each of those functions,
900  * this is an all-for-one taskqueue callback.  It guarantees that
901  * deferred functions are executed in the same order they were
902  * enqueued.
903  * '& MTW_CMDQ_MASQ' is to loop cmdq[].
904  */
905 static void
906 mtw_cmdq_cb(void *arg, int pending)
907 {
908 struct mtw_softc *sc = arg;
909 uint8_t i;
910 /* call cmdq[].func locked */
911 MTW_LOCK(sc);
912 for (i = sc->cmdq_exec; sc->cmdq[i].func && pending;
913 i = sc->cmdq_exec, pending--) {
914 MTW_DPRINTF(sc, MTW_DEBUG_CMD, "cmdq_exec=%d pending=%d\n", i,
915 pending);
916 if (sc->cmdq_run == MTW_CMDQ_GO) {
917 /*
918 * If arg0 is NULL, callback func needs more
919 * than one arg. So, pass ptr to cmdq struct.
920 */
921 if (sc->cmdq[i].arg0)
922 sc->cmdq[i].func(sc->cmdq[i].arg0);
923 else
924 sc->cmdq[i].func(&sc->cmdq[i]);
925 }
926 sc->cmdq[i].arg0 = NULL;
927 sc->cmdq[i].func = NULL;
928 sc->cmdq_exec++;
929 sc->cmdq_exec &= MTW_CMDQ_MASQ;
930 }
931 MTW_UNLOCK(sc);
932 }
933
934 static void
935 mtw_setup_tx_list(struct mtw_softc *sc, struct mtw_endpoint_queue *pq)
936 {
937 struct mtw_tx_data *data;
938
939 memset(pq, 0, sizeof(*pq));
940
941 STAILQ_INIT(&pq->tx_qh);
942 STAILQ_INIT(&pq->tx_fh);
943
944 for (data = &pq->tx_data[0]; data < &pq->tx_data[MTW_TX_RING_COUNT];
945 data++) {
946 data->sc = sc;
947 STAILQ_INSERT_TAIL(&pq->tx_fh, data, next);
948 }
949 pq->tx_nfree = MTW_TX_RING_COUNT;
950 }
951
952 static void
953 mtw_unsetup_tx_list(struct mtw_softc *sc, struct mtw_endpoint_queue *pq)
954 {
955 struct mtw_tx_data *data;
956 /* make sure any subsequent use of the queues will fail */
957 pq->tx_nfree = 0;
958
959 STAILQ_INIT(&pq->tx_fh);
960 STAILQ_INIT(&pq->tx_qh);
961
962 /* free up all node references and mbufs */
963 for (data = &pq->tx_data[0]; data < &pq->tx_data[MTW_TX_RING_COUNT];
964 data++) {
965 if (data->m != NULL) {
966 m_freem(data->m);
967 data->m = NULL;
968 }
969 if (data->ni != NULL) {
970 ieee80211_free_node(data->ni);
971 data->ni = NULL;
972 }
973 }
974 }
975
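/* Write the firmware IVB to the device using a vendor control request. */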
976 static int
977 mtw_write_ivb(struct mtw_softc *sc, void *buf, uint16_t len)
978 {
979 usb_device_request_t req;
980 uint16_t actlen;
981 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
982 req.bRequest = MTW_RESET;
983 USETW(req.wValue, 0x12);
984 USETW(req.wIndex, 0);
985 USETW(req.wLength, len);
986
987 int error = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, buf,
988 0, &actlen, 1000);
989
990 return (error);
991 }
992
993 static int
994 mtw_write_cfg(struct mtw_softc *sc, uint16_t reg, uint32_t val)
995 {
996 usb_device_request_t req;
997 int error;
998
999 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1000 req.bRequest = MTW_WRITE_CFG;
1001 USETW(req.wValue, 0);
1002 USETW(req.wIndex, reg);
1003 USETW(req.wLength, 4);
1004 val = htole32(val);
1005 error = usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, &val);
1006 return (error);
1007 }
1008
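/* Write the USB DMA configuration register. */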
1009 static int
1010 mtw_usb_dma_write(struct mtw_softc *sc, uint32_t val)
1011 {
1012 // if (sc->asic_ver == 0x7612)
1013 // return mtw_write_cfg(sc, MTW_USB_U3DMA_CFG, val);
1014 // else
1015 return (mtw_write(sc, MTW_USB_DMA_CFG, val));
1016 }
1017
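/*
 * Program the USB DMA and FCE/PDMA registers in preparation for
 * downloading firmware to the MCU.
 */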
1018 static void
1019 mtw_ucode_setup(struct mtw_softc *sc)
1020 {
1021
1022 mtw_usb_dma_write(sc, (MTW_USB_TX_EN | MTW_USB_RX_EN));
1023 mtw_write(sc, MTW_FCE_PSE_CTRL, 1);
1024 mtw_write(sc, MTW_TX_CPU_FCE_BASE, 0x400230);
1025 mtw_write(sc, MTW_TX_CPU_FCE_MAX_COUNT, 1);
1026 mtw_write(sc, MTW_MCU_FW_IDX, 1);
1027 mtw_write(sc, MTW_FCE_PDMA, 0x44);
1028 mtw_write(sc, MTW_FCE_SKIP_FS, 3);
1029 }
1030 static int
1031 mtw_ucode_write(struct mtw_softc *sc, const uint8_t *fw, const uint8_t *ivb,
1032 int32_t len, uint32_t offset)
1033 {
1034
1035 // struct usb_attach_arg *uaa = device_get_ivars(sc->sc_dev);
1036 #if 0 // firmware not tested
1037
1038 if (sc->asic_ver == 0x7612 && offset >= 0x90000)
1039 blksz = 0x800; /* MT7612 ROM Patch */
1040
1041 xfer = usbd_alloc_xfer(sc->sc_udev);
1042 if (xfer == NULL) {
1043 error = ENOMEM;
1044 goto fail;
1045 }
1046 buf = usbd_alloc_buffer(xfer, blksz + 12);
1047 if (buf == NULL) {
1048 error = ENOMEM;
1049 goto fail;
1050 }
1051 #endif
1052
1053
1054
1055 int mlen;
1056 int idx = 0;
1057
1058 mlen = 0x2c44;
1059
1060 while (len > 0) {
1061
1062 if (len < 0x2c44 && len > 0) {
1063 mlen = len;
1064 }
1065
1066 sc->txd_fw[idx]->len = htole16(mlen);
1067 sc->txd_fw[idx]->flags = htole16(MTW_TXD_DATA | MTW_TXD_MCU);
1068
1069 memcpy(&sc->txd_fw[idx]->fw, fw, mlen);
1070 // memcpy(&txd[1], fw, mlen);
1071 // memset(&txd[1] + mlen, 0, MTW_DMA_PAD);
1072 		// mtw_write_cfg(sc, MTW_MCU_DMA_ADDR, offset + sent);
1073 		// mtw_write_cfg(sc, MTW_MCU_DMA_LEN, (mlen << 16));
1074
1075 // sc->sc_fw_data[idx]->len=htole16(mlen);
1076
1077 // memcpy(tmpbuf,fw,mlen);
1078 // memset(tmpbuf+mlen,0,MTW_DMA_PAD);
1079 // memcpy(sc->sc_fw_data[idx].buf, fw, mlen);
1080
1081 fw += mlen;
1082 len -= mlen;
1083 // sent+=mlen;
1084 idx++;
1085 }
1086 sc->sc_sent = 0;
1087 memcpy(sc->sc_ivb_1, ivb, MTW_MCU_IVB_LEN);
1088
1089 usbd_transfer_start(sc->sc_xfer[7]);
1090
1091 return (0);
1092 }
1093
1094 static void
1095 mtw_load_microcode(void *arg)
1096 {
1097
1098 struct mtw_softc *sc = (struct mtw_softc *)arg;
1099 const struct mtw_ucode_hdr *hdr;
1100 	// const struct mtw_ucode *fw = NULL;
1101 const char *fwname;
1102 size_t size;
1103 int error = 0;
1104 uint32_t tmp, iofs = 0x40;
1105 // int ntries;
1106 int dlen, ilen;
1107 device_printf(sc->sc_dev, "version:0x%hx\n", sc->asic_ver);
1108 /* is firmware already running? */
1109 mtw_read_cfg(sc, MTW_MCU_DMA_ADDR, &tmp);
1110 if (tmp == MTW_MCU_READY) {
1111 return;
1112 }
1113 if (sc->asic_ver == 0x7612) {
1114 fwname = "mtw-mt7662u_rom_patch";
1115
1116 		const struct firmware *firmware = firmware_get_flags(fwname,
1117 		    FIRMWARE_GET_NOWARN);
1118 		if (firmware == NULL) {
1119 			device_printf(sc->sc_dev,
1120 			    "failed to load firmware file %s\n", fwname);
1121 return;
1122 }
1123 size = firmware->datasize;
1124
1125 const struct mtw_ucode *fw = (const struct mtw_ucode *)
1126 firmware->data;
1127 hdr = (const struct mtw_ucode_hdr *)&fw->hdr;
1128 // memcpy(fw,(const unsigned char*)firmware->data +
1129 // 0x1e,size-0x1e);
1130 ilen = size - 0x1e;
1131
1132 mtw_ucode_setup(sc);
1133
1134 if ((error = mtw_ucode_write(sc, firmware->data, fw->ivb, ilen,
1135 0x90000)) != 0) {
1136 goto fail;
1137 }
1138 mtw_usb_dma_write(sc, 0x00e41814);
1139 }
1140
1141 fwname = "/mediatek/mt7601u.bin";
1142 iofs = 0x40;
1143 // dofs = 0;
1144 if (sc->asic_ver == 0x7612) {
1145 fwname = "mtw-mt7662u";
1146 iofs = 0x80040;
1147 // dofs = 0x110800;
1148 } else if (sc->asic_ver == 0x7610) {
1149 fwname = "mt7610u";
1150 // dofs = 0x80000;
1151 }
1152 MTW_UNLOCK(sc);
1153 const struct firmware *firmware = firmware_get_flags(fwname, FIRMWARE_GET_NOWARN);
1154
1155 	if (firmware == NULL) {
1156 		device_printf(sc->sc_dev,
1157 		    "failed to load firmware file %s\n",
1158 		    fwname);
1159 MTW_LOCK(sc);
1160 return;
1161 }
1162 MTW_LOCK(sc);
1163 size = firmware->datasize;
1164 MTW_DPRINTF(sc, MTW_DEBUG_FIRMWARE, "firmware size:%zu\n", size);
1165 const struct mtw_ucode *fw = (const struct mtw_ucode *)firmware->data;
1166
1167 if (size < sizeof(struct mtw_ucode_hdr)) {
1168 device_printf(sc->sc_dev, "firmware header too short\n");
1169 goto fail;
1170 }
1171
1172 hdr = (const struct mtw_ucode_hdr *)&fw->hdr;
1173
1174 if (size < sizeof(struct mtw_ucode_hdr) + le32toh(hdr->ilm_len) +
1175 le32toh(hdr->dlm_len)) {
1176 device_printf(sc->sc_dev, "firmware payload too short\n");
1177 goto fail;
1178 }
1179
1180 ilen = le32toh(hdr->ilm_len) - MTW_MCU_IVB_LEN;
1181 dlen = le32toh(hdr->dlm_len);
1182
1183 if (ilen > size || dlen > size) {
1184 device_printf(sc->sc_dev, "firmware payload too large\n");
1185 goto fail;
1186 }
1187
1188 mtw_write(sc, MTW_FCE_PDMA, 0);
1189 mtw_write(sc, MTW_FCE_PSE_CTRL, 0);
1190 mtw_ucode_setup(sc);
1191
1192 if ((error = mtw_ucode_write(sc, fw->data, fw->ivb, ilen, iofs)) != 0)
1193 		device_printf(sc->sc_dev, "Could not write ucode, error=%d\n",
1194 error);
1195
1196 device_printf(sc->sc_dev, "loaded firmware ver %.8x %.8x %s\n",
1197 le32toh(hdr->fw_ver), le32toh(hdr->build_ver), hdr->build_time);
1198
1199 return;
1200 fail:
1201 return;
1202 }
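/* Issue a USB control request, retrying up to five times on failure. */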
1203 static usb_error_t
1204 mtw_do_request(struct mtw_softc *sc, struct usb_device_request *req, void *data)
1205 {
1206 usb_error_t err;
1207 int ntries = 5;
1208
1209 MTW_LOCK_ASSERT(sc, MA_OWNED);
1210
1211 while (ntries--) {
1212 err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data,
1213 		    0, NULL, 2000); // 2 second timeout
1214 if (err == 0)
1215 break;
1216 MTW_DPRINTF(sc, MTW_DEBUG_USB,
1217 "Control request failed, %s (retrying)\n",
1218 usbd_errstr(err));
1219 mtw_delay(sc, 10);
1220 }
1221 return (err);
1222 }
1223
1224 static int
1225 mtw_read(struct mtw_softc *sc, uint16_t reg, uint32_t *val)
1226 {
1227 uint32_t tmp;
1228 int error;
1229
1230 error = mtw_read_region_1(sc, reg, (uint8_t *)&tmp, sizeof tmp);
1231 if (error == 0)
1232 *val = le32toh(tmp);
1233 else
1234 *val = 0xffffffff;
1235 return (error);
1236 }
1237
1238 static int
1239 mtw_read_region_1(struct mtw_softc *sc, uint16_t reg, uint8_t *buf, int len)
1240 {
1241 usb_device_request_t req;
1242
1243 req.bmRequestType = UT_READ_VENDOR_DEVICE;
1244 req.bRequest = MTW_READ_REGION_1;
1245 USETW(req.wValue, 0);
1246 USETW(req.wIndex, reg);
1247 USETW(req.wLength, len);
1248
1249 return (mtw_do_request(sc, &req, buf));
1250 }
1251
1252 static int
1253 mtw_write_2(struct mtw_softc *sc, uint16_t reg, uint16_t val)
1254 {
1255
1256 usb_device_request_t req;
1257 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1258 req.bRequest = MTW_WRITE_2;
1259 USETW(req.wValue, val);
1260 USETW(req.wIndex, reg);
1261 USETW(req.wLength, 0);
1262 return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, NULL));
1263 }
1264
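/* Write a 32-bit register as two 16-bit vendor writes. */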
1265 static int
1266 mtw_write(struct mtw_softc *sc, uint16_t reg, uint32_t val)
1267 {
1268
1269 int error;
1270
1271 if ((error = mtw_write_2(sc, reg, val & 0xffff)) == 0) {
1272
1273 error = mtw_write_2(sc, reg + 2, val >> 16);
1274 }
1275
1276 return (error);
1277 }
1278
1279 static int
1280 mtw_write_region_1(struct mtw_softc *sc, uint16_t reg, uint8_t *buf, int len)
1281 {
1282
1283 usb_device_request_t req;
1284 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1285 req.bRequest = MTW_WRITE_REGION_1;
1286 USETW(req.wValue, 0);
1287 USETW(req.wIndex, reg);
1288 USETW(req.wLength, len);
1289 return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, buf));
1290 }
1291
1292 static int
1293 mtw_set_region_4(struct mtw_softc *sc, uint16_t reg, uint32_t val, int count)
1294 {
1295 int i, error = 0;
1296
1297 	KASSERT((count & 3) == 0, ("mtw_set_region_4: Invalid data length.\n"));
1298 for (i = 0; i < count && error == 0; i += 4)
1299 error = mtw_write(sc, reg + i, val);
1300 return (error);
1301 }
1302
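/* Read a 16-bit word from the eFUSE ROM. */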
1303 static int
1304 mtw_efuse_read_2(struct mtw_softc *sc, uint16_t addr, uint16_t *val)
1305 {
1306
1307 uint32_t tmp;
1308 uint16_t reg;
1309 int error, ntries;
1310
1311 if ((error = mtw_read(sc, MTW_EFUSE_CTRL, &tmp)) != 0)
1312 return (error);
1313
1314 addr *= 2;
1315 /*
1316 * Read one 16-byte block into registers EFUSE_DATA[0-3]:
1317 * DATA0: 3 2 1 0
1318 * DATA1: 7 6 5 4
1319 * DATA2: B A 9 8
1320 * DATA3: F E D C
1321 */
1322 tmp &= ~(MTW_EFSROM_MODE_MASK | MTW_EFSROM_AIN_MASK);
1323 tmp |= (addr & ~0xf) << MTW_EFSROM_AIN_SHIFT | MTW_EFSROM_KICK;
1324 mtw_write(sc, MTW_EFUSE_CTRL, tmp);
1325 for (ntries = 0; ntries < 100; ntries++) {
1326 if ((error = mtw_read(sc, MTW_EFUSE_CTRL, &tmp)) != 0)
1327 return (error);
1328 if (!(tmp & MTW_EFSROM_KICK))
1329 break;
1330 DELAY(2);
1331 }
1332 if (ntries == 100)
1333 return (ETIMEDOUT);
1334
1335 if ((tmp & MTW_EFUSE_AOUT_MASK) == MTW_EFUSE_AOUT_MASK) {
1336 *val = 0xffff; // address not found
1337 return (0);
1338 }
1339 // determine to which 32-bit register our 16-bit word belongs
1340 reg = MTW_EFUSE_DATA0 + (addr & 0xc);
1341 if ((error = mtw_read(sc, reg, &tmp)) != 0)
1342 return (error);
1343
1344 *val = (addr & 2) ? tmp >> 16 : tmp & 0xffff;
1345 return (0);
1346 }
1347
1348 static __inline int
1349 mtw_srom_read(struct mtw_softc *sc, uint16_t addr, uint16_t *val)
1350 {
1351 /* either eFUSE ROM or EEPROM */
1352 return (sc->sc_srom_read(sc, addr, val));
1353 }
1354
1355 static int
1356 mtw_bbp_read(struct mtw_softc *sc, uint8_t reg, uint8_t *val)
1357 {
1358 uint32_t tmp;
1359 int ntries, error;
1360
1361 for (ntries = 0; ntries < 10; ntries++) {
1362 if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
1363 return (error);
1364 if (!(tmp & MTW_BBP_CSR_KICK))
1365 break;
1366 }
1367 if (ntries == 10)
1368 return (ETIMEDOUT);
1369
1370 tmp = MTW_BBP_CSR_READ | MTW_BBP_CSR_KICK | reg << 8;
1371 if ((error = mtw_write(sc, MTW_BBP_CSR, tmp)) != 0)
1372 return (error);
1373
1374 for (ntries = 0; ntries < 10; ntries++) {
1375 if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
1376 return (error);
1377 if (!(tmp & MTW_BBP_CSR_KICK))
1378 break;
1379 }
1380 if (ntries == 10)
1381 return (ETIMEDOUT);
1382
1383 *val = tmp & 0xff;
1384 return (0);
1385 }
1386
1387 static int
1388 mtw_bbp_write(struct mtw_softc *sc, uint8_t reg, uint8_t val)
1389 {
1390 uint32_t tmp;
1391 int ntries, error;
1392
1393 for (ntries = 0; ntries < 10; ntries++) {
1394 if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
1395 return (error);
1396 if (!(tmp & MTW_BBP_CSR_KICK))
1397 break;
1398 }
1399 if (ntries == 10)
1400 return (ETIMEDOUT);
1401
1402 tmp = MTW_BBP_CSR_KICK | reg << 8 | val;
1403 return (mtw_write(sc, MTW_BBP_CSR, tmp));
1404 }
1405
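/*
 * Send a command to the MCU by queueing it on the MCU command bulk
 * endpoint.
 */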
1406 static int
1407 mtw_mcu_cmd(struct mtw_softc *sc, u_int8_t cmd, void *buf, int len)
1408 {
1409 sc->sc_idx = 0;
1410 sc->txd_fw[sc->sc_idx]->len = htole16(
1411 len + 8);
1412 sc->txd_fw[sc->sc_idx]->flags = htole16(MTW_TXD_CMD | MTW_TXD_MCU |
1413 (cmd & 0x1f) << MTW_TXD_CMD_SHIFT | (0 & 0xf));
1414
1415 memset(&sc->txd_fw[sc->sc_idx]->fw, 0, 2004);
1416 memcpy(&sc->txd_fw[sc->sc_idx]->fw, buf, len);
1417 usbd_transfer_start(sc->sc_xfer[7]);
1418 return (0);
1419 }
1420
1421 /*
1422 * Add `delta' (signed) to each 4-bit sub-word of a 32-bit word.
1423 * Used to adjust per-rate Tx power registers.
1424 */
1425 static __inline uint32_t
1426 b4inc(uint32_t b32, int8_t delta)
1427 {
1428 int8_t i, b4;
1429
1430 for (i = 0; i < 8; i++) {
1431 b4 = b32 & 0xf;
1432 b4 += delta;
1433 if (b4 < 0)
1434 b4 = 0;
1435 else if (b4 > 0xf)
1436 b4 = 0xf;
1437 b32 = b32 >> 4 | b4 << 28;
1438 }
1439 return (b32);
1440 }
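/* Read per-channel 2GHz Tx power settings from the ROM and sanitize them. */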
1441 static void
1442 mtw_get_txpower(struct mtw_softc *sc)
1443 {
1444 uint16_t val;
1445 int i;
1446
1447 /* Read power settings for 2GHz channels. */
1448 for (i = 0; i < 14; i += 2) {
1449 mtw_srom_read(sc, MTW_EEPROM_PWR2GHZ_BASE1 + i / 2, &val);
1450 sc->txpow1[i + 0] = (int8_t)(val & 0xff);
1451 sc->txpow1[i + 1] = (int8_t)(val >> 8);
1452 mtw_srom_read(sc, MTW_EEPROM_PWR2GHZ_BASE2 + i / 2, &val);
1453 sc->txpow2[i + 0] = (int8_t)(val & 0xff);
1454 sc->txpow2[i + 1] = (int8_t)(val >> 8);
1455 }
1456 /* Fix broken Tx power entries. */
1457 for (i = 0; i < 14; i++) {
1458 if (sc->txpow1[i] < 0 || sc->txpow1[i] > 27)
1459 sc->txpow1[i] = 5;
1460 if (sc->txpow2[i] < 0 || sc->txpow2[i] > 27)
1461 sc->txpow2[i] = 5;
1462 MTW_DPRINTF(sc, MTW_DEBUG_TXPWR,
1463 "chan %d: power1=%d, power2=%d\n", mt7601_rf_chan[i].chan,
1464 sc->txpow1[i], sc->txpow2[i]);
1465 }
1466 }
1467
1468 struct ieee80211_node *
1469 mtw_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
1470 {
1471 return (malloc(sizeof(struct mtw_node), M_80211_NODE,
1472 M_NOWAIT | M_ZERO));
1473 }
1474 static int
1475 mtw_read_eeprom(struct mtw_softc *sc)
1476 {
1477 struct ieee80211com *ic = &sc->sc_ic;
1478 int8_t delta_2ghz, delta_5ghz;
1479 uint16_t val;
1480 int ridx, ant;
1481
1482 sc->sc_srom_read = mtw_efuse_read_2;
1483
1484 /* read RF information */
1485 mtw_srom_read(sc, MTW_EEPROM_CHIPID, &val);
1486 sc->rf_rev = val;
1487 mtw_srom_read(sc, MTW_EEPROM_ANTENNA, &val);
1488 sc->ntxchains = (val >> 4) & 0xf;
1489 sc->nrxchains = val & 0xf;
1490 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "EEPROM RF rev=0x%02x chains=%dT%dR\n",
1491 sc->rf_rev, sc->ntxchains, sc->nrxchains);
1492
1493 /* read ROM version */
1494 mtw_srom_read(sc, MTW_EEPROM_VERSION, &val);
1495 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "EEPROM rev=%d, FAE=%d\n", val & 0xff,
1496 val >> 8);
1497
1498 /* read MAC address */
1499 mtw_srom_read(sc, MTW_EEPROM_MAC01, &val);
1500 ic->ic_macaddr[0] = val & 0xff;
1501 ic->ic_macaddr[1] = val >> 8;
1502 mtw_srom_read(sc, MTW_EEPROM_MAC23, &val);
1503 ic->ic_macaddr[2] = val & 0xff;
1504 ic->ic_macaddr[3] = val >> 8;
1505 mtw_srom_read(sc, MTW_EEPROM_MAC45, &val);
1506 ic->ic_macaddr[4] = val & 0xff;
1507 ic->ic_macaddr[5] = val >> 8;
1508 #if 0
1509 printf("eFUSE ROM\n00: ");
1510 for (int i = 0; i < 256; i++) {
1511 if (((i % 8) == 0) && i > 0)
1512 printf("\n%02x: ", i);
1513 mtw_srom_read(sc, i, &val);
1514 printf(" %04x", val);
1515 }
1516 printf("\n");
1517 #endif
1518 	/* read NIC configuration from EEPROM */
1519 mtw_srom_read(sc, MTW_EEPROM_CONFIG, &val);
1520 device_printf(sc->sc_dev, "EEPROM CFG 0x%04x\n", val);
1521 if ((val & 0xff) != 0xff) {
1522 sc->ext_5ghz_lna = (val >> 3) & 1;
1523 sc->ext_2ghz_lna = (val >> 2) & 1;
1524 /* check if RF supports automatic Tx access gain control */
1525 sc->calib_2ghz = sc->calib_5ghz = (val >> 1) & 1;
1526 /* check if we have a hardware radio switch */
1527 sc->rfswitch = val & 1;
1528 }
1529
1530 /* read RF frequency offset from EEPROM */
1531 mtw_srom_read(sc, MTW_EEPROM_FREQ_OFFSET, &val);
1532 if ((val & 0xff) != 0xff)
1533 sc->rf_freq_offset = val;
1534 else
1535 sc->rf_freq_offset = 0;
1536 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "frequency offset 0x%x\n",
1537 sc->rf_freq_offset);
1538
1539 /* Read Tx power settings. */
1540 mtw_get_txpower(sc);
1541
1542 /* read Tx power compensation for each Tx rate */
1543 mtw_srom_read(sc, MTW_EEPROM_DELTAPWR, &val);
1544 delta_2ghz = delta_5ghz = 0;
1545 if ((val & 0xff) != 0xff && (val & 0x80)) {
1546 delta_2ghz = val & 0xf;
1547 if (!(val & 0x40)) /* negative number */
1548 delta_2ghz = -delta_2ghz;
1549 }
1550 val >>= 8;
1551 if ((val & 0xff) != 0xff && (val & 0x80)) {
1552 delta_5ghz = val & 0xf;
1553 if (!(val & 0x40)) /* negative number */
1554 delta_5ghz = -delta_5ghz;
1555 }
1556 MTW_DPRINTF(sc, MTW_DEBUG_ROM | MTW_DEBUG_TXPWR,
1557 "power compensation=%d (2GHz), %d (5GHz)\n", delta_2ghz,
1558 delta_5ghz);
1559
1560 for (ridx = 0; ridx < 5; ridx++) {
1561 uint32_t reg;
1562
1563 mtw_srom_read(sc, MTW_EEPROM_RPWR + ridx * 2, &val);
1564 reg = val;
1565 mtw_srom_read(sc, MTW_EEPROM_RPWR + ridx * 2 + 1, &val);
1566 reg |= (uint32_t)val << 16;
1567
1568 sc->txpow20mhz[ridx] = reg;
1569 sc->txpow40mhz_2ghz[ridx] = b4inc(reg, delta_2ghz);
1570 sc->txpow40mhz_5ghz[ridx] = b4inc(reg, delta_5ghz);
1571
1572 MTW_DPRINTF(sc, MTW_DEBUG_ROM | MTW_DEBUG_TXPWR,
1573 "ridx %d: power 20MHz=0x%08x, 40MHz/2GHz=0x%08x, "
1574 "40MHz/5GHz=0x%08x\n",
1575 ridx, sc->txpow20mhz[ridx], sc->txpow40mhz_2ghz[ridx],
1576 sc->txpow40mhz_5ghz[ridx]);
1577 }
1578
1579 /* read RSSI offsets and LNA gains from EEPROM */
1580 val = 0;
1581 mtw_srom_read(sc, MTW_EEPROM_RSSI1_2GHZ, &val);
1582 sc->rssi_2ghz[0] = val & 0xff; /* Ant A */
1583 sc->rssi_2ghz[1] = val >> 8; /* Ant B */
1584 mtw_srom_read(sc, MTW_EEPROM_RSSI2_2GHZ, &val);
1585 /*
1586 * On RT3070 chips (limited to 2 Rx chains), this ROM
1587 * field contains the Tx mixer gain for the 2GHz band.
1588 */
1589 if ((val & 0xff) != 0xff)
1590 sc->txmixgain_2ghz = val & 0x7;
1591 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "tx mixer gain=%u (2GHz)\n",
1592 sc->txmixgain_2ghz);
1593 sc->lna[2] = val >> 8; /* channel group 2 */
1594 mtw_srom_read(sc, MTW_EEPROM_RSSI1_5GHZ, &val);
1595 sc->rssi_5ghz[0] = val & 0xff; /* Ant A */
1596 sc->rssi_5ghz[1] = val >> 8; /* Ant B */
1597 mtw_srom_read(sc, MTW_EEPROM_RSSI2_5GHZ, &val);
1598 sc->rssi_5ghz[2] = val & 0xff; /* Ant C */
1599
1600 sc->lna[3] = val >> 8; /* channel group 3 */
1601
1602 mtw_srom_read(sc, MTW_EEPROM_LNA, &val);
1603 sc->lna[0] = val & 0xff; /* channel group 0 */
1604 sc->lna[1] = val >> 8; /* channel group 1 */
1605 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "LNA0 0x%x\n", sc->lna[0]);
1606
1607 /* fix broken 5GHz LNA entries */
1608 if (sc->lna[2] == 0 || sc->lna[2] == 0xff) {
1609 MTW_DPRINTF(sc, MTW_DEBUG_ROM,
1610 "invalid LNA for channel group %d\n", 2);
1611 sc->lna[2] = sc->lna[1];
1612 }
1613 if (sc->lna[3] == 0 || sc->lna[3] == 0xff) {
1614 MTW_DPRINTF(sc, MTW_DEBUG_ROM,
1615 "invalid LNA for channel group %d\n", 3);
1616 sc->lna[3] = sc->lna[1];
1617 }
1618
1619 /* fix broken RSSI offset entries */
1620 for (ant = 0; ant < 3; ant++) {
1621 if (sc->rssi_2ghz[ant] < -10 || sc->rssi_2ghz[ant] > 10) {
1622 MTW_DPRINTF(sc, MTW_DEBUG_ROM,
1623 "invalid RSSI%d offset: %d (2GHz)\n", ant + 1,
1624 sc->rssi_2ghz[ant]);
1625 sc->rssi_2ghz[ant] = 0;
1626 }
1627 if (sc->rssi_5ghz[ant] < -10 || sc->rssi_5ghz[ant] > 10) {
1628 MTW_DPRINTF(sc, MTW_DEBUG_ROM,
1629 "invalid RSSI%d offset: %d (5GHz)\n", ant + 1,
1630 sc->rssi_5ghz[ant]);
1631 sc->rssi_5ghz[ant] = 0;
1632 }
1633 }
1634 return (0);
1635 }
1636 static int
1637 mtw_media_change(if_t ifp)
1638 {
1639 struct ieee80211vap *vap = if_getsoftc(ifp);
1640 struct ieee80211com *ic = vap->iv_ic;
1641 const struct ieee80211_txparam *tp;
1642 struct mtw_softc *sc = ic->ic_softc;
1643 uint8_t rate, ridx;
1644
1645 MTW_LOCK(sc);
1646 ieee80211_media_change(ifp);
1647 //tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
1648 tp = &vap->iv_txparms[ic->ic_curmode];
1649 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
1650 struct ieee80211_node *ni;
1651 struct mtw_node *rn;
1652 /* XXX TODO: methodize with MCS rates */
1653 rate =
1654 ic->ic_sup_rates[ic->ic_curmode].rs_rates[tp->ucastrate] &
1655 IEEE80211_RATE_VAL;
1656 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) {
1657 if (rt2860_rates[ridx].rate == rate)
1658 break;
1659 }
1660 ni = ieee80211_ref_node(vap->iv_bss);
1661 rn = MTW_NODE(ni);
1662 rn->fix_ridx = ridx;
1663
1664 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "rate=%d, fix_ridx=%d\n", rate,
1665 rn->fix_ridx);
1666 ieee80211_free_node(ni);
1667 }
1668 MTW_UNLOCK(sc);
1669
1670 return (0);
1671 }
1672
1673 void
1674 mtw_set_leds(struct mtw_softc *sc, uint16_t which)
1675 {
1676 struct mtw_mcu_cmd_8 cmd;
1677 cmd.func = htole32(0x1);
1678 cmd.val = htole32(which);
1679 mtw_mcu_cmd(sc, CMD_LED_MODE, &cmd, sizeof(struct mtw_mcu_cmd_8));
1680 }
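/* Stop beacon transmission and the TSF/TBTT timers. */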
1681 static void
1682 mtw_abort_tsf_sync(struct mtw_softc *sc)
1683 {
1684 uint32_t tmp;
1685
1686 mtw_read(sc, MTW_BCN_TIME_CFG, &tmp);
1687 tmp &= ~(MTW_BCN_TX_EN | MTW_TSF_TIMER_EN | MTW_TBTT_TIMER_EN);
1688 mtw_write(sc, MTW_BCN_TIME_CFG, tmp);
1689 }
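/*
 * Handle net80211 state transitions.  Rate control and TSF synchronization
 * are started or stopped here as VAPs enter and leave RUN state.
 */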
1690 static int
1691 mtw_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
1692 {
1693 const struct ieee80211_txparam *tp;
1694 struct ieee80211com *ic = vap->iv_ic;
1695 struct mtw_softc *sc = ic->ic_softc;
1696 struct mtw_vap *rvp = MTW_VAP(vap);
1697 enum ieee80211_state ostate;
1698 uint32_t sta[3];
1699 uint8_t ratectl = 0;
1700 uint8_t restart_ratectl = 0;
1701 uint8_t bid = 1 << rvp->rvp_id;
1702
1703
1704 ostate = vap->iv_state;
1705 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "%s -> %s\n",
1706 ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
1707 IEEE80211_UNLOCK(ic);
1708 MTW_LOCK(sc);
1709 ratectl = sc->ratectl_run; /* remember current state */
1710 usb_callout_stop(&sc->ratectl_ch);
1711 sc->ratectl_run = MTW_RATECTL_OFF;
1712 if (ostate == IEEE80211_S_RUN) {
1713 /* turn link LED off */
1714 }
1715
1716 switch (nstate) {
1717 case IEEE80211_S_INIT:
1718 restart_ratectl = 1;
1719 if (ostate != IEEE80211_S_RUN)
1720 break;
1721
1722 ratectl &= ~bid;
1723 sc->runbmap &= ~bid;
1724
1725 /* abort TSF synchronization if there is no vap running */
1726 if (--sc->running == 0)
1727 mtw_abort_tsf_sync(sc);
1728 break;
1729
1730 case IEEE80211_S_RUN:
1731 if (!(sc->runbmap & bid)) {
1732 if (sc->running++)
1733 restart_ratectl = 1;
1734 sc->runbmap |= bid;
1735 }
1736
1737 m_freem(rvp->beacon_mbuf);
1738 rvp->beacon_mbuf = NULL;
1739
1740 switch (vap->iv_opmode) {
1741 case IEEE80211_M_HOSTAP:
1742 case IEEE80211_M_MBSS:
1743 sc->ap_running |= bid;
1744 ic->ic_opmode = vap->iv_opmode;
1745 mtw_update_beacon_cb(vap);
1746 break;
1747 case IEEE80211_M_IBSS:
1748 sc->adhoc_running |= bid;
1749 if (!sc->ap_running)
1750 ic->ic_opmode = vap->iv_opmode;
1751 mtw_update_beacon_cb(vap);
1752 break;
1753 case IEEE80211_M_STA:
1754 sc->sta_running |= bid;
1755 if (!sc->ap_running && !sc->adhoc_running)
1756 ic->ic_opmode = vap->iv_opmode;
1757
1758 /* read statistic counters (clear on read) */
1759 mtw_read_region_1(sc, MTW_TX_STA_CNT0, (uint8_t *)sta,
1760 sizeof sta);
1761
1762 break;
1763 default:
1764 ic->ic_opmode = vap->iv_opmode;
1765 break;
1766 }
1767
1768 if (vap->iv_opmode != IEEE80211_M_MONITOR) {
1769 struct ieee80211_node *ni;
1770
1771 if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) {
1772 MTW_UNLOCK(sc);
1773 IEEE80211_LOCK(ic);
1774 return (-1);
1775 }
1776 mtw_updateslot(ic);
1777 mtw_enable_mrr(sc);
1778 mtw_set_txpreamble(sc);
1779 mtw_set_basicrates(sc);
1780 ni = ieee80211_ref_node(vap->iv_bss);
1781 IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid);
1782 mtw_set_bssid(sc, sc->sc_bssid);
1783 ieee80211_free_node(ni);
1784 mtw_enable_tsf_sync(sc);
1785
1786 /* enable automatic rate adaptation */
1787 tp = &vap->iv_txparms[ieee80211_chan2mode(
1788 ic->ic_curchan)];
1789 if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE)
1790 ratectl |= bid;
1791 } else {
1792 mtw_enable_tsf_sync(sc);
1793 }
1794
1795 break;
1796 default:
1797 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "undefined state\n");
1798 break;
1799 }
1800
1801 /* restart amrr for running VAPs */
1802 if ((sc->ratectl_run = ratectl) && restart_ratectl) {
1803 usb_callout_reset(&sc->ratectl_ch, hz, mtw_ratectl_to, sc);
1804 }
1805 MTW_UNLOCK(sc);
1806 IEEE80211_LOCK(ic);
1807 return (rvp->newstate(vap, nstate, arg));
1808 }
1809
1810 static int
1811 mtw_wme_update(struct ieee80211com *ic)
1812 {
1813 struct chanAccParams chp;
1814 struct mtw_softc *sc = ic->ic_softc;
1815 const struct wmeParams *ac;
1816 int aci, error = 0;
1817 ieee80211_wme_ic_getparams(ic, &chp);
1818 ac = chp.cap_wmeParams;
1819
1820 MTW_LOCK(sc);
1821 /* update MAC TX configuration registers */
1822 for (aci = 0; aci < WME_NUM_AC; aci++) {
1823 error = mtw_write(sc, MTW_EDCA_AC_CFG(aci),
1824 ac[aci].wmep_logcwmax << 16 | ac[aci].wmep_logcwmin << 12 |
1825 ac[aci].wmep_aifsn << 8 | ac[aci].wmep_txopLimit);
1826 if (error)
1827 goto err;
1828 }
1829
1830 /* update SCH/DMA registers too */
1831 error = mtw_write(sc, MTW_WMM_AIFSN_CFG,
1832 ac[WME_AC_VO].wmep_aifsn << 12 | ac[WME_AC_VI].wmep_aifsn << 8 |
1833 ac[WME_AC_BK].wmep_aifsn << 4 | ac[WME_AC_BE].wmep_aifsn);
1834 if (error)
1835 goto err;
1836 error = mtw_write(sc, MTW_WMM_CWMIN_CFG,
1837 ac[WME_AC_VO].wmep_logcwmin << 12 |
1838 ac[WME_AC_VI].wmep_logcwmin << 8 |
1839 ac[WME_AC_BK].wmep_logcwmin << 4 | ac[WME_AC_BE].wmep_logcwmin);
1840 if (error)
1841 goto err;
1842 error = mtw_write(sc, MTW_WMM_CWMAX_CFG,
1843 ac[WME_AC_VO].wmep_logcwmax << 12 |
1844 ac[WME_AC_VI].wmep_logcwmax << 8 |
1845 ac[WME_AC_BK].wmep_logcwmax << 4 | ac[WME_AC_BE].wmep_logcwmax);
1846 if (error)
1847 goto err;
1848 error = mtw_write(sc, MTW_WMM_TXOP0_CFG,
1849 ac[WME_AC_BK].wmep_txopLimit << 16 | ac[WME_AC_BE].wmep_txopLimit);
1850 if (error)
1851 goto err;
1852 error = mtw_write(sc, MTW_WMM_TXOP1_CFG,
1853 ac[WME_AC_VO].wmep_txopLimit << 16 | ac[WME_AC_VI].wmep_txopLimit);
1854
1855 err:
1856 MTW_UNLOCK(sc);
1857 if (error)
1858 MTW_DPRINTF(sc, MTW_DEBUG_USB, "WME update failed\n");
1859
1860 return (error);
1861 }
1862
1863 static int
1864 mtw_key_set(struct ieee80211vap *vap, struct ieee80211_key *k)
1865 {
1866 struct ieee80211com *ic = vap->iv_ic;
1867 struct mtw_softc *sc = ic->ic_softc;
1868 uint32_t i;
1869
1870 i = MTW_CMDQ_GET(&sc->cmdq_store);
1871 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "cmdq_store=%d\n", i);
1872 sc->cmdq[i].func = mtw_key_set_cb;
1873 sc->cmdq[i].arg0 = NULL;
1874 sc->cmdq[i].arg1 = vap;
1875 sc->cmdq[i].k = k;
1876 IEEE80211_ADDR_COPY(sc->cmdq[i].mac, k->wk_macaddr);
1877 ieee80211_runtask(ic, &sc->cmdq_task);
1878
1879 /*
1880 * To make sure key will be set when hostapd
1881 * calls iv_key_set() before if_init().
1882 */
1883 if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
1884 MTW_LOCK(sc);
1885 sc->cmdq_key_set = MTW_CMDQ_GO;
1886 MTW_UNLOCK(sc);
1887 }
1888
1889 return (1);
1890 }
1891 static void
1892 mtw_key_set_cb(void *arg)
1893 {
1894 struct mtw_cmdq *cmdq = arg;
1895 struct ieee80211vap *vap = cmdq->arg1;
1896 struct ieee80211_key *k = cmdq->k;
1897 struct ieee80211com *ic = vap->iv_ic;
1898 struct mtw_softc *sc = ic->ic_softc;
1899 struct ieee80211_node *ni;
1900 u_int cipher = k->wk_cipher->ic_cipher;
1901 uint32_t attr;
1902 uint16_t base;
1903 uint8_t mode, wcid, iv[8];
1904 MTW_LOCK_ASSERT(sc, MA_OWNED);
1905
1906 if (vap->iv_opmode == IEEE80211_M_HOSTAP)
1907 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, cmdq->mac);
1908 else
1909 ni = vap->iv_bss;
1910
1911 /* map net80211 cipher to RT2860 security mode */
1912 switch (cipher) {
1913 case IEEE80211_CIPHER_WEP:
1914 if (k->wk_keylen < 8)
1915 mode = MTW_MODE_WEP40;
1916 else
1917 mode = MTW_MODE_WEP104;
1918 break;
1919 case IEEE80211_CIPHER_TKIP:
1920 mode = MTW_MODE_TKIP;
1921 break;
1922 case IEEE80211_CIPHER_AES_CCM:
1923 mode = MTW_MODE_AES_CCMP;
1924 break;
1925 default:
1926 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "undefined case\n");
1927 return;
1928 }
1929
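/*
 * Group keys live in the shared-key table (indexed by key index);
 * pairwise keys live in the per-WCID key table.
 */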
1930 if (k->wk_flags & IEEE80211_KEY_GROUP) {
1931 wcid = 0; /* NB: update WCID0 for group keys */
1932 base = MTW_SKEY(0, k->wk_keyix);
1933 } else {
1934 wcid = (ni != NULL) ? MTW_AID2WCID(ni->ni_associd) : 0;
1935 base = MTW_PKEY(wcid);
1936 }
1937
1938 if (cipher == IEEE80211_CIPHER_TKIP) {
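/*
 * net80211 stores the TKIP key as TK | Tx MIC | Rx MIC; the swapped
 * 8-byte writes below suggest the hardware expects TK | Rx MIC | Tx MIC.
 */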
1939 mtw_write_region_1(sc, base, k->wk_key, 16);
1940 mtw_write_region_1(sc, base + 16, &k->wk_key[24], 8);
1941 mtw_write_region_1(sc, base + 24, &k->wk_key[16], 8);
1942 } else {
1943 /* roundup len to 16-bit: XXX fix write_region_1() instead */
1944 mtw_write_region_1(sc, base, k->wk_key,
1945 (k->wk_keylen + 1) & ~1);
1946 }
1947
1948 if (!(k->wk_flags & IEEE80211_KEY_GROUP) ||
1949 (k->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))) {
1950 /* set initial packet number in IV+EIV */
1951 if (cipher == IEEE80211_CIPHER_WEP) {
1952 memset(iv, 0, sizeof iv);
1953 iv[3] = vap->iv_def_txkey << 6;
1954 } else {
1955 if (cipher == IEEE80211_CIPHER_TKIP) {
1956 iv[0] = k->wk_keytsc >> 8;
1957 iv[1] = (iv[0] | 0x20) & 0x7f;
1958 iv[2] = k->wk_keytsc;
1959 } else { //CCMP
1960 iv[0] = k->wk_keytsc;
1961 iv[1] = k->wk_keytsc >> 8;
1962 iv[2] = 0;
1963 }
1964 iv[3] = k->wk_keyix << 6 | IEEE80211_WEP_EXTIV;
1965 iv[4] = k->wk_keytsc >> 16;
1966 iv[5] = k->wk_keytsc >> 24;
1967 iv[6] = k->wk_keytsc >> 32;
1968 iv[7] = k->wk_keytsc >> 40;
1969 }
1970 mtw_write_region_1(sc, MTW_IVEIV(wcid), iv, 8);
1971 }
1972
1973 if (k->wk_flags & IEEE80211_KEY_GROUP) {
1974 /* install group key */
1975 mtw_read(sc, MTW_SKEY_MODE_0_7, &attr);
1976 attr &= ~(0xf << (k->wk_keyix * 4));
1977 attr |= mode << (k->wk_keyix * 4);
1978 mtw_write(sc, MTW_SKEY_MODE_0_7, attr);
1979
1980 if (cipher & (IEEE80211_CIPHER_WEP)) {
1981 mtw_read(sc, MTW_WCID_ATTR(wcid + 1), &attr);
1982 attr = (attr & ~0xf) | (mode << 1);
1983 mtw_write(sc, MTW_WCID_ATTR(wcid + 1), attr);
1984
1985 mtw_set_region_4(sc, MTW_IVEIV(0), 0, 4);
1986
1987 mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
1988 attr = (attr & ~0xf) | (mode << 1);
1989 mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
1990 }
1991 } else {
1992 /* install pairwise key */
1993 mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
1994 attr = (attr & ~0xf) | (mode << 1) | MTW_RX_PKEY_EN;
1995 mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
1996 }
1997 k->wk_pad = wcid;
1998 }
1999
2000 /*
2001  * If the wlan is destroyed without being brought down first, i.e.
2002  * without "wlan down" or wpa_cli terminate, this function is called
2003  * after the vap is gone, so do not dereference the vap here.
2004 */
2005 static void
2006 mtw_key_delete_cb(void *arg)
2007 {
2008 struct mtw_cmdq *cmdq = arg;
2009 struct mtw_softc *sc = cmdq->arg1;
2010 struct ieee80211_key *k = &cmdq->key;
2011 uint32_t attr;
2012 uint8_t wcid;
2013
2014 MTW_LOCK_ASSERT(sc, MA_OWNED);
2015
2016 if (k->wk_flags & IEEE80211_KEY_GROUP) {
2017 /* remove group key */
2018 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "removing group key\n");
2019 mtw_read(sc, MTW_SKEY_MODE_0_7, &attr);
2020 attr &= ~(0xf << (k->wk_keyix * 4));
2021 mtw_write(sc, MTW_SKEY_MODE_0_7, attr);
2022 } else {
2023 /* remove pairwise key */
2024 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "removing key for wcid %x\n",
2025 k->wk_pad);
2026 /* matching wcid was written to wk_pad in mtw_key_set() */
2027 wcid = k->wk_pad;
2028 mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
2029 attr &= ~0xf;
2030 mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
2031 }
2032
2033 k->wk_pad = 0;
2034 }
2035
2036 /*
2037 * return 0 on error
2038 */
2039 static int
2040 mtw_key_delete(struct ieee80211vap *vap, struct ieee80211_key *k)
2041 {
2042 struct ieee80211com *ic = vap->iv_ic;
2043 struct mtw_softc *sc = ic->ic_softc;
2044 struct ieee80211_key *k0;
2045 uint32_t i;
2046 if (sc->sc_flags & MTW_RUNNING)
2047 return (1);
2048
2049 /*
2050  * By the time the deferred callback runs, the key may already be
2051  * gone, so make a copy of the values needed to delete it before
2052  * deferring. Because of a LOR with the node lock we cannot take
2053  * the driver lock here, so use the atomic command queue instead.
2054 */
2055 i = MTW_CMDQ_GET(&sc->cmdq_store);
2056 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "cmdq_store=%d\n", i);
2057 sc->cmdq[i].func = mtw_key_delete_cb;
2058 sc->cmdq[i].arg0 = NULL;
2059 sc->cmdq[i].arg1 = sc;
2060 k0 = &sc->cmdq[i].key;
2061 k0->wk_flags = k->wk_flags;
2062 k0->wk_keyix = k->wk_keyix;
2063 /* matching wcid was written to wk_pad in mtw_key_set() */
2064 k0->wk_pad = k->wk_pad;
2065 ieee80211_runtask(ic, &sc->cmdq_task);
2066 return (1); /* return fake success */
2067 }
2068
2069 static void
2070 mtw_ratectl_to(void *arg)
2071 {
2072 struct mtw_softc *sc = arg;
2073 /* do it in a process context, so it can go sleep */
2074 ieee80211_runtask(&sc->sc_ic, &sc->ratectl_task);
2075 /* next timeout will be rescheduled in the callback task */
2076 }
2077
2078 /* ARGSUSED */
2079 static void
2080 mtw_ratectl_cb(void *arg, int pending)
2081 {
2082
2083 struct mtw_softc *sc = arg;
2084 struct ieee80211com *ic = &sc->sc_ic;
2085 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2086
2087 if (vap == NULL)
2088 return;
2089
2090 ieee80211_iterate_nodes(&ic->ic_sta, mtw_iter_func, sc);
2091
2092 usb_callout_reset(&sc->ratectl_ch, hz, mtw_ratectl_to, sc);
2093
2094
2095 }
2096
2097 static void
2098 mtw_drain_fifo(void *arg)
2099 {
2100 struct mtw_softc *sc = arg;
2101 uint32_t stat;
2102 uint16_t(*wstat)[3];
2103 uint8_t wcid, mcs, pid;
2104 int8_t retry;
2105
2106 MTW_LOCK_ASSERT(sc, MA_OWNED);
2107
2108 for (;;) {
2109 /* drain Tx status FIFO (maxsize = 16) */
2110 mtw_read(sc, MTW_TX_STAT_FIFO, &stat);
2111 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "tx stat 0x%08x\n", stat);
2112 if (!(stat & MTW_TXQ_VLD))
2113 break;
2114
2115 wcid = (stat >> MTW_TXQ_WCID_SHIFT) & 0xff;
2116
2117 /* if no ACK was requested, no feedback is available */
2118 if (!(stat & MTW_TXQ_ACKREQ) || wcid > MTW_WCID_MAX ||
2119 wcid == 0)
2120 continue;
2121
2122 /*
2123  * Even though each entry has a Tx-complete-status-like format,
2124  * there is no guarantee that the node it refers to is still
2125  * around by the time we read the stats. If we called
2126  * ieee80211_ratectl_tx_update() directly from here, we would
2127  * have a hard time not touching an already freed node.
2128  *
2129  * To avoid such use-after-free faults, we accumulate the stats
2130  * in the softc and feed them to ieee80211_ratectl_tx_update()
2131  * later, from mtw_iter_func().
2132 */
2133 wstat = &(sc->wcid_stats[wcid]);
2134 (*wstat)[MTW_TXCNT]++;
2135 if (stat & MTW_TXQ_OK)
2136 (*wstat)[MTW_SUCCESS]++;
2137 else
2138 counter_u64_add(sc->sc_ic.ic_oerrors, 1);
2139 /*
2140 * Check if there were retries, ie if the Tx success rate is
2141 * different from the requested rate. Note that it works only
2142 * because we do not allow rate fallback from OFDM to CCK.
2143 */
2144 mcs = (stat >> MTW_TXQ_MCS_SHIFT) & 0x7f;
2145 pid = (stat >> MTW_TXQ_PID_SHIFT) & 0xf;
2146 if ((retry = pid - 1 - mcs) > 0) {
2147 (*wstat)[MTW_TXCNT] += retry;
2148 (*wstat)[MTW_RETRY] += retry;
2149 }
2150 }
2151 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "count=%d\n", sc->fifo_cnt);
2152
2153 sc->fifo_cnt = 0;
2154 }
2155
2156 static void
2157 mtw_iter_func(void *arg, struct ieee80211_node *ni)
2158 {
2159 struct mtw_softc *sc = arg;
2160 MTW_LOCK(sc);
2161 struct ieee80211_ratectl_tx_stats *txs = &sc->sc_txs;
2162 struct ieee80211vap *vap = ni->ni_vap;
2163 struct mtw_node *rn = MTW_NODE(ni);
2164 uint32_t sta[3];
2165 uint16_t(*wstat)[3];
2166 int error, ridx;
2167 uint8_t txrate = 0;
2168
2169 /* Check for special case */
2170 if (sc->rvp_cnt <= 1 && vap->iv_opmode == IEEE80211_M_STA &&
2171 ni != vap->iv_bss)
2172 goto fail;
2173
2174 txs->flags = IEEE80211_RATECTL_TX_STATS_NODE |
2175 IEEE80211_RATECTL_TX_STATS_RETRIES;
2176 txs->ni = ni;
2177 if (sc->rvp_cnt <= 1 &&
2178 (vap->iv_opmode == IEEE80211_M_IBSS ||
2179 vap->iv_opmode == IEEE80211_M_STA)) {
2180 /*
2181 * read statistic counters (clear on read) and update AMRR state
2182 */
2183 error = mtw_read_region_1(sc, MTW_TX_STA_CNT0, (uint8_t *)sta,
2184 sizeof sta);
2185 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "error:%d\n", error);
2186 if (error != 0)
2187 goto fail;
2188
2189 /* count failed TX as errors */
2190 if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS,
2191 le32toh(sta[0]) & 0xffff);
2192
2193 txs->nretries = (le32toh(sta[1]) >> 16);
2194 txs->nsuccess = (le32toh(sta[1]) & 0xffff);
2195 /* nretries??? */
2196 txs->nframes = txs->nsuccess + (le32toh(sta[0]) & 0xffff);
2197
2198 MTW_DPRINTF(sc, MTW_DEBUG_RATE,
2199 "retrycnt=%d success=%d failcnt=%d\n", txs->nretries,
2200 txs->nsuccess, le32toh(sta[0]) & 0xffff);
2201 } else {
2202 wstat = &(sc->wcid_stats[MTW_AID2WCID(ni->ni_associd)]);
2203
2204 if (wstat == &(sc->wcid_stats[0]) ||
2205 wstat > &(sc->wcid_stats[MTW_WCID_MAX]))
2206 goto fail;
2207
2208 txs->nretries = (*wstat)[MTW_RETRY];
2209 txs->nsuccess = (*wstat)[MTW_SUCCESS];
2210 txs->nframes = (*wstat)[MTW_TXCNT];
2211 MTW_DPRINTF(sc, MTW_DEBUG_RATE,
2212 "wstat retrycnt=%d txcnt=%d success=%d\n", txs->nretries,
2213 txs->nframes, txs->nsuccess);
2214
2215 memset(wstat, 0, sizeof(*wstat));
2216 }
2217
2218 ieee80211_ratectl_tx_update(vap, txs);
2219 ieee80211_ratectl_rate(ni, NULL, 0);
2220 txrate = ieee80211_node_get_txrate_dot11rate(ni);
2221
2222 /* XXX TODO: methodize with MCS rates */
2223 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) {
2224 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "ni_txrate=0x%x\n",
2225 txrate);
2226 if (rt2860_rates[ridx].rate == txrate) {
2227 break;
2228 }
2229 }
2230 rn->amrr_ridx = ridx;
2231 fail:
2232 MTW_UNLOCK(sc);
2233
2234 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "rate=%d, ridx=%d\n",
2235 txrate, rn->amrr_ridx);
2236 }
2237
2238 static void
2239 mtw_newassoc_cb(void *arg)
2240 {
2241 struct mtw_cmdq *cmdq = arg;
2242 struct ieee80211_node *ni = cmdq->arg1;
2243 struct mtw_softc *sc = ni->ni_vap->iv_ic->ic_softc;
2244
2245 uint8_t wcid = cmdq->wcid;
2246
2247 MTW_LOCK_ASSERT(sc, MA_OWNED);
2248
2249 mtw_write_region_1(sc, MTW_WCID_ENTRY(wcid), ni->ni_macaddr,
2250 IEEE80211_ADDR_LEN);
2251
2252 memset(&(sc->wcid_stats[wcid]), 0, sizeof(sc->wcid_stats[wcid]));
2253 }
2254
2255 static void
2256 mtw_newassoc(struct ieee80211_node *ni, int isnew)
2257 {
2258
2259 struct mtw_node *mn = MTW_NODE(ni);
2260 struct ieee80211vap *vap = ni->ni_vap;
2261 struct ieee80211com *ic = vap->iv_ic;
2262 struct mtw_softc *sc = ic->ic_softc;
2263
2264 uint8_t rate;
2265 uint8_t ridx;
2266 uint8_t wcid;
2267 //int i;
2268 // int i,j;
2269 wcid = MTW_AID2WCID(ni->ni_associd);
2270
2271 if (wcid > MTW_WCID_MAX) {
2272 device_printf(sc->sc_dev, "wcid=%d out of range\n", wcid);
2273 return;
2274 }
2275
2276 /* only interested in true associations */
2277 if (isnew && ni->ni_associd != 0) {
2278 /*
2279  * This function can be called from a timeout (non-sleepable)
2280  * context, so the WCID table update must be deferred.
2281 */
2282
2283 uint32_t cnt = MTW_CMDQ_GET(&sc->cmdq_store);
2284 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "cmdq_store=%d\n", cnt);
2285 sc->cmdq[cnt].func = mtw_newassoc_cb;
2286 sc->cmdq[cnt].arg0 = NULL;
2287 sc->cmdq[cnt].arg1 = ni;
2288 sc->cmdq[cnt].wcid = wcid;
2289 ieee80211_runtask(ic, &sc->cmdq_task);
2290 }
2291
2292 MTW_DPRINTF(sc, MTW_DEBUG_STATE,
2293 "new assoc isnew=%d associd=%x addr=%s\n", isnew, ni->ni_associd,
2294 ether_sprintf(ni->ni_macaddr));
2295 rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate;
2296 /* XXX TODO: methodize with MCS rates */
2297 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
2298 if (rt2860_rates[ridx].rate == rate)
2299 break;
2300 mn->mgt_ridx = ridx;
2301 MTW_DPRINTF(sc, MTW_DEBUG_STATE | MTW_DEBUG_RATE,
2302 "rate=%d, ctl_ridx=%d\n", rate, ridx);
2303 MTW_LOCK(sc);
2304 if (sc->ratectl_run != MTW_RATECTL_OFF) {
2305 usb_callout_reset(&sc->ratectl_ch, hz, &mtw_ratectl_to, sc);
2306 }
2307 MTW_UNLOCK(sc);
2308
2309 }
2310
2311 /*
2312 * Return the Rx chain with the highest RSSI for a given frame.
2313 */
2314 static __inline uint8_t
2315 mtw_maxrssi_chain(struct mtw_softc *sc, const struct mtw_rxwi *rxwi)
2316 {
2317 uint8_t rxchain = 0;
2318
2319 if (sc->nrxchains > 1) {
2320 if (rxwi->rssi[1] > rxwi->rssi[rxchain])
2321 rxchain = 1;
2322 if (sc->nrxchains > 2)
2323 if (rxwi->rssi[2] > rxwi->rssi[rxchain])
2324 rxchain = 2;
2325 }
2326 return (rxchain);
2327 }
2328 static void
2329 mtw_get_tsf(struct mtw_softc *sc, uint64_t *buf)
2330 {
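/* the 64-bit TSF is read as two consecutive 32-bit words starting at DW0 */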
2331 mtw_read_region_1(sc, MTW_TSF_TIMER_DW0, (uint8_t *)buf, sizeof(*buf));
2332 }
2333
2334 static void
2335 mtw_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype,
2336 const struct ieee80211_rx_stats *rxs, int rssi, int nf)
2337 {
2338 struct ieee80211vap *vap = ni->ni_vap;
2339 struct mtw_softc *sc = vap->iv_ic->ic_softc;
2340 struct mtw_vap *rvp = MTW_VAP(vap);
2341 uint64_t ni_tstamp, rx_tstamp;
2342
2343 rvp->recv_mgmt(ni, m, subtype, rxs, rssi, nf);
2344
2345 if (vap->iv_state == IEEE80211_S_RUN &&
2346 (subtype == IEEE80211_FC0_SUBTYPE_BEACON ||
2347 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) {
2348 ni_tstamp = le64toh(ni->ni_tstamp.tsf);
2349 MTW_LOCK(sc);
2350 mtw_get_tsf(sc, &rx_tstamp);
2351 MTW_UNLOCK(sc);
2352 rx_tstamp = le64toh(rx_tstamp);
2353
2354 if (ni_tstamp >= rx_tstamp) {
2355 MTW_DPRINTF(sc, MTW_DEBUG_RECV | MTW_DEBUG_BEACON,
2356 "ibss merge, tsf %ju tstamp %ju\n",
2357 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp);
2358 (void)ieee80211_ibss_merge(ni);
2359 }
2360 }
2361 }
2362 static void
2363 mtw_rx_frame(struct mtw_softc *sc, struct mbuf *m, uint32_t dmalen)
2364 {
2365 struct ieee80211com *ic = &sc->sc_ic;
2366 struct ieee80211_frame *wh;
2367 struct ieee80211_node *ni;
2368 struct epoch_tracker et;
2369
2370 struct mtw_rxwi *rxwi;
2371 uint32_t flags;
2372 uint16_t len, rxwisize;
2373 uint8_t ant, rssi;
2374 int8_t nf;
2375
2376 rxwisize = sizeof(struct mtw_rxwi);
2377
2378 if (__predict_false(
2379 dmalen < rxwisize + sizeof(struct ieee80211_frame_ack))) {
2380 MTW_DPRINTF(sc, MTW_DEBUG_RECV,
2381 "payload is too short: dma length %u < %zu\n", dmalen,
2382 rxwisize + sizeof(struct ieee80211_frame_ack));
2383 goto fail;
2384 }
2385
2386 rxwi = mtod(m, struct mtw_rxwi *);
2387 len = le16toh(rxwi->len) & 0xfff;
2388 flags = le32toh(rxwi->flags);
2389 if (__predict_false(len > dmalen - rxwisize)) {
2390 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "bad RXWI length %u > %u\n",
2391 len, dmalen);
2392 goto fail;
2393 }
2394
2395 if (__predict_false(flags & (MTW_RX_CRCERR | MTW_RX_ICVERR))) {
2396 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "%s error.\n",
2397 (flags & MTW_RX_CRCERR) ? "CRC" : "ICV");
2398 goto fail;
2399 }
2400
2401 if (flags & MTW_RX_L2PAD) {
2402 MTW_DPRINTF(sc, MTW_DEBUG_RECV,
2403 "received RT2860_RX_L2PAD frame\n");
2404 len += 2;
2405 }
2406
2407 m->m_data += rxwisize;
2408 m->m_pkthdr.len = m->m_len = len;
2409
2410 wh = mtod(m, struct ieee80211_frame *);
2411 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
2412 wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
2413 m->m_flags |= M_WEP;
2414 }
2415
2416 if (len >= sizeof(struct ieee80211_frame_min)) {
2417 ni = ieee80211_find_rxnode(ic,
2418 mtod(m, struct ieee80211_frame_min *));
2419 } else
2420 ni = NULL;
2421
2422 if (ni && ni->ni_flags & IEEE80211_NODE_HT) {
2423 m->m_flags |= M_AMPDU;
2424 }
2425
2426 if (__predict_false(flags & MTW_RX_MICERR)) {
2427 /* report MIC failures to net80211 for TKIP */
2428 if (ni != NULL)
2429 ieee80211_notify_michael_failure(ni->ni_vap, wh,
2430 rxwi->keyidx);
2431 MTW_DPRINTF(sc, MTW_DEBUG_RECV,
2432 "MIC error. Someone is lying.\n");
2433 goto fail;
2434 }
2435
2436 ant = mtw_maxrssi_chain(sc, rxwi);
2437 rssi = rxwi->rssi[ant];
2438 nf = mtw_rssi2dbm(sc, rssi, ant);
2439
2440 if (__predict_false(ieee80211_radiotap_active(ic))) {
2441 struct mtw_rx_radiotap_header *tap = &sc->sc_rxtap;
2442 uint16_t phy;
2443
2444 tap->wr_flags = 0;
2445 if (flags & MTW_RX_L2PAD)
2446 tap->wr_flags |= IEEE80211_RADIOTAP_F_DATAPAD;
2447 tap->wr_antsignal = rssi;
2448 tap->wr_antenna = ant;
2449 tap->wr_dbm_antsignal = mtw_rssi2dbm(sc, rssi, ant);
2450 tap->wr_rate = 2; /* in case it can't be found below */
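/* radiotap wr_rate is in 500 kb/s units, so 2 == 1 Mb/s CCK */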
2451 //MTW_LOCK(sc);
2452
2453 // MTW_UNLOCK(sc);
2454 phy = le16toh(rxwi->phy);
2455 switch (phy >> MT7601_PHY_SHIFT) {
2456 case MTW_PHY_CCK:
2457 switch ((phy & MTW_PHY_MCS) & ~MTW_PHY_SHPRE) {
2458 case 0:
2459 tap->wr_rate = 2;
2460 break;
2461 case 1:
2462 tap->wr_rate = 4;
2463 break;
2464 case 2:
2465 tap->wr_rate = 11;
2466 break;
2467 case 3:
2468 tap->wr_rate = 22;
2469 break;
2470 }
2471 if (phy & MTW_PHY_SHPRE)
2472 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2473 break;
2474 case MTW_PHY_OFDM:
2475 switch (phy & MTW_PHY_MCS) {
2476 case 0:
2477 tap->wr_rate = 12;
2478 break;
2479 case 1:
2480 tap->wr_rate = 18;
2481 break;
2482 case 2:
2483 tap->wr_rate = 24;
2484 break;
2485 case 3:
2486 tap->wr_rate = 36;
2487 break;
2488 case 4:
2489 tap->wr_rate = 48;
2490 break;
2491 case 5:
2492 tap->wr_rate = 72;
2493 break;
2494 case 6:
2495 tap->wr_rate = 96;
2496 break;
2497 case 7:
2498 tap->wr_rate = 108;
2499 break;
2500 }
2501 break;
2502 }
2503 }
2504
2505 NET_EPOCH_ENTER(et);
2506 if (ni != NULL) {
2507 (void)ieee80211_input(ni, m, rssi, nf);
2508 ieee80211_free_node(ni);
2509 } else {
2510 (void)ieee80211_input_all(ic, m, rssi, nf);
2511 }
2512 NET_EPOCH_EXIT(et);
2513
2514 return;
2515
2516 fail:
2517 m_freem(m);
2518 counter_u64_add(ic->ic_ierrors, 1);
2519 }
2520
2521 static void
2522 mtw_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
2523 {
2524 struct mtw_softc *sc = usbd_xfer_softc(xfer);
2525 struct ieee80211com *ic = &sc->sc_ic;
2526 struct mbuf *m = NULL;
2527 struct mbuf *m0;
2528 uint32_t dmalen, mbuf_len;
2529 uint16_t rxwisize;
2530 int xferlen;
2531
2532 rxwisize = sizeof(struct mtw_rxwi);
2533
2534 usbd_xfer_status(xfer, &xferlen, NULL, NULL, NULL);
2535
2536 switch (USB_GET_STATE(xfer)) {
2537 case USB_ST_TRANSFERRED:
2538 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "rx done, actlen=%d\n",
2539 xferlen);
2540 if (xferlen < (int)(sizeof(uint32_t) + rxwisize +
2541 sizeof(struct mtw_rxd))) {
2542 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
2543 "xfer too short %d %d\n", xferlen,
2544 (int)(sizeof(uint32_t) + rxwisize +
2545 sizeof(struct mtw_rxd)));
2546 goto tr_setup;
2547 }
2548
2549 m = sc->rx_m;
2550 sc->rx_m = NULL;
2551
2552 /* FALLTHROUGH */
2553 case USB_ST_SETUP:
2554 tr_setup:
2555
2556 if (sc->rx_m == NULL) {
2557 sc->rx_m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
2558 MTW_MAX_RXSZ);
2559 }
2560 if (sc->rx_m == NULL) {
2561 MTW_DPRINTF(sc,
2562 MTW_DEBUG_RECV | MTW_DEBUG_RECV_DESC |
2563 MTW_DEBUG_USB,
2564 "could not allocate mbuf - idle with stall\n");
2565 counter_u64_add(ic->ic_ierrors, 1);
2566 usbd_xfer_set_stall(xfer);
2567 usbd_xfer_set_frames(xfer, 0);
2568 } else {
2569 /*
2570 * Directly loading a mbuf cluster into DMA to
2571 * save some data copying. This works because
2572 * there is only one cluster.
2573 */
2574 usbd_xfer_set_frame_data(xfer, 0,
2575 mtod(sc->rx_m, caddr_t), MTW_MAX_RXSZ);
2576 usbd_xfer_set_frames(xfer, 1);
2577 }
2578 usbd_transfer_submit(xfer);
2579 break;
2580
2581 default: /* Error */
2582 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2583 "USB transfer error, %s\n", usbd_errstr(error));
2584
2585 if (error != USB_ERR_CANCELLED) {
2586 /* try to clear stall first */
2587 usbd_xfer_set_stall(xfer);
2588 if (error == USB_ERR_TIMEOUT)
2589 device_printf(sc->sc_dev, "device timeout %s\n",
2590 __func__);
2591 counter_u64_add(ic->ic_ierrors, 1);
2592 goto tr_setup;
2593 }
2594 if (sc->rx_m != NULL) {
2595 m_freem(sc->rx_m);
2596 sc->rx_m = NULL;
2597 }
2598 break;
2599 }
2600
2601 if (m == NULL)
2602 return;
2603
2604 /* passing the frames up to net80211 must be done last, with the driver lock dropped */
2605
2606 MTW_UNLOCK(sc);
2607
2608 m->m_pkthdr.len = m->m_len = xferlen;
2609
2610 /* HW can aggregate multiple 802.11 frames in a single USB xfer */
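/*
 * Each subframe handled below is a 32-bit DMA length word followed by
 * the frame itself (dmalen bytes, 32-bit aligned) and what appears to
 * be 4 trailing bytes, hence the dmalen + 8 stride.
 */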
2611 for (;;) {
2612 dmalen = le32toh(*mtod(m, uint32_t *)) & 0xffff;
2613
2614 if ((dmalen >= (uint32_t)-8) || (dmalen == 0) ||
2615 ((dmalen & 3) != 0)) {
2616 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
2617 "bad DMA length %u\n", dmalen);
2618 break;
2619 }
2620 if ((dmalen + 8) > (uint32_t)xferlen) {
2621 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
2622 "bad DMA length %u > %d\n", dmalen + 8, xferlen);
2623 break;
2624 }
2625
2626 /* If it is the last one or a single frame, we won't copy. */
2627 if ((xferlen -= dmalen + 8) <= 8) {
2628 /* trim 32-bit DMA-len header */
2629 m->m_data += 4;
2630 m->m_pkthdr.len = m->m_len -= 4;
2631 mtw_rx_frame(sc, m, dmalen);
2632 m = NULL; /* don't free source buffer */
2633 break;
2634 }
2635
2636 mbuf_len = dmalen + sizeof(struct mtw_rxd);
2637 if (__predict_false(mbuf_len > MCLBYTES)) {
2638 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
2639 "payload is too big: mbuf_len %u\n", mbuf_len);
2640 counter_u64_add(ic->ic_ierrors, 1);
2641 break;
2642 }
2643
2644 /* copy aggregated frames to another mbuf */
2645 m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2646 if (__predict_false(m0 == NULL)) {
2647 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC,
2648 "could not allocate mbuf\n");
2649 counter_u64_add(ic->ic_ierrors, 1);
2650 break;
2651 }
2652 m_copydata(m, 4 /* skip 32-bit DMA-len header */, mbuf_len,
2653 mtod(m0, caddr_t));
2654 m0->m_pkthdr.len = m0->m_len = mbuf_len;
2655 mtw_rx_frame(sc, m0, dmalen);
2656
2657 /* update data ptr */
2658 m->m_data += mbuf_len + 4;
2659 m->m_pkthdr.len = m->m_len -= mbuf_len + 4;
2660 }
2661
2662 /* make sure we free the source buffer, if any */
2663 m_freem(m);
2664
2665 #ifdef IEEE80211_SUPPORT_SUPERG
2666 ieee80211_ff_age_all(ic, 100);
2667 #endif
2668 MTW_LOCK(sc);
2669 }
2670
2671 static void
2672 mtw_tx_free(struct mtw_endpoint_queue *pq, struct mtw_tx_data *data, int txerr)
2673 {
2674
2675 ieee80211_tx_complete(data->ni, data->m, txerr);
2676 data->m = NULL;
2677 data->ni = NULL;
2678
2679 STAILQ_INSERT_TAIL(&pq->tx_fh, data, next);
2680 pq->tx_nfree++;
2681 }
2682 static void
2683 mtw_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error, u_int index)
2684 {
2685 struct mtw_softc *sc = usbd_xfer_softc(xfer);
2686 struct ieee80211com *ic = &sc->sc_ic;
2687 struct mtw_tx_data *data;
2688 struct ieee80211vap *vap = NULL;
2689 struct usb_page_cache *pc;
2690 struct mtw_endpoint_queue *pq = &sc->sc_epq[index];
2691 struct mbuf *m;
2692 usb_frlength_t size;
2693 int actlen;
2694 int sumlen;
2695 usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
2696
2697 switch (USB_GET_STATE(xfer)) {
2698 case USB_ST_TRANSFERRED:
2699 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2700 "transfer complete: %d bytes @ index %d\n", actlen, index);
2701
2702 data = usbd_xfer_get_priv(xfer);
2703 mtw_tx_free(pq, data, 0);
2704 usbd_xfer_set_priv(xfer, NULL);
2705
2706 /* FALLTHROUGH */
2707 case USB_ST_SETUP:
2708 tr_setup:
2709 data = STAILQ_FIRST(&pq->tx_qh);
2710 if (data == NULL)
2711 break;
2712
2713 STAILQ_REMOVE_HEAD(&pq->tx_qh, next);
2714
2715 m = data->m;
2716
2717 size = sizeof(data->desc);
2718 if ((m->m_pkthdr.len + size + 3 + 8) > MTW_MAX_TXSZ) {
2719 MTW_DPRINTF(sc, MTW_DEBUG_XMIT_DESC | MTW_DEBUG_USB,
2720 "data overflow, %u bytes\n", m->m_pkthdr.len);
2721 mtw_tx_free(pq, data, 1);
2722 goto tr_setup;
2723 }
2724
2725 pc = usbd_xfer_get_frame(xfer, 0);
2726 usbd_copy_in(pc, 0, &data->desc, size);
2727 usbd_m_copy_in(pc, size, m, 0, m->m_pkthdr.len);
2728 size += m->m_pkthdr.len;
2729 /*
2730 * Align end on a 4-byte boundary, pad 8 bytes (CRC +
2731 * 4-byte padding), and be sure to zero those trailing
2732 * bytes:
2733 */
2734 usbd_frame_zero(pc, size, ((-size) & 3) + MTW_DMA_PAD);
2735 size += ((-size) & 3) + MTW_DMA_PAD;
2736
2737 vap = data->ni->ni_vap;
2738 if (ieee80211_radiotap_active_vap(vap)) {
2739 const struct ieee80211_frame *wh;
2740 struct mtw_tx_radiotap_header *tap = &sc->sc_txtap;
2741 struct mtw_txwi *txwi =
2742 (struct mtw_txwi *)(&data->desc +
2743 sizeof(struct mtw_txd));
2744 int has_l2pad;
2745
2746 wh = mtod(m, struct ieee80211_frame *);
2747 has_l2pad = IEEE80211_HAS_ADDR4(wh) !=
2748 IEEE80211_QOS_HAS_SEQ(wh);
2749
2750 tap->wt_flags = 0;
2751 tap->wt_rate = rt2860_rates[data->ridx].rate;
2752 tap->wt_hwqueue = index;
2753 if (le16toh(txwi->phy) & MTW_PHY_SHPRE)
2754 tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2755 if (has_l2pad)
2756 tap->wt_flags |= IEEE80211_RADIOTAP_F_DATAPAD;
2757
2758 ieee80211_radiotap_tx(vap, m);
2759 }
2760
2761 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2762 "sending frame len=%u/%u @ index %d\n", m->m_pkthdr.len,
2763 size, index);
2764
2765 usbd_xfer_set_frame_len(xfer, 0, size);
2766 usbd_xfer_set_priv(xfer, data);
2767 usbd_transfer_submit(xfer);
2768 mtw_start(sc);
2769
2770 break;
2771
2772 default:
2773 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2774 "USB transfer error, %s\n", usbd_errstr(error));
2775
2776 data = usbd_xfer_get_priv(xfer);
2777
2778 if (data != NULL) {
2779 if (data->ni != NULL)
2780 vap = data->ni->ni_vap;
2781 mtw_tx_free(pq, data, error);
2782 usbd_xfer_set_priv(xfer, NULL);
2783 }
2784
2785 if (vap == NULL)
2786 vap = TAILQ_FIRST(&ic->ic_vaps);
2787
2788 if (error != USB_ERR_CANCELLED) {
2789 if (error == USB_ERR_TIMEOUT) {
2790 device_printf(sc->sc_dev, "device timeout %s\n",
2791 __func__);
2792 uint32_t i = MTW_CMDQ_GET(&sc->cmdq_store);
2793 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2794 "cmdq_store=%d\n", i);
2795 sc->cmdq[i].func = mtw_usb_timeout_cb;
2796 sc->cmdq[i].arg0 = vap;
2797 ieee80211_runtask(ic, &sc->cmdq_task);
2798 }
2799
2800 /*
2801 * Try to clear stall first, also if other
2802 * errors occur, hence clearing stall
2803 * introduces a 50 ms delay:
2804 */
2805 usbd_xfer_set_stall(xfer);
2806 goto tr_setup;
2807 }
2808 break;
2809 }
2810 #ifdef IEEE80211_SUPPORT_SUPERG
2811 /* XXX TODO: make this deferred rather than unlock/relock */
2812 /* XXX TODO: should only do the QoS AC this belongs to */
2813 if (pq->tx_nfree >= MTW_TX_RING_COUNT) {
2814 MTW_UNLOCK(sc);
2815 ieee80211_ff_flush_all(ic);
2816 MTW_LOCK(sc);
2817 }
2818 #endif
2819 }
2820
2821 static void
2822 mtw_fw_callback(struct usb_xfer *xfer, usb_error_t error)
2823 {
2824 struct mtw_softc *sc = usbd_xfer_softc(xfer);
2825
2826 int actlen;
2827 int ntries, tmp;
2828 // struct mtw_txd *data;
2829
2830 usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
2831 // data = usbd_xfer_get_priv(xfer);
2832 usbd_xfer_set_priv(xfer, NULL);
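/*
 * Rough flow of the upload below: the firmware image is pushed in
 * several DMA chunks indexed by sc_idx; after the final chunk the
 * initial vector block (IVB) is written and MTW_MCU_DMA_ADDR is
 * polled until the MCU reports ready.
 */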
2833 switch (USB_GET_STATE(xfer)) {
2834
2835 case USB_ST_TRANSFERRED:
2836 sc->sc_sent += actlen;
2837 memset(sc->txd_fw[sc->sc_idx], 0, actlen);
2838
2839 if (actlen < 0x2c44 && sc->sc_idx == 0) {
2840 return;
2841 }
2842 if (sc->sc_idx == 3) {
2843
2844 if ((error = mtw_write_ivb(sc, sc->sc_ivb_1,
2845 MTW_MCU_IVB_LEN)) != 0) {
2846 device_printf(sc->sc_dev,
2847 "Could not write ivb error: %d\n", error);
2848 }
2849
2850 mtw_delay(sc, 10);
2851 for (ntries = 0; ntries < 100; ntries++) {
2852 if ((error = mtw_read_cfg(sc, MTW_MCU_DMA_ADDR,
2853 &tmp)) != 0) {
2854 device_printf(sc->sc_dev,
2855 "Could not read cfg error: %d\n", error);
2856
2857 }
2858 if (tmp == MTW_MCU_READY) {
2859 MTW_DPRINTF(sc, MTW_DEBUG_FIRMWARE,
2860 "mcu reaady %d\n", tmp);
2861 sc->fwloading = 1;
2862 break;
2863 }
2864
2865 mtw_delay(sc, 10);
2866 }
2867 if (ntries == 100)
2868 sc->fwloading = 0;
2869 wakeup(&sc->fwloading);
2870 return;
2871 }
2872
2873 if (actlen == 0x2c44) {
2874 sc->sc_idx++;
2875 DELAY(1000);
2876 }
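/* FALLTHROUGH */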
2877
2878 case USB_ST_SETUP: {
2879 int dlen = 0;
2880 dlen = sc->txd_fw[sc->sc_idx]->len;
2881
2882 mtw_write_cfg(sc, MTW_MCU_DMA_ADDR, 0x40 + sc->sc_sent);
2883 mtw_write_cfg(sc, MTW_MCU_DMA_LEN, (dlen << 16));
2884
2885 usbd_xfer_set_frame_len(xfer, 0, dlen);
2886 usbd_xfer_set_frame_data(xfer, 0, sc->txd_fw[sc->sc_idx], dlen);
2887
2888 // usbd_xfer_set_priv(xfer,sc->txd[sc->sc_idx]);
2889 usbd_transfer_submit(xfer);
2890 break;
2891
2892 default: /* Error */
2893 device_printf(sc->sc_dev, "%s:%d %s\n", __FILE__, __LINE__,
2894 usbd_errstr(error));
2895 sc->fwloading = 0;
2896 wakeup(&sc->fwloading);
2897 /*
2898  * The transfer failed; the firmware load has been aborted and
2899  * the thread waiting on fwloading has been woken up above.
2900 */
2901 break;
2902 }
2903 /*
2904 * Here it is safe to do something without the private
2905 * USB mutex locked.
2906 */
2907 }
2908 return;
2909 }
2910 static void
2911 mtw_bulk_tx_callback0(struct usb_xfer *xfer, usb_error_t error)
2912 {
2913 mtw_bulk_tx_callbackN(xfer, error, 0);
2914 }
2915
2916 static void
2917 mtw_bulk_tx_callback1(struct usb_xfer *xfer, usb_error_t error)
2918 {
2919
2920
2921 mtw_bulk_tx_callbackN(xfer, error, 1);
2922 }
2923
2924 static void
2925 mtw_bulk_tx_callback2(struct usb_xfer *xfer, usb_error_t error)
2926 {
2927 mtw_bulk_tx_callbackN(xfer, error, 2);
2928 }
2929
2930 static void
2931 mtw_bulk_tx_callback3(struct usb_xfer *xfer, usb_error_t error)
2932 {
2933 mtw_bulk_tx_callbackN(xfer, error, 3);
2934 }
2935
2936 static void
2937 mtw_bulk_tx_callback4(struct usb_xfer *xfer, usb_error_t error)
2938 {
2939 mtw_bulk_tx_callbackN(xfer, error, 4);
2940 }
2941
2942 static void
2943 mtw_bulk_tx_callback5(struct usb_xfer *xfer, usb_error_t error)
2944 {
2945 mtw_bulk_tx_callbackN(xfer, error, 5);
2946 }
2947
2948 static void
2949 mtw_set_tx_desc(struct mtw_softc *sc, struct mtw_tx_data *data)
2950 {
2951 struct mbuf *m = data->m;
2952 struct ieee80211com *ic = &sc->sc_ic;
2953 struct ieee80211vap *vap = data->ni->ni_vap;
2954 struct ieee80211_frame *wh;
2955 struct mtw_txd *txd;
2956 struct mtw_txwi *txwi;
2957 uint16_t xferlen, txwisize;
2958 uint16_t mcs;
2959 uint8_t ridx = data->ridx;
2960 uint8_t pad;
2961
2962 /* get MCS code from rate index */
2963 mcs = rt2860_rates[ridx].mcs;
2964
2965 txwisize = sizeof(*txwi);
2966 xferlen = txwisize + m->m_pkthdr.len;
2967
2968 /* roundup to 32-bit alignment */
2969 xferlen = (xferlen + 3) & ~3;
2970
2971 txd = (struct mtw_txd *)&data->desc;
2972 txd->len = htole16(xferlen);
2973
2974 wh = mtod(m, struct ieee80211_frame *);
2975
2976 /*
2977  * When both are true, or both are false, the header is already
2978  * 32-bit aligned, so no L2 padding is needed.
2979 */
2980 if (IEEE80211_HAS_ADDR4(wh) == IEEE80211_QOS_HAS_SEQ(wh))
2981 pad = 0;
2982 else
2983 pad = 2;
2984
2985 /* setup TX Wireless Information */
2986 txwi = (struct mtw_txwi *)(txd + 1);
2987 txwi->len = htole16(m->m_pkthdr.len - pad);
2988 if (rt2860_rates[ridx].phy == IEEE80211_T_DS) {
2989 mcs |= MTW_PHY_CCK;
2990 if (ridx != MTW_RIDX_CCK1 &&
2991 (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
2992 mcs |= MTW_PHY_SHPRE;
2993 } else if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM) {
2994 mcs |= MTW_PHY_OFDM;
2995 } else if (rt2860_rates[ridx].phy == IEEE80211_T_HT) {
2996 /* XXX TODO: [adrian] set short preamble for MCS? */
2997 mcs |= MTW_PHY_HT; /* Mixed, not greenfield */
2998 }
2999 txwi->phy = htole16(mcs);
3000
3001 /* check if RTS/CTS or CTS-to-self protection is required */
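/*
 * i.e. unicast frames above the RTS threshold, OFDM frames while ERP
 * protection is in use, or HT frames when RTS/CTS HT protection is
 * configured.
 */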
3002 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3003 ((m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) ||
3004 ((ic->ic_flags & IEEE80211_F_USEPROT) &&
3005 rt2860_rates[ridx].phy == IEEE80211_T_OFDM) ||
3006 ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
3007 rt2860_rates[ridx].phy == IEEE80211_T_HT)))
3008 txwi->txop |= MTW_TX_TXOP_HT;
3009 else
3010 txwi->txop |= MTW_TX_TXOP_BACKOFF;
3011
3012 }
3013
3014 /* This function must be called locked */
3015 static int
3016 mtw_tx(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3017 {
3018 struct ieee80211com *ic = &sc->sc_ic;
3019 struct ieee80211vap *vap = ni->ni_vap;
3020 struct ieee80211_frame *wh;
3021
3022
3023 //const struct ieee80211_txparam *tp = ni->ni_txparms;
3024 struct mtw_node *rn = MTW_NODE(ni);
3025 struct mtw_tx_data *data;
3026 struct mtw_txd *txd;
3027 struct mtw_txwi *txwi;
3028 uint16_t qos;
3029 uint16_t dur;
3030 uint16_t qid;
3031 uint8_t type;
3032 uint8_t tid;
3033 uint16_t ridx;
3034 uint8_t ctl_ridx;
3035 uint16_t qflags;
3036 uint8_t xflags = 0;
3037
3038 int hasqos;
3039
3040 MTW_LOCK_ASSERT(sc, MA_OWNED);
3041
3042 wh = mtod(m, struct ieee80211_frame *);
3043 const struct ieee80211_txparam *tp = ni->ni_txparms;
3044 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3045
3046 qflags = htole16(MTW_TXD_DATA | MTW_TXD_80211 |
3047 MTW_TXD_WLAN | MTW_TXD_QSEL_HCCA);
3048
3049 if ((hasqos = IEEE80211_QOS_HAS_SEQ(wh))) {
3050 uint8_t *frm;
3051 frm = ieee80211_getqos(wh);
3052
3055 qos = le16toh(*(const uint16_t *)frm);
3056 tid = ieee80211_gettid(wh);
3057 qid = TID_TO_WME_AC(tid);
3058 qflags |= MTW_TXD_QSEL_EDCA;
3059 } else {
3060 qos = 0;
3061 tid = 0;
3062 qid = WME_AC_BE;
3063 }
3064 if (type & IEEE80211_FC0_TYPE_MGT) {
3065 qid = 0;
3066 }
3067
3068 if (type != IEEE80211_FC0_TYPE_DATA)
3069 qflags |= htole16(MTW_TXD_WIV);
3070
3071 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3072 type != IEEE80211_FC0_TYPE_DATA || m->m_flags & M_EAPOL) {
3073 /* XXX TODO: methodize for 11n; use MCS0 for 11NA/11NG */
3074 ridx = (ic->ic_curmode == IEEE80211_MODE_11A
3075 || ic->ic_curmode == IEEE80211_MODE_11NA) ?
3076 MTW_RIDX_OFDM6 : MTW_RIDX_CCK1;
3077 if (ic->ic_curmode == IEEE80211_MODE_11NG) {
3078 ridx = 12;
3079 }
3080 ctl_ridx = rt2860_rates[ridx].ctl_ridx;
3081 } else {
3082 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3083 ridx = rn->fix_ridx;
3084 ctl_ridx = rt2860_rates[ridx].ctl_ridx;
3085 } else {
3086 ridx = rn->amrr_ridx;
3087 ctl_ridx = rt2860_rates[ridx].ctl_ridx;
3088 }
3089 }
3090
3091 if (hasqos)
3092 xflags = 0;
3093 else
3094 xflags = MTW_TX_NSEQ;
3095
3096 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3097 (!hasqos ||
3098 (qos & IEEE80211_QOS_ACKPOLICY) !=
3099 IEEE80211_QOS_ACKPOLICY_NOACK)) {
3100 xflags |= MTW_TX_ACK;
3101 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
3102 dur = rt2860_rates[ctl_ridx].sp_ack_dur;
3103 else
3104 dur = rt2860_rates[ctl_ridx].lp_ack_dur;
3105 USETW(wh->i_dur, dur);
3106 }
3107 /* reserve slots for mgmt packets, just in case */
3108 if (sc->sc_epq[qid].tx_nfree < 3) {
3109 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "tx ring %d is full\n", qid);
3110 return (-1);
3111 }
3112
3113 data = STAILQ_FIRST(&sc->sc_epq[qid].tx_fh);
3114 STAILQ_REMOVE_HEAD(&sc->sc_epq[qid].tx_fh, next);
3115 sc->sc_epq[qid].tx_nfree--;
3116
3117 txd = (struct mtw_txd *)&data->desc;
3118 txd->flags = qflags;
3119
3120 txwi = (struct mtw_txwi *)(txd + 1);
3121 txwi->xflags = xflags;
3122 txwi->wcid = (type == IEEE80211_FC0_TYPE_DATA) ?
3123     MTW_AID2WCID(ni->ni_associd) : 0xff;
3124
3126
3127 /* clear leftover garbage bits */
3128 txwi->flags = 0;
3129 txwi->txop = 0;
3130
3131 data->m = m;
3132 data->ni = ni;
3133 data->ridx = ridx;
3134
3135 ieee80211_output_seqno_assign(ni, -1, m);
3136
3137 mtw_set_tx_desc(sc, data);
3138
3139 /*
3140  * The chip keeps track of two kinds of Tx stats:
3141  * * TX_STAT_FIFO, for per-WCID stats, and
3142  * * TX_STA_CNT0, for aggregate all-TX-in-one stats.
3143  *
3144  * To use the FIFO stats we store the MCS in the driver-private
3145  * PacketID field, so we can tell whose stats they are when we
3146  * read them. We add 1 to the MCS because a PacketID of 0 means
3147  * we don't want feedback in TX_STAT_FIFO, which is exactly what
3148  * we want in STA mode since TX_STA_CNT0 already does the job
3149  * there.
3150  *
3151  * FIFO stats don't count Tx with WCID 0xff, so we do this here
3152  * in mtw_tx().
3153 */
3154
3155 if (sc->rvp_cnt > 1 || vap->iv_opmode == IEEE80211_M_HOSTAP ||
3156 vap->iv_opmode == IEEE80211_M_MBSS) {
3157
3158 /*
3159  * Unlike PCI based devices, we don't get any interrupt from
3160  * USB devices, so we simulate a FIFO-is-full interrupt here.
3161  * Ralink recommends draining the FIFO stats every 100 ms, but
3162  * the 16 slots fill up quickly. To prevent overflow, increment
3163  * a counter on every FIFO stat request so we know how many
3164  * slots are left. We do this only in HOSTAP or multi-vap mode,
3165  * since FIFO stats are used only there. We just drain the
3166  * stats; AMRR itself is updated every second by mtw_ratectl_cb()
3167  * via the callout. Drain early, otherwise the FIFO overflows.
3168 */
3169 if (sc->fifo_cnt++ == 10) {
3170 /*
3171 * With multiple vaps or if_bridge, if_start() is called
3172 * with a non-sleepable lock, tcpinp. So, need to defer.
3173 */
3174 uint32_t i = MTW_CMDQ_GET(&sc->cmdq_store);
3175 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "cmdq_store=%d\n", i);
3176 sc->cmdq[i].func = mtw_drain_fifo;
3177 sc->cmdq[i].arg0 = sc;
3178 ieee80211_runtask(ic, &sc->cmdq_task);
3179 }
3180 }
3181
3182 STAILQ_INSERT_TAIL(&sc->sc_epq[qid].tx_qh, data, next);
3183 usbd_transfer_start(sc->sc_xfer[mtw_wme_ac_xfer_map[qid]]);
3184
3185 MTW_DPRINTF(sc, MTW_DEBUG_XMIT,
3186 "sending data frame len=%d rate=%d qid=%d\n",
3187 m->m_pkthdr.len +
3188 (int)(sizeof(struct mtw_txd) + sizeof(struct mtw_txwi)),
3189 rt2860_rates[ridx].rate, qid);
3190
3191 return (0);
3192 }
3193
3194 static int
3195 mtw_tx_mgt(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3196 {
3197 struct ieee80211com *ic = &sc->sc_ic;
3198 struct mtw_node *rn = MTW_NODE(ni);
3199 struct mtw_tx_data *data;
3200 struct ieee80211_frame *wh;
3201 struct mtw_txd *txd;
3202 struct mtw_txwi *txwi;
3203 uint8_t type;
3204 uint16_t dur;
3205 uint8_t ridx = rn->mgt_ridx;
3206 uint8_t xflags = 0;
3207 uint8_t wflags = 0;
3208
3209 MTW_LOCK_ASSERT(sc, MA_OWNED);
3210
3211 wh = mtod(m, struct ieee80211_frame *);
3212
3213 /* tell hardware to add timestamp for probe responses */
3214 if ((wh->i_fc[0] &
3215 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
3216 (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
3217 wflags |= MTW_TX_TS;
3218 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3219 xflags |= MTW_TX_ACK;
3220
3221 dur = ieee80211_ack_duration(ic->ic_rt, rt2860_rates[ridx].rate,
3222 ic->ic_flags & IEEE80211_F_SHPREAMBLE);
3223 USETW(wh->i_dur, dur);
3224 }
3225 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3226 if (sc->sc_epq[0].tx_nfree == 0)
3227 /* let caller free mbuf */
3228 return (EIO);
3229 data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
3230 STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
3231 sc->sc_epq[0].tx_nfree--;
3232
3233 txd = (struct mtw_txd *)&data->desc;
3234 txd->flags = htole16(
3235 MTW_TXD_DATA | MTW_TXD_80211 | MTW_TXD_WLAN | MTW_TXD_QSEL_EDCA);
3236 if (type != IEEE80211_FC0_TYPE_DATA)
3237 txd->flags |= htole16(MTW_TXD_WIV);
3238
3239 txwi = (struct mtw_txwi *)(txd + 1);
3240 txwi->wcid = 0xff;
3241 txwi->xflags = xflags;
3242 txwi->flags = wflags;
3243
3244 txwi->txop = 0; /* clear leftover garbage bits */
3245
3246 data->m = m;
3247 data->ni = ni;
3248 data->ridx = ridx;
3249
3250 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending mgt frame len=%d rate=%d\n",
3251 m->m_pkthdr.len +
3252 (int)(sizeof(struct mtw_txd) + sizeof(struct mtw_txwi)),
3253 rt2860_rates[ridx].rate);
3254
3255 STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);
3256
3257 usbd_transfer_start(sc->sc_xfer[MTW_BULK_TX_BE]);
3258
3259 return (0);
3260 }
3261
3262 static int
3263 mtw_sendprot(struct mtw_softc *sc, const struct mbuf *m,
3264 struct ieee80211_node *ni, int prot, int rate)
3265 {
3266 struct ieee80211com *ic = ni->ni_ic;
3267 struct mtw_tx_data *data;
3268 struct mtw_txd *txd;
3269 struct mtw_txwi *txwi;
3270 struct mbuf *mprot;
3271 int ridx;
3272 int protrate;
3273 uint8_t wflags = 0;
3274 uint8_t xflags = 0;
3275
3276 MTW_LOCK_ASSERT(sc, MA_OWNED);
3277
3278 /* check that there are free slots before allocating the mbuf */
3279 if (sc->sc_epq[0].tx_nfree == 0)
3280 /* let caller free mbuf */
3281 return (ENOBUFS);
3282
3283 mprot = ieee80211_alloc_prot(ni, m, rate, prot);
3284 if (mprot == NULL) {
3285 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
3286 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "could not allocate mbuf\n");
3287 return (ENOBUFS);
3288 }
3289
3290 protrate = ieee80211_ctl_rate(ic->ic_rt, rate);
3291 wflags = MTW_TX_FRAG;
3292 xflags = 0;
3293 if (prot == IEEE80211_PROT_RTSCTS)
3294 xflags |= MTW_TX_ACK;
3295
3296 data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
3297 STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
3298 sc->sc_epq[0].tx_nfree--;
3299
3300 txd = (struct mtw_txd *)&data->desc;
3301 txd->flags = RT2860_TX_QSEL_EDCA;
3302 txwi = (struct mtw_txwi *)(txd + 1);
3303 txwi->wcid = 0xff;
3304 txwi->flags = wflags;
3305 txwi->xflags = xflags;
3306 txwi->txop = 0; /* clear leftover garbage bits */
3307
3308 data->m = mprot;
3309 data->ni = ieee80211_ref_node(ni);
3310
3311 /* XXX TODO: methodize with MCS rates */
3312 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
3313 if (rt2860_rates[ridx].rate == protrate)
3314 break;
3315 data->ridx = ridx;
3316
3317 mtw_set_tx_desc(sc, data);
3318 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending prot len=%u rate=%u\n",
3319 m->m_pkthdr.len, rate);
3320
3321 STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);
3322
3323 usbd_transfer_start(sc->sc_xfer[0]);
3324
3325 return (0);
3326 }
3327
3328 static int
3329 mtw_tx_param(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
3330 const struct ieee80211_bpf_params *params)
3331 {
3332 struct ieee80211com *ic = ni->ni_ic;
3333 struct mtw_tx_data *data;
3334 struct mtw_txd *txd;
3335 struct mtw_txwi *txwi;
3336 uint8_t ridx;
3337 uint8_t rate;
3338 uint8_t opflags = 0;
3339 uint8_t xflags = 0;
3340 int error;
3341
3342 MTW_LOCK_ASSERT(sc, MA_OWNED);
3343
3344 KASSERT(params != NULL, ("no raw xmit params"));
3345
3346 rate = params->ibp_rate0;
3347 if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
3348 /* let caller free mbuf */
3349 return (EINVAL);
3350 }
3351
3352 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
3353 xflags |= MTW_TX_ACK;
3354 if (params->ibp_flags & (IEEE80211_BPF_RTS | IEEE80211_BPF_CTS)) {
3355 error = mtw_sendprot(sc, m, ni,
3356 params->ibp_flags & IEEE80211_BPF_RTS ?
3357 IEEE80211_PROT_RTSCTS :
3358 IEEE80211_PROT_CTSONLY,
3359 rate);
3360 if (error) {
3361 device_printf(sc->sc_dev, "%s:%d %d\n", __FILE__,
3362 __LINE__, error);
3363 return (error);
3364 }
3365 opflags |= MTW_TX_TXOP_SIFS;
3366 }
3367
3368 if (sc->sc_epq[0].tx_nfree == 0) {
3369 /* let caller free mbuf */
3370 MTW_DPRINTF(sc, MTW_DEBUG_XMIT,
3371 "sending raw frame, but tx ring is full\n");
3372 return (EIO);
3373 }
3374 data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
3375 STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
3376 sc->sc_epq[0].tx_nfree--;
3377
3378 txd = (struct mtw_txd *)&data->desc;
3379 txd->flags = htole16(
3380 MTW_TXD_DATA | MTW_TXD_80211 | MTW_TXD_WLAN | MTW_TXD_QSEL_EDCA);
3381 // txd->flags = htole16(MTW_TXD_QSEL_EDCA);
3382 txwi = (struct mtw_txwi *)(txd + 1);
3383 txwi->wcid = 0xff;
3384 txwi->xflags = xflags;
3385 txwi->txop = opflags;
3386 txwi->flags = 0; /* clear leftover garbage bits */
3387
3388 data->m = m;
3389 data->ni = ni;
3390 /* XXX TODO: methodize with MCS rates */
3391 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
3392 if (rt2860_rates[ridx].rate == rate)
3393 break;
3394 data->ridx = ridx;
3395
3396 ieee80211_output_seqno_assign(ni, -1, m);
3397
3398 mtw_set_tx_desc(sc, data);
3399
3400 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending raw frame len=%u rate=%u\n",
3401 m->m_pkthdr.len, rate);
3402
3403 STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);
3404
3405 usbd_transfer_start(sc->sc_xfer[MTW_BULK_RAW_TX]);
3406
3407 return (0);
3408 }
3409
3410 static int
3411 mtw_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3412 const struct ieee80211_bpf_params *params)
3413 {
3414 struct mtw_softc *sc = ni->ni_ic->ic_softc;
3415 int error = 0;
3416 MTW_LOCK(sc);
3417 /* prevent management frames from being sent if we're not ready */
3418 if (!(sc->sc_flags & MTW_RUNNING)) {
3419 error = ENETDOWN;
3420 goto done;
3421 }
3422
3423 if (params == NULL) {
3424 /* tx mgt packet */
3425 if ((error = mtw_tx_mgt(sc, m, ni)) != 0) {
3426 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "mgt tx failed\n");
3427 goto done;
3428 }
3429 } else {
3430 /* tx raw packet with param */
3431 if ((error = mtw_tx_param(sc, m, ni, params)) != 0) {
3432 MTW_DPRINTF(sc, MTW_DEBUG_XMIT,
3433 "tx with param failed\n");
3434 goto done;
3435 }
3436 }
3437
3438 done:
3439
3440 MTW_UNLOCK(sc);
3441
3442 if (error != 0) {
3443 if (m != NULL)
3444 m_freem(m);
3445 }
3446
3447 return (error);
3448 }
3449
3450 static int
3451 mtw_transmit(struct ieee80211com *ic, struct mbuf *m)
3452 {
3453 struct mtw_softc *sc = ic->ic_softc;
3454 int error;
3455 MTW_LOCK(sc);
3456 if ((sc->sc_flags & MTW_RUNNING) == 0) {
3457 MTW_UNLOCK(sc);
3458 return (ENXIO);
3459 }
3460 error = mbufq_enqueue(&sc->sc_snd, m);
3461 if (error) {
3462 MTW_UNLOCK(sc);
3463 return (error);
3464 }
3465 mtw_start(sc);
3466 MTW_UNLOCK(sc);
3467
3468 return (0);
3469 }
3470
3471 static void
3472 mtw_start(struct mtw_softc *sc)
3473 {
3474 struct ieee80211_node *ni;
3475 struct mbuf *m;
3476
3477 MTW_LOCK_ASSERT(sc, MA_OWNED);
3478
3479 if ((sc->sc_flags & MTW_RUNNING) == 0) {
3480
3481 return;
3482 }
3483 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
3484 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3485 if (mtw_tx(sc, m, ni) != 0) {
3486 mbufq_prepend(&sc->sc_snd, m);
3487 break;
3488 }
3489 }
3490 }
3491
3492 static void
3493 mtw_parent(struct ieee80211com *ic)
3494 {
3495
3496 struct mtw_softc *sc = ic->ic_softc;
3497
3498 MTW_LOCK(sc);
3499 if (sc->sc_detached) {
3500 MTW_UNLOCK(sc);
3501 return;
3502 }
3503
3504 if (!(sc->sc_flags & MTW_RUNNING) && ic->ic_nrunning > 0) {
3505 mtw_init_locked(sc);
3506 MTW_UNLOCK(sc);
3507 ieee80211_start_all(ic);
3508 return;
3509 }
3510 if ((sc->sc_flags & MTW_RUNNING) && ic->ic_nrunning > 0) {
3511 mtw_update_promisc_locked(sc);
3512 MTW_UNLOCK(sc);
3513 return;
3514 }
3515 if ((sc->sc_flags & MTW_RUNNING) && sc->rvp_cnt <= 1 &&
3516 ic->ic_nrunning == 0) {
3517 mtw_stop(sc);
3518 MTW_UNLOCK(sc);
3519 return;
3520 }
3521 return;
3522 }
3523
3524 static void
3525 mt7601_set_agc(struct mtw_softc *sc, uint8_t agc)
3526 {
3527 uint8_t bbp;
3528
3529 mtw_bbp_write(sc, 66, agc);
3530 mtw_bbp_write(sc, 195, 0x87);
3531 bbp = (agc & 0xf0) | 0x08;
3532 mtw_bbp_write(sc, 196, bbp);
3533 }
3534
3535 static int
3536 mtw_mcu_calibrate(struct mtw_softc *sc, int func, uint32_t val)
3537 {
3538 struct mtw_mcu_cmd_8 cmd;
3539
3540 cmd.func = htole32(func);
3541 cmd.val = htole32(val);
3542 return (mtw_mcu_cmd(sc, 31, &cmd, sizeof(struct mtw_mcu_cmd_8)));
3543 }
3544
3545 static int
3546 mtw_rf_write(struct mtw_softc *sc, uint8_t bank, uint8_t reg, uint8_t val)
3547 {
3548 uint32_t tmp;
3549 int error, ntries, shift;
3550
3551 for (ntries = 0; ntries < 10; ntries++) {
3552 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
3553 return (error);
3554 if (!(tmp & MTW_RF_CSR_KICK))
3555 break;
3556 }
3557 if (ntries == 10)
3558 return (ETIMEDOUT);
3559
3560 if (sc->asic_ver == 0x7601)
3561 shift = MT7601_BANK_SHIFT;
3562 else
3563 shift = MT7610_BANK_SHIFT;
3564
3565 tmp = MTW_RF_CSR_WRITE | MTW_RF_CSR_KICK | (bank & 0xf) << shift |
3566 reg << 8 | val;
3567 return (mtw_write(sc, MTW_RF_CSR, tmp));
3568 }
3569
3570 void
3571 mtw_select_chan_group(struct mtw_softc *sc, int group)
3572 {
3573 uint32_t tmp;
3574 uint8_t bbp;
3575
3576 /* Tx band 20MHz 2G */
3577 mtw_read(sc, MTW_TX_BAND_CFG, &tmp);
3578 tmp &= ~(
3579 MTW_TX_BAND_SEL_2G | MTW_TX_BAND_SEL_5G | MTW_TX_BAND_UPPER_40M);
3580 tmp |= (group == 0) ? MTW_TX_BAND_SEL_2G : MTW_TX_BAND_SEL_5G;
3581 mtw_write(sc, MTW_TX_BAND_CFG, tmp);
3582
3583 /* select 20 MHz bandwidth */
3584 mtw_bbp_read(sc, 4, &bbp);
3585 bbp &= ~0x18;
3586 bbp |= 0x40;
3587 mtw_bbp_write(sc, 4, bbp);
3588
3589 /* calibrate BBP */
3590 mtw_bbp_write(sc, 69, 0x12);
3591 mtw_bbp_write(sc, 91, 0x07);
3592 mtw_bbp_write(sc, 195, 0x23);
3593 mtw_bbp_write(sc, 196, 0x17);
3594 mtw_bbp_write(sc, 195, 0x24);
3595 mtw_bbp_write(sc, 196, 0x06);
3596 mtw_bbp_write(sc, 195, 0x81);
3597 mtw_bbp_write(sc, 196, 0x12);
3598 mtw_bbp_write(sc, 195, 0x83);
3599 mtw_bbp_write(sc, 196, 0x17);
3600 mtw_rf_write(sc, 5, 8, 0x00);
3601 // mtw_mcu_calibrate(sc, 0x6, 0x10001);
3602
3603 /* set initial AGC value */
3604 mt7601_set_agc(sc, 0x14);
3605 }
3606
3607 static int
3608 mtw_rf_read(struct mtw_softc *sc, uint8_t bank, uint8_t reg, uint8_t *val)
3609 {
3610 uint32_t tmp;
3611 int error, ntries, shift;
3612
3613 for (ntries = 0; ntries < 100; ntries++) {
3614 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
3615 return (error);
3616 if (!(tmp & MTW_RF_CSR_KICK))
3617 break;
3618 }
3619 if (ntries == 100)
3620 return (ETIMEDOUT);
3621
3622 if (sc->asic_ver == 0x7601)
3623 shift = MT7601_BANK_SHIFT;
3624 else
3625 shift = MT7610_BANK_SHIFT;
3626
3627 tmp = MTW_RF_CSR_KICK | (bank & 0xf) << shift | reg << 8;
3628 if ((error = mtw_write(sc, MTW_RF_CSR, tmp)) != 0)
3629 return (error);
3630
3631 for (ntries = 0; ntries < 100; ntries++) {
3632 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
3633 return (error);
3634 if (!(tmp & MTW_RF_CSR_KICK))
3635 break;
3636 }
3637 if (ntries == 100)
3638 return (ETIMEDOUT);
3639
3640 *val = tmp & 0xff;
3641 return (0);
3642 }
3643 static void
3644 mt7601_set_chan(struct mtw_softc *sc, u_int chan)
3645 {
3646 uint32_t tmp;
3647 uint8_t bbp, rf, txpow1;
3648 int i;
3649 /* find the settings for this channel */
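/* note: assumes chan is always present in mt7601_rf_chan[]; only 2 GHz
 * channels are advertised via mtw_getradiocaps() */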
3650 for (i = 0; mt7601_rf_chan[i].chan != chan; i++)
3651 ;
3652
3653 mtw_rf_write(sc, 0, 17, mt7601_rf_chan[i].r17);
3654 mtw_rf_write(sc, 0, 18, mt7601_rf_chan[i].r18);
3655 mtw_rf_write(sc, 0, 19, mt7601_rf_chan[i].r19);
3656 mtw_rf_write(sc, 0, 20, mt7601_rf_chan[i].r20);
3657
3658 /* use Tx power values from EEPROM */
3659 txpow1 = sc->txpow1[i];
3660
3661 /* Tx automatic level control */
3662 mtw_read(sc, MTW_TX_ALC_CFG0, &tmp);
3663 tmp &= ~0x3f3f;
3664 tmp |= (txpow1 & 0x3f);
3665 mtw_write(sc, MTW_TX_ALC_CFG0, tmp);
3666
3667 /* LNA */
3668 mtw_bbp_write(sc, 62, 0x37 - sc->lna[0]);
3669 mtw_bbp_write(sc, 63, 0x37 - sc->lna[0]);
3670 mtw_bbp_write(sc, 64, 0x37 - sc->lna[0]);
3671
3672 /* VCO calibration */
3673 mtw_rf_write(sc, 0, 4, 0x0a);
3674 mtw_rf_write(sc, 0, 5, 0x20);
3675 mtw_rf_read(sc, 0, 4, &rf);
3676 mtw_rf_write(sc, 0, 4, rf | 0x80);
3677
3678 /* select 20 MHz bandwidth */
3679 mtw_bbp_read(sc, 4, &bbp);
3680 bbp &= ~0x18;
3681 bbp |= 0x40;
3682 mtw_bbp_write(sc, 4, bbp);
3683 mtw_bbp_write(sc, 178, 0xff);
3684 }
3685
3686 static int
3687 mtw_set_chan(struct mtw_softc *sc, struct ieee80211_channel *c)
3688 {
3689 struct ieee80211com *ic = &sc->sc_ic;
3690 u_int chan, group;
3691
3692 chan = ieee80211_chan2ieee(ic, c);
3693 if (chan == 0 || chan == IEEE80211_CHAN_ANY)
3694 return (EINVAL);
3695
3696 /* determine channel group */
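/* group 0 covers the 2 GHz band; groups 1-3 split up the 5 GHz band */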
3697 if (chan <= 14)
3698 group = 0;
3699 else if (chan <= 64)
3700 group = 1;
3701 else if (chan <= 128)
3702 group = 2;
3703 else
3704 group = 3;
3705
3706 if (group != sc->sc_chan_group || !sc->sc_bw_calibrated)
3707 mtw_select_chan_group(sc, group);
3708
3709 sc->sc_chan_group = group;
3710
3711 /* chipset specific */
3712 if (sc->asic_ver == 0x7601)
3713 mt7601_set_chan(sc, chan);
3714
3715 DELAY(1000);
3716 return (0);
3717 }
3718
3719 static void
3720 mtw_set_channel(struct ieee80211com *ic)
3721 {
3722 struct mtw_softc *sc = ic->ic_softc;
3723
3724 MTW_LOCK(sc);
3725 mtw_set_chan(sc, ic->ic_curchan);
3726 MTW_UNLOCK(sc);
3727
3728 return;
3729 }
3730
3731 static void
3732 mtw_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans,
3733 struct ieee80211_channel chans[])
3734 {
3735 // struct mtw_softc *sc = ic->ic_softc;
3736 uint8_t bands[IEEE80211_MODE_BYTES];
3737
3738 memset(bands, 0, sizeof(bands));
3739 setbit(bands, IEEE80211_MODE_11B);
3740 setbit(bands, IEEE80211_MODE_11G);
3741 setbit(bands, IEEE80211_MODE_11NG);
3742
3743 /* Note: for now, only support HT20 channels */
3744 ieee80211_add_channels_default_2ghz(chans, maxchans, nchans, bands, 0);
3745 }
3746
3747 static void
3748 mtw_scan_start(struct ieee80211com *ic)
3749 {
3750 struct mtw_softc *sc = ic->ic_softc;
3751 MTW_LOCK(sc);
3752 /* abort TSF synchronization */
3753 mtw_abort_tsf_sync(sc);
3754 mtw_set_bssid(sc, ieee80211broadcastaddr);
3755
3756 MTW_UNLOCK(sc);
3757
3758 return;
3759 }
3760
3761 static void
3762 mtw_scan_end(struct ieee80211com *ic)
3763 {
3764 struct mtw_softc *sc = ic->ic_softc;
3765
3766 MTW_LOCK(sc);
3767
3768 mtw_enable_tsf_sync(sc);
3769 mtw_set_bssid(sc, sc->sc_bssid);
3770
3771 MTW_UNLOCK(sc);
3772
3773 return;
3774 }
3775
3776 /*
3777 * Could be called from ieee80211_node_timeout()
3778 * (non-sleepable thread)
3779 */
3780 static void
3781 mtw_update_beacon(struct ieee80211vap *vap, int item)
3782 {
3783 struct ieee80211com *ic = vap->iv_ic;
3784 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off;
3785 struct ieee80211_node *ni = vap->iv_bss;
3786 struct mtw_softc *sc = ic->ic_softc;
3787 struct mtw_vap *rvp = MTW_VAP(vap);
3788 int mcast = 0;
3789 uint32_t i;
3790
3791 switch (item) {
3792 case IEEE80211_BEACON_ERP:
3793 mtw_updateslot(ic);
3794 break;
3795 case IEEE80211_BEACON_HTINFO:
3796 mtw_updateprot(ic);
3797 break;
3798 case IEEE80211_BEACON_TIM:
3799 mcast = 1; /*TODO*/
3800 break;
3801 default:
3802 break;
3803 }
3804
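/* mark this element dirty so ieee80211_beacon_update() regenerates it */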
3805 setbit(bo->bo_flags, item);
3806 if (rvp->beacon_mbuf == NULL) {
3807 rvp->beacon_mbuf = ieee80211_beacon_alloc(ni);
3808 if (rvp->beacon_mbuf == NULL)
3809 return;
3810 }
3811 ieee80211_beacon_update(ni, rvp->beacon_mbuf, mcast);
3812
3813 i = MTW_CMDQ_GET(&sc->cmdq_store);
3814 MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i);
3815 sc->cmdq[i].func = mtw_update_beacon_cb;
3816 sc->cmdq[i].arg0 = vap;
3817 ieee80211_runtask(ic, &sc->cmdq_task);
3818
3819 return;
3820 }
3821
3822 static void
3823 mtw_update_beacon_cb(void *arg)
3824 {
3825
3826 struct ieee80211vap *vap = arg;
3827 struct ieee80211_node *ni = vap->iv_bss;
3828 struct mtw_vap *rvp = MTW_VAP(vap);
3829 struct ieee80211com *ic = vap->iv_ic;
3830 struct mtw_softc *sc = ic->ic_softc;
3831 struct mtw_txwi txwi;
3832 struct mbuf *m;
3833 uint16_t txwisize;
3834 uint8_t ridx;
3835 if (ni->ni_chan == IEEE80211_CHAN_ANYC)
3836 return;
3837 if (ic->ic_bsschan == IEEE80211_CHAN_ANYC)
3838 return;
3839
3840 /*
3841 * No need to call ieee80211_beacon_update() here; mtw_update_beacon()
3842 * takes care of the appropriate calls.
3843 */
3844 if (rvp->beacon_mbuf == NULL) {
3845 rvp->beacon_mbuf = ieee80211_beacon_alloc(ni);
3846 if (rvp->beacon_mbuf == NULL)
3847 return;
3848 }
3849 m = rvp->beacon_mbuf;
3850
3851 memset(&txwi, 0, sizeof(txwi));
3852 txwi.wcid = 0xff;
3853 txwi.len = htole16(m->m_pkthdr.len);
3854
3855 /* send beacons at the lowest available rate */
3856 ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ? MTW_RIDX_OFDM6 :
3857 MTW_RIDX_CCK1;
3858 txwi.phy = htole16(rt2860_rates[ridx].mcs);
3859 if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM)
3860 txwi.phy |= htole16(MTW_PHY_OFDM);
3861 txwi.txop = MTW_TX_TXOP_HT;
3862 txwi.flags = MTW_TX_TS;
3863 txwi.xflags = MTW_TX_NSEQ;
3864
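/*
 * Copy the TXWI followed by the beacon frame itself into the on-chip
 * beacon area, rounding the frame length up to an even number of bytes.
 */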
3865 txwisize = sizeof(txwi);
3866 mtw_write_region_1(sc, MTW_BCN_BASE, (uint8_t *)&txwi, txwisize);
3867 mtw_write_region_1(sc, MTW_BCN_BASE + txwisize, mtod(m, uint8_t *),
3868 (m->m_pkthdr.len + 1) & ~1);
3869 }
3870
3871 static void
3872 mtw_updateprot(struct ieee80211com *ic)
3873 {
3874 struct mtw_softc *sc = ic->ic_softc;
3875 uint32_t i;
3876
3877 i = MTW_CMDQ_GET(&sc->cmdq_store);
3878 MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i);
3879 sc->cmdq[i].func = mtw_updateprot_cb;
3880 sc->cmdq[i].arg0 = ic;
3881 ieee80211_runtask(ic, &sc->cmdq_task);
3882 }
3883
3884 static void
3885 mtw_updateprot_cb(void *arg)
3886 {
3887
3888 struct ieee80211com *ic = arg;
3889 struct mtw_softc *sc = ic->ic_softc;
3890 uint32_t tmp;
3891
3892 tmp = RT2860_RTSTH_EN | RT2860_PROT_NAV_SHORT | RT2860_TXOP_ALLOW_ALL;
3893 /* setup protection frame rate (MCS code) */
3894 tmp |= (ic->ic_curmode == IEEE80211_MODE_11A) ?
3895 rt2860_rates[MTW_RIDX_OFDM6].mcs | MTW_PHY_OFDM :
3896 rt2860_rates[MTW_RIDX_CCK11].mcs;
3897
3898 /* CCK frames don't require protection */
3899 mtw_write(sc, MTW_CCK_PROT_CFG, tmp);
3900 if (ic->ic_flags & IEEE80211_F_USEPROT) {
3901 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
3902 tmp |= RT2860_PROT_CTRL_RTS_CTS;
3903 else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
3904 tmp |= RT2860_PROT_CTRL_CTS;
3905 }
3906 mtw_write(sc, MTW_OFDM_PROT_CFG, tmp);
3907 }
3908
3909 static void
3910 mtw_usb_timeout_cb(void *arg)
3911 {
3912 struct ieee80211vap *vap = arg;
3913 struct mtw_softc *sc = vap->iv_ic->ic_softc;
3914
3915 MTW_LOCK_ASSERT(sc, MA_OWNED);
3916
3917 if (vap->iv_state == IEEE80211_S_SCAN) {
3918 MTW_DPRINTF(sc, MTW_DEBUG_USB | MTW_DEBUG_STATE,
3919 "timeout caused by scan\n");
3920 /* cancel bgscan */
3921 ieee80211_cancel_scan(vap);
3922 } else {
3923 MTW_DPRINTF(sc, MTW_DEBUG_USB | MTW_DEBUG_STATE,
3924     "timeout for unknown reason\n");
3925 }
3926 }
3927 static int mtw_reset(struct mtw_softc *sc)
3928 {
3929
3930 usb_device_request_t req;
3931 uint16_t tmp;
3932 uint16_t actlen;
3933
3934 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
3935 req.bRequest = MTW_RESET;
3936 USETW(req.wValue, 1);
3937 USETW(req.wIndex, 0);
3938 USETW(req.wLength, 0);
3939 return (usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx,
3940 &req, &tmp, 0, &actlen, 1000));
3941
3942 }
3943
3944
3945 static void
3946 mtw_update_promisc_locked(struct mtw_softc *sc)
3947 {
3948
3949 uint32_t tmp;
3950
3951 mtw_read(sc, MTW_RX_FILTR_CFG, &tmp);
3952
3953 tmp |= MTW_DROP_UC_NOME;
3954 if (sc->sc_ic.ic_promisc > 0)
3955 tmp &= ~MTW_DROP_UC_NOME;
3956
3957 mtw_write(sc, MTW_RX_FILTR_CFG, tmp);
3958
3959 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "%s promiscuous mode\n",
3960 (sc->sc_ic.ic_promisc > 0) ? "entering" : "leaving");
3961 }
3962
3963 static void
3964 mtw_update_promisc(struct ieee80211com *ic)
3965 {
3966 struct mtw_softc *sc = ic->ic_softc;
3967
3968 if ((sc->sc_flags & MTW_RUNNING) == 0)
3969 return;
3970
3971 MTW_LOCK(sc);
3972 mtw_update_promisc_locked(sc);
3973 MTW_UNLOCK(sc);
3974 }
3975
3976 static void
3977 mtw_enable_tsf_sync(struct mtw_softc *sc)
3978 {
3979 struct ieee80211com *ic = &sc->sc_ic;
3980 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3981 uint32_t tmp;
3982 int error;
3983 mtw_read(sc, MTW_BCN_TIME_CFG, &tmp);
3984 tmp &= ~0x1fffff;
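/* beacon interval, presumably expressed in units of 1/16 TU */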
3985 tmp |= vap->iv_bss->ni_intval * 16;
3986 tmp |= MTW_TSF_TIMER_EN | MTW_TBTT_TIMER_EN;
3987
3988 /* local TSF is always updated with remote TSF on beacon reception */
3989 tmp |= 1 << MTW_TSF_SYNC_MODE_SHIFT;
3990 error = mtw_write(sc, MTW_BCN_TIME_CFG, tmp);
3991 if (error != 0) {
3992 device_printf(sc->sc_dev, "could not enable TSF sync, error %d\n",
3993     error);
3994 }
3995 return;
3996 }
3997
3998 static void
3999 mtw_enable_mrr(struct mtw_softc *sc)
4000 {
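/*
 * Program the legacy (non-HT) rate fallback chains: each 4-bit field
 * selects the rate to retry with after a failure at the rate named in
 * the per-line comments below.
 */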
4001 #define CCK(mcs) (mcs)
4002
4003 #define OFDM(mcs) (1 << 3 | (mcs))
4004 mtw_write(sc, MTW_LG_FBK_CFG0,
4005 OFDM(6) << 28 | /* 54->48 */
4006 OFDM(5) << 24 | /* 48->36 */
4007 OFDM(4) << 20 | /* 36->24 */
4008 OFDM(3) << 16 | /* 24->18 */
4009 OFDM(2) << 12 | /* 18->12 */
4010 OFDM(1) << 8 | /* 12-> 9 */
4011 OFDM(0) << 4 | /* 9-> 6 */
4012 OFDM(0)); /* 6-> 6 */
4013
4014 mtw_write(sc, MTW_LG_FBK_CFG1,
4015 CCK(2) << 12 | /* 11->5.5 */
4016 CCK(1) << 8 | /* 5.5-> 2 */
4017 CCK(0) << 4 | /* 2-> 1 */
4018 CCK(0)); /* 1-> 1 */
4019 #undef OFDM
4020 #undef CCK
4021 }
4022
4023 static void
4024 mtw_set_txpreamble(struct mtw_softc *sc)
4025 {
4026 struct ieee80211com *ic = &sc->sc_ic;
4027 uint32_t tmp;
4028
4029 mtw_read(sc, MTW_AUTO_RSP_CFG, &tmp);
4030 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4031 tmp |= MTW_CCK_SHORT_EN;
4032 else
4033 tmp &= ~MTW_CCK_SHORT_EN;
4034 mtw_write(sc, MTW_AUTO_RSP_CFG, tmp);
4035 }
4036
4037 static void
4038 mtw_set_basicrates(struct mtw_softc *sc)
4039 {
4040 struct ieee80211com *ic = &sc->sc_ic;
4041
4042 /* set basic rates mask */
4043 if (ic->ic_curmode == IEEE80211_MODE_11B)
4044 mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x003);
4045 else if (ic->ic_curmode == IEEE80211_MODE_11A)
4046 mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x150);
4047 else /* 11g */
4048 mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x17f);
4049 }
4050
4051 static void
4052 mtw_set_bssid(struct mtw_softc *sc, const uint8_t *bssid)
4053 {
4054 mtw_write(sc, MTW_MAC_BSSID_DW0,
4055 bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24);
4056 mtw_write(sc, MTW_MAC_BSSID_DW1, bssid[4] | bssid[5] << 8);
4057 }
4058
4059 static void
4060 mtw_set_macaddr(struct mtw_softc *sc, const uint8_t *addr)
4061 {
4062 mtw_write(sc, MTW_MAC_ADDR_DW0,
4063 addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
4064 mtw_write(sc, MTW_MAC_ADDR_DW1, addr[4] | addr[5] << 8 | 0xff << 16);
4065 }
4066
4067 static void
4068 mtw_updateslot(struct ieee80211com *ic)
4069 {
4070
4071 struct mtw_softc *sc = ic->ic_softc;
4072 uint32_t i;
4073
4074 i = MTW_CMDQ_GET(&sc->cmdq_store);
4075 MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i);
4076 sc->cmdq[i].func = mtw_updateslot_cb;
4077 sc->cmdq[i].arg0 = ic;
4078 ieee80211_runtask(ic, &sc->cmdq_task);
4079
4080 return;
4081 }
4082
4083 /* ARGSUSED */
4084 static void
4085 mtw_updateslot_cb(void *arg)
4086 {
4087 struct ieee80211com *ic = arg;
4088 struct mtw_softc *sc = ic->ic_softc;
4089 uint32_t tmp;
4090 mtw_read(sc, MTW_BKOFF_SLOT_CFG, &tmp);
4091 tmp &= ~0xff;
4092 tmp |= IEEE80211_GET_SLOTTIME(ic);
4093 mtw_write(sc, MTW_BKOFF_SLOT_CFG, tmp);
4094 }
4095
4096 static void
4097 mtw_update_mcast(struct ieee80211com *ic)
4098 {
4099 }
4100
4101 static int8_t
4102 mtw_rssi2dbm(struct mtw_softc *sc, uint8_t rssi, uint8_t rxchain)
4103 {
4104 struct ieee80211com *ic = &sc->sc_ic;
4105 struct ieee80211_channel *c = ic->ic_curchan;
4106 int delta;
4107
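/*
 * Convert a raw RSSI value to dBm using the per-chain RSSI offset and
 * LNA gain read from the EEPROM.
 */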
4108 if (IEEE80211_IS_CHAN_5GHZ(c)) {
4109 u_int chan = ieee80211_chan2ieee(ic, c);
4110 delta = sc->rssi_5ghz[rxchain];
4111
4112 /* determine channel group */
4113 if (chan <= 64)
4114 delta -= sc->lna[1];
4115 else if (chan <= 128)
4116 delta -= sc->lna[2];
4117 else
4118 delta -= sc->lna[3];
4119 } else
4120 delta = sc->rssi_2ghz[rxchain] - sc->lna[0];
4121
4122 return (-12 - delta - rssi);
4123 }
4124 static int
4125 mt7601_bbp_init(struct mtw_softc *sc)
4126 {
4127 uint8_t bbp;
4128 int i, error, ntries;
4129
4130 /* wait for BBP to wake up */
4131 for (ntries = 0; ntries < 20; ntries++) {
4132 if ((error = mtw_bbp_read(sc, 0, &bbp)) != 0)
4133 return (error);
4134 if (bbp != 0 && bbp != 0xff)
4135 break;
4136 }
4137
4138 if (ntries == 20)
4139 return (ETIMEDOUT);
4140
4141 mtw_bbp_read(sc, 3, &bbp);
4142 mtw_bbp_write(sc, 3, 0);
4143 mtw_bbp_read(sc, 105, &bbp);
4144 mtw_bbp_write(sc, 105, 0);
4145
4146 /* initialize BBP registers to default values */
4147 for (i = 0; i < nitems(mt7601_def_bbp); i++) {
4148 if ((error = mtw_bbp_write(sc, mt7601_def_bbp[i].reg,
4149 mt7601_def_bbp[i].val)) != 0)
4150 return (error);
4151 }
4152
4153 sc->sc_bw_calibrated = 0;
4154
4155 return (0);
4156 }
4157
4158 static int
4159 mt7601_rf_init(struct mtw_softc *sc)
4160 {
4161 int i, error;
4162
4163 /* RF bank 0 */
4164 for (i = 0; i < nitems(mt7601_rf_bank0); i++) {
4165 error = mtw_rf_write(sc, 0, mt7601_rf_bank0[i].reg,
4166 mt7601_rf_bank0[i].val);
4167 if (error != 0)
4168 return (error);
4169 }
4170 /* RF bank 4 */
4171 for (i = 0; i < nitems(mt7601_rf_bank4); i++) {
4172 error = mtw_rf_write(sc, 4, mt7601_rf_bank4[i].reg,
4173 mt7601_rf_bank4[i].val);
4174 if (error != 0)
4175 return (error);
4176 }
4177 /* RF bank 5 */
4178 for (i = 0; i < nitems(mt7601_rf_bank5); i++) {
4179 error = mtw_rf_write(sc, 5, mt7601_rf_bank5[i].reg,
4180 mt7601_rf_bank5[i].val);
4181 if (error != 0)
4182 return (error);
4183 }
4184 return (0);
4185 }
4186
4187 static int
4188 mtw_txrx_enable(struct mtw_softc *sc)
4189 {
4190 struct ieee80211com *ic = &sc->sc_ic;
4191 uint32_t tmp;
4192 int error, ntries;
4193 mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_TX_EN);
4194 for (ntries = 0; ntries < 200; ntries++) {
4195 if ((error = mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp)) != 0) {
4196 return (error);
4197 }
4198 if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
4199 break;
4200 mtw_delay(sc, 50);
4201 }
4202 if (ntries == 200) {
4203 return (ETIMEDOUT);
4204 }
4205
4206 DELAY(50);
4207
4208 tmp |= MTW_RX_DMA_EN | MTW_TX_DMA_EN | MTW_TX_WB_DDONE;
4209 mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);
4210
4211 /* enable Rx bulk aggregation (set timeout and limit) */
4212 tmp = MTW_USB_TX_EN | MTW_USB_RX_EN | MTW_USB_RX_AGG_EN |
4213 MTW_USB_RX_AGG_TO(128) | MTW_USB_RX_AGG_LMT(2);
4214 mtw_write(sc, MTW_USB_DMA_CFG, tmp);
4215
4216 /* set Rx filter */
4217 tmp = MTW_DROP_CRC_ERR | MTW_DROP_PHY_ERR;
4218 if (ic->ic_opmode != IEEE80211_M_MONITOR) {
4219 tmp |= MTW_DROP_UC_NOME | MTW_DROP_DUPL | MTW_DROP_CTS |
4220 MTW_DROP_BA | MTW_DROP_ACK | MTW_DROP_VER_ERR |
4221 MTW_DROP_CTRL_RSV | MTW_DROP_CFACK | MTW_DROP_CFEND;
4222 if (ic->ic_opmode == IEEE80211_M_STA)
4223 tmp |= MTW_DROP_RTS | MTW_DROP_PSPOLL;
4224 }
4225 mtw_write(sc, MTW_RX_FILTR_CFG, tmp);
4226
4227 mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_RX_EN | MTW_MAC_TX_EN);
4228 return (0);
4229 }
4230 static int
4231 mt7601_rxdc_cal(struct mtw_softc *sc)
4232 {
4233 uint32_t tmp;
4234 uint8_t bbp;
4235 int ntries;
4236
4237 mtw_read(sc, MTW_MAC_SYS_CTRL, &tmp);
4238 mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_RX_EN);
4239 mtw_bbp_write(sc, 158, 0x8d);
4240 mtw_bbp_write(sc, 159, 0xfc);
4241 mtw_bbp_write(sc, 158, 0x8c);
4242 mtw_bbp_write(sc, 159, 0x4c);
4243
4244 for (ntries = 0; ntries < 20; ntries++) {
4245 DELAY(300);
4246 mtw_bbp_write(sc, 158, 0x8c);
4247 mtw_bbp_read(sc, 159, &bbp);
4248 if (bbp == 0x0c)
4249 break;
4250 }
4251
4252 if (ntries == 20)
4253 return (ETIMEDOUT);
4254
4255 mtw_write(sc, MTW_MAC_SYS_CTRL, 0);
4256 mtw_bbp_write(sc, 158, 0x8d);
4257 mtw_bbp_write(sc, 159, 0xe0);
4258 mtw_write(sc, MTW_MAC_SYS_CTRL, tmp);
4259 return (0);
4260 }
4261
4262 static int
4263 mt7601_r49_read(struct mtw_softc *sc, uint8_t flag, int8_t *val)
4264 {
4265 uint8_t bbp;
4266
4267 mtw_bbp_read(sc, 47, &bbp);
4268 bbp = 0x90;
4269 mtw_bbp_write(sc, 47, bbp);
4270 bbp &= ~0x0f;
4271 bbp |= flag;
4272 mtw_bbp_write(sc, 47, bbp);
4273 return (mtw_bbp_read(sc, 49, val));
4274 }
4275
4276 static int
4277 mt7601_rf_temperature(struct mtw_softc *sc, int8_t *val)
4278 {
4279 uint32_t rfb, rfs;
4280 uint8_t bbp;
4281 int ntries;
4282
4283 mtw_read(sc, MTW_RF_BYPASS0, &rfb);
4284 mtw_read(sc, MTW_RF_SETTING0, &rfs);
4285 mtw_write(sc, MTW_RF_BYPASS0, 0);
4286 mtw_write(sc, MTW_RF_SETTING0, 0x10);
4287 mtw_write(sc, MTW_RF_BYPASS0, 0x10);
4288
4289 mtw_bbp_read(sc, 47, &bbp);
4290 bbp &= ~0x7f;
4291 bbp |= 0x10;
4292 mtw_bbp_write(sc, 47, bbp);
4293
4294 mtw_bbp_write(sc, 22, 0x40);
4295
4296 for (ntries = 0; ntries < 10; ntries++) {
4297 mtw_bbp_read(sc, 47, &bbp);
4298 if ((bbp & 0x10) == 0)
4299 break;
4300 }
4301 if (ntries == 10)
4302 return (ETIMEDOUT);
4303
4304 mt7601_r49_read(sc, MT7601_R47_TEMP, val);
4305
4306 mtw_bbp_write(sc, 22, 0);
4307
4308 mtw_bbp_read(sc, 21, &bbp);
4309 bbp |= 0x02;
4310 mtw_bbp_write(sc, 21, bbp);
4311 bbp &= ~0x02;
4312 mtw_bbp_write(sc, 21, bbp);
4313
4314 mtw_write(sc, MTW_RF_BYPASS0, 0);
4315 mtw_write(sc, MTW_RF_SETTING0, rfs);
4316 mtw_write(sc, MTW_RF_BYPASS0, rfb);
4317 return (0);
4318 }
4319
4320 static int
4321 mt7601_rf_setup(struct mtw_softc *sc)
4322 {
4323 uint32_t tmp;
4324 uint8_t rf;
4325 int error;
4326
4327 if (sc->sc_rf_calibrated)
4328 return (0);
4329
4330 /* init RF registers */
4331 if ((error = mt7601_rf_init(sc)) != 0)
4332 return (error);
4333
4334 /* init frequency offset */
4335 mtw_rf_write(sc, 0, 12, sc->rf_freq_offset);
4336 mtw_rf_read(sc, 0, 12, &rf);
4337
4338 /* read temperature */
4339 mt7601_rf_temperature(sc, &rf);
4340 sc->bbp_temp = rf;
4341 device_printf(sc->sc_dev, "BBP temp 0x%x\n", rf);
4342
4343 mtw_rf_read(sc, 0, 7, &rf);
4344 if ((error = mtw_mcu_calibrate(sc, 0x1, 0)) != 0)
4345 return (error);
4346 mtw_delay(sc, 100);
4347 mtw_rf_read(sc, 0, 7, &rf);
4348
4349 /* Calibrate VCO RF 0/4 */
4350 mtw_rf_write(sc, 0, 4, 0x0a);
4351 mtw_rf_write(sc, 0, 4, 0x20);
4352 mtw_rf_read(sc, 0, 4, &rf);
4353 mtw_rf_write(sc, 0, 4, rf | 0x80);
4354
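/*
 * Remaining firmware calibration steps.  Going by the calibration opcodes
 * used by the Linux mt7601u driver, these appear to be TX DC offset (0x9),
 * bandwidth filter (0x6), LOFT (0x4), TX IQ (0x5) and RX IQ (0x8), but the
 * exact meanings are not documented here.
 */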
4355 if ((error = mtw_mcu_calibrate(sc, 0x9, 0)) != 0)
4356 return (error);
4357 if ((error = mt7601_rxdc_cal(sc)) != 0)
4358 return (error);
4359 if ((error = mtw_mcu_calibrate(sc, 0x6, 1)) != 0)
4360 return (error);
4361 if ((error = mtw_mcu_calibrate(sc, 0x6, 0)) != 0)
4362 return (error);
4363 if ((error = mtw_mcu_calibrate(sc, 0x4, 0)) != 0)
4364 return (error);
4365 if ((error = mtw_mcu_calibrate(sc, 0x5, 0)) != 0)
4366 return (error);
4367
4368 mtw_read(sc, MTW_LDO_CFG0, &tmp);
4369 tmp &= ~(1 << 4);
4370 tmp |= (1 << 2);
4371 mtw_write(sc, MTW_LDO_CFG0, tmp);
4372
4373 if ((error = mtw_mcu_calibrate(sc, 0x8, 0)) != 0)
4374 return (error);
4375 if ((error = mt7601_rxdc_cal(sc)) != 0)
4376 return (error);
4377
4378 sc->sc_rf_calibrated = 1;
4379 return (0);
4380 }
4381
4382 static void
4383 mtw_set_txrts(struct mtw_softc *sc)
4384 {
4385 uint32_t tmp;
4386
4387 /* set RTS threshold */
4388 mtw_read(sc, MTW_TX_RTS_CFG, &tmp);
4389 tmp &= ~0xffff00;
4390 tmp |= 0x1000 << MTW_RTS_THRES_SHIFT;
4391 mtw_write(sc, MTW_TX_RTS_CFG, tmp);
4392 }
4393 static int
4394 mtw_mcu_radio(struct mtw_softc *sc, int func, uint32_t val)
4395 {
4396 struct mtw_mcu_cmd_16 cmd;
4397
4398 cmd.r1 = htole32(func);
4399 cmd.r2 = htole32(val);
4400 cmd.r3 = 0;
4401 cmd.r4 = 0;
4402 return (mtw_mcu_cmd(sc, 20, &cmd, sizeof(struct mtw_mcu_cmd_16)));
4403 }
4404 static void
4405 mtw_init_locked(struct mtw_softc *sc)
4406 {
4407
4408 struct ieee80211com *ic = &sc->sc_ic;
4409 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4410 uint32_t tmp;
4411 int i, error, ridx, ntries;
4412 if (ic->ic_nrunning > 1)
4413 return;
4414 mtw_stop(sc);
4415
4416 for (i = 0; i != MTW_EP_QUEUES; i++)
4417 mtw_setup_tx_list(sc, &sc->sc_epq[i]);
4418
4419 for (ntries = 0; ntries < 100; ntries++) {
4420 if ((error = mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp)) != 0)
4421 goto fail;
4422 if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
4423 break;
4424 DELAY(1000);
4425 }
4426 if (ntries == 100) {
4427 device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
4428 error = ETIMEDOUT;
4429 goto fail;
4430 }
4431 tmp &= 0xff0;
4432 tmp |= MTW_TX_WB_DDONE;
4433 mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);
4434
4435 mtw_set_leds(sc, MTW_LED_MODE_ON);
4436 /* reset MAC and baseband */
4437 mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_BBP_HRST | MTW_MAC_SRST);
4438 mtw_write(sc, MTW_USB_DMA_CFG, 0);
4439 mtw_write(sc, MTW_MAC_SYS_CTRL, 0);
4440
4441 /* init MAC values */
4442 if (sc->asic_ver == 0x7601) {
4443 for (i = 0; i < nitems(mt7601_def_mac); i++)
4444 mtw_write(sc, mt7601_def_mac[i].reg,
4445 mt7601_def_mac[i].val);
4446 }
4447
4448 /* wait while MAC is busy */
4449 for (ntries = 0; ntries < 100; ntries++) {
4450 if ((error = mtw_read(sc, MTW_MAC_STATUS_REG, &tmp)) != 0)
4451 goto fail;
4452 if (!(tmp & (MTW_RX_STATUS_BUSY | MTW_TX_STATUS_BUSY)))
4453 break;
4454 DELAY(1000);
4455 }
4456 if (ntries == 100) {
4457 error = ETIMEDOUT;
4458 goto fail;
4459 }
4460
4461 /* set MAC address */
4462
4463 mtw_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);
4464
4465 /* clear WCID attribute table */
4466 mtw_set_region_4(sc, MTW_WCID_ATTR(0), 1, 8 * 32);
4467
4468 mtw_write(sc, 0x1648, 0x00830083);
4469 mtw_read(sc, MTW_FCE_L2_STUFF, &tmp);
4470 tmp &= ~MTW_L2S_WR_MPDU_LEN_EN;
4471 mtw_write(sc, MTW_FCE_L2_STUFF, tmp);
4472
4473 /* RTS config */
4474 mtw_set_txrts(sc);
4475
4476 /* clear Host to MCU mailbox */
4477 mtw_write(sc, MTW_BBP_CSR, 0);
4478 mtw_write(sc, MTW_H2M_MAILBOX, 0);
4479
4480 /* clear RX WCID search table */
4481 mtw_set_region_4(sc, MTW_WCID_ENTRY(0), 0xffffffff, 512);
4482
4483 /* abort TSF synchronization */
4484 mtw_abort_tsf_sync(sc);
4485
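/*
 * US_CYC_CNT: the low byte holds the number of MAC clock cycles per
 * microsecond; 0x1e (30) presumably corresponds to a 30 MHz reference.
 */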
4486 mtw_read(sc, MTW_US_CYC_CNT, &tmp);
4487 tmp = (tmp & ~0xff);
4488 if (sc->asic_ver == 0x7601)
4489 tmp |= 0x1e;
4490 mtw_write(sc, MTW_US_CYC_CNT, tmp);
4491
4492 /* clear shared key table */
4493 mtw_set_region_4(sc, MTW_SKEY(0, 0), 0, 8 * 32);
4494
4495 /* clear IV/EIV table */
4496 mtw_set_region_4(sc, MTW_IVEIV(0), 0, 8 * 32);
4497
4498 /* clear shared key mode */
4499 mtw_write(sc, MTW_SKEY_MODE_0_7, 0);
4500 mtw_write(sc, MTW_SKEY_MODE_8_15, 0);
4501
4502 /* txop truncation */
4503 mtw_write(sc, MTW_TXOP_CTRL_CFG, 0x0000583f);
4504
4505 /* init Tx power for all Tx rates */
4506 for (ridx = 0; ridx < 5; ridx++) {
4507 if (sc->txpow20mhz[ridx] == 0xffffffff)
4508 continue;
4509 mtw_write(sc, MTW_TX_PWR_CFG(ridx), sc->txpow20mhz[ridx]);
4510 }
4511 mtw_write(sc, MTW_TX_PWR_CFG7, 0);
4512 mtw_write(sc, MTW_TX_PWR_CFG9, 0);
4513
4514 mtw_read(sc, MTW_CMB_CTRL, &tmp);
4515 tmp &= ~(1 << 18 | 1 << 14);
4516 mtw_write(sc, MTW_CMB_CTRL, tmp);
4517
4518 /* clear USB DMA */
4519 mtw_write(sc, MTW_USB_DMA_CFG,
4520 MTW_USB_TX_EN | MTW_USB_RX_EN | MTW_USB_RX_AGG_EN |
4521 MTW_USB_TX_CLEAR | MTW_USB_TXOP_HALT | MTW_USB_RX_WL_DROP);
4522 mtw_delay(sc, 50);
4523 mtw_read(sc, MTW_USB_DMA_CFG, &tmp);
4524 tmp &= ~(MTW_USB_TX_CLEAR | MTW_USB_TXOP_HALT | MTW_USB_RX_WL_DROP);
4525 mtw_write(sc, MTW_USB_DMA_CFG, tmp);
4526
4527 /* enable radio */
4528 mtw_mcu_radio(sc, 0x31, 0);
4529
4530 /* init RF registers */
4531 if (sc->asic_ver == 0x7601)
4532 mt7601_rf_init(sc);
4533
4534 /* init baseband registers */
4535 if (sc->asic_ver == 0x7601)
4536 error = mt7601_bbp_init(sc);
4537
4538 if (error != 0) {
4539 device_printf(sc->sc_dev, "could not initialize BBP\n");
4540 goto fail;
4541 }
4542
4543 /* setup and calibrate RF */
4544 error = mt7601_rf_setup(sc);
4545
4546 if (error != 0) {
4547 device_printf(sc->sc_dev, "could not initialize RF\n");
4548 goto fail;
4549 }
4550
4551 /* select default channel */
4552 mtw_set_chan(sc, ic->ic_curchan);
4553
4554 /* setup initial protection mode */
4555 mtw_updateprot_cb(ic);
4556
4557 sc->sc_flags |= MTW_RUNNING;
4558 sc->cmdq_run = MTW_CMDQ_GO;
4559 for (i = 0; i != MTW_N_XFER; i++)
4560 usbd_xfer_set_stall(sc->sc_xfer[i]);
4561
4562 usbd_transfer_start(sc->sc_xfer[MTW_BULK_RX]);
4563
4564 error = mtw_txrx_enable(sc);
4565 if (error != 0) {
4566 goto fail;
4567 }
4568
4569 return;
4570
4571 fail:
4572
4573 mtw_stop(sc);
4574 return;
4575 }
4576
4577 static void
4578 mtw_stop(void *arg)
4579 {
4580 struct mtw_softc *sc = (struct mtw_softc *)arg;
4581 uint32_t tmp;
4582 int i, ntries, error;
4583
4584 MTW_LOCK_ASSERT(sc, MA_OWNED);
4585
4586 sc->sc_flags &= ~MTW_RUNNING;
4587
4588 sc->ratectl_run = MTW_RATECTL_OFF;
4589 sc->cmdq_run = sc->cmdq_key_set;
4590
4591 MTW_UNLOCK(sc);
4592
4593 for (i = 0; i < MTW_N_XFER; i++)
4594 usbd_transfer_drain(sc->sc_xfer[i]);
4595
4596 MTW_LOCK(sc);
4597
4598 mtw_drain_mbufq(sc);
4599
4600 if (sc->rx_m != NULL) {
4601 m_free(sc->rx_m);
4602 sc->rx_m = NULL;
4603 }
4604
4605 /* Disable Tx/Rx DMA. */
4606 mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp);
4607 tmp &= ~(MTW_RX_DMA_EN | MTW_TX_DMA_EN);
4608 mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);
4609 // mtw_usb_dma_write(sc, 0);
4610
4611 for (ntries = 0; ntries < 100; ntries++) {
4612 if (mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp) != 0)
4613 break;
4614 if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
4615 break;
4616 DELAY(10);
4617 }
4618 if (ntries == 100) {
4619 device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
4620 }
4621
4622 /* stop MAC Tx/Rx */
4623 mtw_read(sc, MTW_MAC_SYS_CTRL, &tmp);
4624 tmp &= ~(MTW_MAC_RX_EN | MTW_MAC_TX_EN);
4625 mtw_write(sc, MTW_MAC_SYS_CTRL, tmp);
4626
4627 /* disable RTS retry */
4628 mtw_read(sc, MTW_TX_RTS_CFG, &tmp);
4629 tmp &= ~0xff;
4630 mtw_write(sc, MTW_TX_RTS_CFG, tmp);
4631
4632 /* US_CYC_CFG */
4633 mtw_read(sc, MTW_US_CYC_CNT, &tmp);
4634 tmp = (tmp & ~0xff);
4635 mtw_write(sc, MTW_US_CYC_CNT, tmp);
4636
4637 /* stop PBF */
4638 mtw_read(sc, MTW_PBF_CFG, &tmp);
4639 tmp &= ~0x3;
4640 mtw_write(sc, MTW_PBF_CFG, tmp);
4641
4642 /* wait for pending Tx to complete */
4643 for (ntries = 0; ntries < 100; ntries++) {
4644 if ((error = mtw_read(sc, MTW_TXRXQ_PCNT, &tmp)) != 0)
4645 break;
4646 if ((tmp & MTW_TX2Q_PCNT_MASK) == 0)
4647 break;
4648 }
4649
4650 }
4651
4652 static void
4653 mtw_delay(struct mtw_softc *sc, u_int ms)
4654 {
4655 usb_pause_mtx(mtx_owned(&sc->sc_mtx) ? &sc->sc_mtx : NULL,
4656 USB_MS_TO_TICKS(ms));
4657 }
4658
4659 static void
4660 mtw_update_chw(struct ieee80211com *ic)
4661 {
4662
4663 printf("%s: TODO\n", __func__);
4664 }
4665
4666 static int
4667 mtw_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
4668 {
4669
4670 /* For now, no A-MPDU TX support in the driver */
4671 return (0);
4672 }
4673
4674 static device_method_t mtw_methods[] = {
4675 /* Device interface */
4676 DEVMETHOD(device_probe, mtw_match),
4677 DEVMETHOD(device_attach, mtw_attach),
4678 DEVMETHOD(device_detach, mtw_detach), DEVMETHOD_END
4679 };
4680
4681 static driver_t mtw_driver = { .name = "mtw",
4682 .methods = mtw_methods,
4683 .size = sizeof(struct mtw_softc) };
4684
4685 DRIVER_MODULE(mtw, uhub, mtw_driver, mtw_driver_loaded, NULL);
4686 MODULE_DEPEND(mtw, wlan, 1, 1, 1);
4687 MODULE_DEPEND(mtw, usb, 1, 1, 1);
4688 MODULE_DEPEND(mtw, firmware, 1, 1, 1);
4689 MODULE_VERSION(mtw, 1);
4690 USB_PNP_HOST_INFO(mtw_devs);
4691