1 /*-
2 * Copyright (c) 2008-2010 Damien Bergamini <damien.bergamini@free.fr>
3 * Copyright (c) 2013-2014 Kevin Lo
4 * Copyright (c) 2021 James Hastings
5 * Ported to FreeBSD by Jesper Schmitz Mouridsen jsm@FreeBSD.org
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * MediaTek MT7601U 802.11b/g/n WLAN.
22 */
23
24 #include "opt_wlan.h"
25
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/bus.h>
29 #include <sys/endian.h>
30 #include <sys/eventhandler.h>
31 #include <sys/firmware.h>
32 #include <sys/kdb.h>
33 #include <sys/kernel.h>
34 #include <sys/linker.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/module.h>
39 #include <sys/mutex.h>
40 #include <sys/socket.h>
41 #include <sys/sockio.h>
42 #include <sys/sysctl.h>
43
44 #include <net/bpf.h>
45 #include <net/ethernet.h>
46 #include <net/if.h>
47 #include <net/if_arp.h>
48 #include <net/if_dl.h>
49 #include <net/if_media.h>
50 #include <net/if_types.h>
51 #include <net/if_var.h>
52 #include <net80211/ieee80211_var.h>
53 #include <net80211/ieee80211_radiotap.h>
54 #include <net80211/ieee80211_ratectl.h>
55 #include <net80211/ieee80211_regdomain.h>
56 #ifdef IEEE80211_SUPPORT_SUPERG
57 #include <net80211/ieee80211_superg.h>
58 #endif
59 #include <netinet/if_ether.h>
60 #include <netinet/in.h>
61 #include <netinet/in_systm.h>
62 #include <netinet/in_var.h>
63 #include <netinet/ip.h>
64
65 #include <dev/usb/usb.h>
66 #include <dev/usb/usbdi.h>
67
68 #include "usbdevs.h"
69
70 #define USB_DEBUG_VAR mtw_debug
71 #include <dev/usb/usb_debug.h>
72 #include <dev/usb/usb_msctest.h>
73
74 #include "if_mtwreg.h"
75 #include "if_mtwvar.h"
76
77 #define MTW_DEBUG
78
79 #ifdef MTW_DEBUG
80 int mtw_debug;
81 static SYSCTL_NODE(_hw_usb, OID_AUTO, mtw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
82 "USB mtw");
83 SYSCTL_INT(_hw_usb_mtw, OID_AUTO, debug, CTLFLAG_RWTUN, &mtw_debug, 0,
84 "mtw debug level");
85
86 enum {
87 MTW_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
88 MTW_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
89 MTW_DEBUG_RECV = 0x00000004, /* basic recv operation */
90 MTW_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */
91 MTW_DEBUG_STATE = 0x00000010, /* 802.11 state transitions */
92 MTW_DEBUG_RATE = 0x00000020, /* rate adaptation */
93 MTW_DEBUG_USB = 0x00000040, /* usb requests */
94 MTW_DEBUG_FIRMWARE = 0x00000080, /* firmware(9) loading debug */
95 MTW_DEBUG_BEACON = 0x00000100, /* beacon handling */
96 MTW_DEBUG_INTR = 0x00000200, /* ISR */
97 MTW_DEBUG_TEMP = 0x00000400, /* temperature calibration */
98 MTW_DEBUG_ROM = 0x00000800, /* various ROM info */
99 MTW_DEBUG_KEY = 0x00001000, /* crypto keys management */
100 MTW_DEBUG_TXPWR = 0x00002000, /* dump Tx power values */
101 MTW_DEBUG_RSSI = 0x00004000, /* dump RSSI lookups */
102 MTW_DEBUG_RESET = 0x00008000, /* initialization progress */
103 MTW_DEBUG_CALIB = 0x00010000, /* calibration progress */
104 MTW_DEBUG_CMD = 0x00020000, /* command queue */
105 MTW_DEBUG_ANY = 0xffffffff
106 };
107
108 #define MTW_DPRINTF(_sc, _m, ...) \
109 do { \
110 if (mtw_debug & (_m)) \
111 device_printf((_sc)->sc_dev, __VA_ARGS__); \
112 } while (0)
113
114 #else
115 #define MTW_DPRINTF(_sc, _m, ...) \
116 do { \
117 (void)_sc; \
118 } while (0)
119 #endif
120
121 #define IEEE80211_HAS_ADDR4(wh) IEEE80211_IS_DSTODS(wh)
122
123 /* NB: "11" is the maximum number of padding bytes needed for Tx */
124 #define MTW_MAX_TXSZ \
125 (sizeof(struct mtw_txd) + sizeof(struct mtw_txwi) + MCLBYTES + 11)
126
127 /*
128  * Because of a LOR in mtw_key_delete(), use atomics instead.
129  * '& MTW_CMDQ_MASQ' wraps the cmdq[] index.
130 */
131 #define MTW_CMDQ_GET(c) (atomic_fetchadd_32((c), 1) & MTW_CMDQ_MASQ)
132
133 static const STRUCT_USB_HOST_ID mtw_devs[] = {
134 #define MTW_DEV(v, p) \
135 { \
136 USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) \
137 }
138 MTW_DEV(EDIMAX, MT7601U),
139 MTW_DEV(RALINK, MT7601U),
140 MTW_DEV(XIAOMI, MT7601U)
141 };
142 #undef MTW_DEV
143
144 static device_probe_t mtw_match;
145 static device_attach_t mtw_attach;
146 static device_detach_t mtw_detach;
147
148 static usb_callback_t mtw_bulk_rx_callback;
149 static usb_callback_t mtw_bulk_tx_callback0;
150 static usb_callback_t mtw_bulk_tx_callback1;
151 static usb_callback_t mtw_bulk_tx_callback2;
152 static usb_callback_t mtw_bulk_tx_callback3;
153 static usb_callback_t mtw_bulk_tx_callback4;
154 static usb_callback_t mtw_bulk_tx_callback5;
155 static usb_callback_t mtw_fw_callback;
156
157 static void mtw_autoinst(void *, struct usb_device *, struct usb_attach_arg *);
158 static int mtw_driver_loaded(struct module *, int, void *);
159 static void mtw_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error,
160 u_int index);
161 static struct ieee80211vap *mtw_vap_create(struct ieee80211com *,
162 const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
163 const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
164 static void mtw_vap_delete(struct ieee80211vap *);
165 static void mtw_cmdq_cb(void *, int);
166 static void mtw_setup_tx_list(struct mtw_softc *, struct mtw_endpoint_queue *);
167 static void mtw_unsetup_tx_list(struct mtw_softc *,
168 struct mtw_endpoint_queue *);
169 static void mtw_load_microcode(void *arg);
170
171 static usb_error_t mtw_do_request(struct mtw_softc *,
172 struct usb_device_request *, void *);
173 static int mtw_read(struct mtw_softc *, uint16_t, uint32_t *);
174 static int mtw_read_region_1(struct mtw_softc *, uint16_t, uint8_t *, int);
175 static int mtw_write_2(struct mtw_softc *, uint16_t, uint16_t);
176 static int mtw_write(struct mtw_softc *, uint16_t, uint32_t);
177 static int mtw_write_region_1(struct mtw_softc *, uint16_t, const uint8_t *, int);
178 static int mtw_set_region_4(struct mtw_softc *, uint16_t, uint32_t, int);
179 static int mtw_efuse_read_2(struct mtw_softc *, uint16_t, uint16_t *);
180 static int mtw_bbp_read(struct mtw_softc *, uint8_t, uint8_t *);
181 static int mtw_bbp_write(struct mtw_softc *, uint8_t, uint8_t);
182 static int mtw_mcu_cmd(struct mtw_softc *sc, uint8_t cmd, void *buf, int len);
183 static void mtw_get_txpower(struct mtw_softc *);
184 static int mtw_read_eeprom(struct mtw_softc *);
185 static struct ieee80211_node *mtw_node_alloc(struct ieee80211vap *,
186 const uint8_t mac[IEEE80211_ADDR_LEN]);
187 static int mtw_media_change(if_t);
188 static int mtw_newstate(struct ieee80211vap *, enum ieee80211_state, int);
189 static int mtw_wme_update(struct ieee80211com *);
190 static void mtw_key_set_cb(void *);
191 static int mtw_key_set(struct ieee80211vap *, struct ieee80211_key *);
192 static void mtw_key_delete_cb(void *);
193 static int mtw_key_delete(struct ieee80211vap *, struct ieee80211_key *);
194 static void mtw_ratectl_to(void *);
195 static void mtw_ratectl_cb(void *, int);
196 static void mtw_drain_fifo(void *);
197 static void mtw_iter_func(void *, struct ieee80211_node *);
198 static void mtw_newassoc_cb(void *);
199 static void mtw_newassoc(struct ieee80211_node *, int);
200 static int mtw_mcu_radio(struct mtw_softc *sc, int func, uint32_t val);
201 static void mtw_recv_mgmt(struct ieee80211_node *, struct mbuf *, int,
202 const struct ieee80211_rx_stats *, int, int);
203 static void mtw_rx_frame(struct mtw_softc *, struct mbuf *, uint32_t);
204 static void mtw_tx_free(struct mtw_endpoint_queue *pq, struct mtw_tx_data *,
205 int);
206 static void mtw_set_tx_desc(struct mtw_softc *, struct mtw_tx_data *);
207 static int mtw_tx(struct mtw_softc *, struct mbuf *, struct ieee80211_node *);
208 static int mtw_tx_mgt(struct mtw_softc *, struct mbuf *,
209 struct ieee80211_node *);
210 static int mtw_sendprot(struct mtw_softc *, const struct mbuf *,
211 struct ieee80211_node *, int, int);
212 static int mtw_tx_param(struct mtw_softc *, struct mbuf *,
213 struct ieee80211_node *, const struct ieee80211_bpf_params *);
214 static int mtw_raw_xmit(struct ieee80211_node *, struct mbuf *,
215 const struct ieee80211_bpf_params *);
216 static int mtw_transmit(struct ieee80211com *, struct mbuf *);
217 static void mtw_start(struct mtw_softc *);
218 static void mtw_parent(struct ieee80211com *);
219 static void mtw_select_chan_group(struct mtw_softc *, int);
220
221 static int mtw_set_chan(struct mtw_softc *, struct ieee80211_channel *);
222 static void mtw_set_channel(struct ieee80211com *);
223 static void mtw_getradiocaps(struct ieee80211com *, int, int *,
224 struct ieee80211_channel[]);
225 static void mtw_scan_start(struct ieee80211com *);
226 static void mtw_scan_end(struct ieee80211com *);
227 static void mtw_update_beacon(struct ieee80211vap *, int);
228 static void mtw_update_beacon_cb(void *);
229 static void mtw_updateprot(struct ieee80211com *);
230 static void mtw_updateprot_cb(void *);
231 static void mtw_usb_timeout_cb(void *);
232 static int mtw_reset(struct mtw_softc *sc);
233 static void mtw_enable_tsf_sync(struct mtw_softc *);
234
235
236 static void mtw_enable_mrr(struct mtw_softc *);
237 static void mtw_set_txpreamble(struct mtw_softc *);
238 static void mtw_set_basicrates(struct mtw_softc *);
239 static void mtw_set_leds(struct mtw_softc *, uint16_t);
240 static void mtw_set_bssid(struct mtw_softc *, const uint8_t *);
241 static void mtw_set_macaddr(struct mtw_softc *, const uint8_t *);
242 static void mtw_updateslot(struct ieee80211com *);
243 static void mtw_updateslot_cb(void *);
244 static void mtw_update_mcast(struct ieee80211com *);
245 static int8_t mtw_rssi2dbm(struct mtw_softc *, uint8_t, uint8_t);
246 static void mtw_update_promisc_locked(struct mtw_softc *);
247 static void mtw_update_promisc(struct ieee80211com *);
248 static int mtw_txrx_enable(struct mtw_softc *);
249 static void mtw_init_locked(struct mtw_softc *);
250 static void mtw_stop(void *);
251 static void mtw_delay(struct mtw_softc *, u_int);
252 static void mtw_update_chw(struct ieee80211com *ic);
253 static int mtw_ampdu_enable(struct ieee80211_node *ni,
254 struct ieee80211_tx_ampdu *tap);
255
256 static eventhandler_tag mtw_etag;
257
258 static const struct {
259 uint8_t reg;
260 uint8_t val;
261 } mt7601_rf_bank0[] = { MT7601_BANK0_RF },
262 mt7601_rf_bank4[] = { MT7601_BANK4_RF },
263 mt7601_rf_bank5[] = { MT7601_BANK5_RF };
264 static const struct {
265 uint32_t reg;
266 uint32_t val;
267 } mt7601_def_mac[] = { MT7601_DEF_MAC };
268 static const struct {
269 uint8_t reg;
270 uint8_t val;
271 } mt7601_def_bbp[] = { MT7601_DEF_BBP };
272
273
274 static const struct {
275 u_int chan;
276 uint8_t r17, r18, r19, r20;
277 } mt7601_rf_chan[] = { MT7601_RF_CHAN };
278
279
280 static const struct usb_config mtw_config[MTW_N_XFER] = {
281 [MTW_BULK_RX] = {
282 .type = UE_BULK,
283 .endpoint = UE_ADDR_ANY,
284 .direction = UE_DIR_IN,
285 .bufsize = MTW_MAX_RXSZ,
286 .flags = {.pipe_bof = 1,
287 .short_xfer_ok = 1,},
288 .callback = mtw_bulk_rx_callback,
289 },
290 [MTW_BULK_TX_BE] = {
291 .type = UE_BULK,
292 .endpoint = UE_ADDR_ANY,
293 .direction = UE_DIR_OUT,
294 .bufsize = MTW_MAX_TXSZ,
295 .flags = {.pipe_bof = 1,
296 .force_short_xfer = 0,},
297 .callback = mtw_bulk_tx_callback0,
298 .timeout = 5000, /* ms */
299 },
300 [MTW_BULK_TX_BK] = {
301 .type = UE_BULK,
302 .endpoint = UE_ADDR_ANY,
303 .direction = UE_DIR_OUT,
304 .bufsize = MTW_MAX_TXSZ,
305 .flags = {.pipe_bof = 1,
306 .force_short_xfer = 1,},
307 .callback = mtw_bulk_tx_callback1,
308 .timeout = 5000, /* ms */
309 },
310 [MTW_BULK_TX_VI] = {
311 .type = UE_BULK,
312 .endpoint = UE_ADDR_ANY,
313 .direction = UE_DIR_OUT,
314 .bufsize = MTW_MAX_TXSZ,
315 .flags = {.pipe_bof = 1,
316 .force_short_xfer = 1,},
317 .callback = mtw_bulk_tx_callback2,
318 .timeout = 5000, /* ms */
319 },
320 [MTW_BULK_TX_VO] = {
321 .type = UE_BULK,
322 .endpoint = UE_ADDR_ANY,
323 .direction = UE_DIR_OUT,
324 .bufsize = MTW_MAX_TXSZ,
325 .flags = {.pipe_bof = 1,
326 .force_short_xfer = 1,},
327 .callback = mtw_bulk_tx_callback3,
328 .timeout = 5000, /* ms */
329 },
330 [MTW_BULK_TX_HCCA] = {
331 .type = UE_BULK,
332 .endpoint = UE_ADDR_ANY,
333 .direction = UE_DIR_OUT,
334 .bufsize = MTW_MAX_TXSZ,
335 .flags = {.pipe_bof = 1,
336 .force_short_xfer = 1, .no_pipe_ok = 1,},
337 .callback = mtw_bulk_tx_callback4,
338 .timeout = 5000, /* ms */
339 },
340 [MTW_BULK_TX_PRIO] = {
341 .type = UE_BULK,
342 .endpoint = UE_ADDR_ANY,
343 .direction = UE_DIR_OUT,
344 .bufsize = MTW_MAX_TXSZ,
345 .flags = {.pipe_bof = 1,
346 .force_short_xfer = 1, .no_pipe_ok = 1,},
347 .callback = mtw_bulk_tx_callback5,
348 .timeout = 5000, /* ms */
349 },
350
351 [MTW_BULK_FW_CMD] = {
352 .type = UE_BULK,
353 .endpoint = UE_ADDR_ANY,
354 .direction = UE_DIR_OUT,
355 .bufsize = 0x2c44,
356 .flags = {.pipe_bof = 1,
357 .force_short_xfer = 1, .no_pipe_ok = 1,},
358 .callback = mtw_fw_callback,
359
360 },
361
362 [MTW_BULK_RAW_TX] = {
363 .type = UE_BULK,
364 .ep_index = 0,
365 .endpoint = UE_ADDR_ANY,
366 .direction = UE_DIR_OUT,
367 .bufsize = MTW_MAX_TXSZ,
368 .flags = {.pipe_bof = 1,
369 .force_short_xfer = 1, .no_pipe_ok = 1,},
370 .callback = mtw_bulk_tx_callback0,
371 .timeout = 5000, /* ms */
372 },
373
374 };
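/* Map WME access categories to the corresponding bulk TX transfer index. */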
375 static uint8_t mtw_wme_ac_xfer_map[4] = {
376 [WME_AC_BE] = MTW_BULK_TX_BE,
377 [WME_AC_BK] = MTW_BULK_TX_BK,
378 [WME_AC_VI] = MTW_BULK_TX_VI,
379 [WME_AC_VO] = MTW_BULK_TX_VO,
380 };
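/*
 * Some of these dongles first attach as a USB mass-storage "driver CD";
 * eject it so the device re-enumerates as a WLAN interface.
 */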
381 static void
382 mtw_autoinst(void *arg, struct usb_device *udev, struct usb_attach_arg *uaa)
383 {
384 struct usb_interface *iface;
385 struct usb_interface_descriptor *id;
386
387 if (uaa->dev_state != UAA_DEV_READY)
388 return;
389
390 iface = usbd_get_iface(udev, 0);
391 if (iface == NULL)
392 return;
393 id = iface->idesc;
394 if (id == NULL || id->bInterfaceClass != UICLASS_MASS)
395 return;
396 if (usbd_lookup_id_by_uaa(mtw_devs, sizeof(mtw_devs), uaa))
397 return;
398
399 if (usb_msc_eject(udev, 0, MSC_EJECT_STOPUNIT) == 0)
400 uaa->dev_state = UAA_DEV_EJECTING;
401 }
402
403 static int
404 mtw_driver_loaded(struct module *mod, int what, void *arg)
405 {
406 switch (what) {
407 case MOD_LOAD:
408 mtw_etag = EVENTHANDLER_REGISTER(usb_dev_configured,
409 mtw_autoinst, NULL, EVENTHANDLER_PRI_ANY);
410 break;
411 case MOD_UNLOAD:
412 EVENTHANDLER_DEREGISTER(usb_dev_configured, mtw_etag);
413 break;
414 default:
415 return (EOPNOTSUPP);
416 }
417 return (0);
418 }
419
420 static const char *
421 mtw_get_rf(int rev)
422 {
423 switch (rev) {
424 case MT7601_RF_7601:
425 return ("MT7601");
426 case MT7610_RF_7610:
427 return ("MT7610");
428 case MT7612_RF_7612:
429 return ("MT7612");
430 }
431 return ("unknown");
432 }
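/*
 * Power the WLAN core up or down.  Powering up also enables the WLAN
 * clock and requests an oscillator calibration.
 */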
433 static int
434 mtw_wlan_enable(struct mtw_softc *sc, int enable)
435 {
436 uint32_t tmp;
437 int error = 0;
438
439 if (enable) {
440 mtw_read(sc, MTW_WLAN_CTRL, &tmp);
441 if (sc->asic_ver == 0x7612)
442 tmp &= ~0xfffff000;
443
444 tmp &= ~MTW_WLAN_CLK_EN;
445 tmp |= MTW_WLAN_EN;
446 mtw_write(sc, MTW_WLAN_CTRL, tmp);
447 mtw_delay(sc, 2);
448
449 tmp |= MTW_WLAN_CLK_EN;
450 if (sc->asic_ver == 0x7612) {
451 tmp |= (MTW_WLAN_RESET | MTW_WLAN_RESET_RF);
452 }
453 mtw_write(sc, MTW_WLAN_CTRL, tmp);
454 mtw_delay(sc, 2);
455
456 mtw_read(sc, MTW_OSC_CTRL, &tmp);
457 tmp |= MTW_OSC_EN;
458 mtw_write(sc, MTW_OSC_CTRL, tmp);
459 tmp |= MTW_OSC_CAL_REQ;
460 mtw_write(sc, MTW_OSC_CTRL, tmp);
461 } else {
462 mtw_read(sc, MTW_WLAN_CTRL, &tmp);
463 tmp &= ~(MTW_WLAN_CLK_EN | MTW_WLAN_EN);
464 mtw_write(sc, MTW_WLAN_CTRL, tmp);
465
466 mtw_read(sc, MTW_OSC_CTRL, &tmp);
467 tmp &= ~MTW_OSC_EN;
468 mtw_write(sc, MTW_OSC_CTRL, tmp);
469 }
470 return (error);
471 }
472
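/* Read a 32-bit register through the MTW_READ_CFG vendor request. */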
473 static int
474 mtw_read_cfg(struct mtw_softc *sc, uint16_t reg, uint32_t *val)
475 {
476 usb_device_request_t req;
477 uint32_t tmp;
478 uint16_t actlen;
479 int error;
480
481 req.bmRequestType = UT_READ_VENDOR_DEVICE;
482 req.bRequest = MTW_READ_CFG;
483 USETW(req.wValue, 0);
484 USETW(req.wIndex, reg);
485 USETW(req.wLength, 4);
486 error = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, &tmp, 0,
487 &actlen, 1000);
488
489 if (error == 0)
490 *val = le32toh(tmp);
491 else
492 *val = 0xffffffff;
493 return (error);
494 }
495
496 static int
497 mtw_match(device_t self)
498 {
499 struct usb_attach_arg *uaa = device_get_ivars(self);
500
501 if (uaa->usb_mode != USB_MODE_HOST)
502 return (ENXIO);
503 if (uaa->info.bConfigIndex != 0)
504 return (ENXIO);
505 if (uaa->info.bIfaceIndex != 0)
506 return (ENXIO);
507
508 return (usbd_lookup_id_by_uaa(mtw_devs, sizeof(mtw_devs), uaa));
509 }
510
511 static int
512 mtw_attach(device_t self)
513 {
514 struct mtw_softc *sc = device_get_softc(self);
515 struct usb_attach_arg *uaa = device_get_ivars(self);
516 struct ieee80211com *ic = &sc->sc_ic;
517 uint32_t ver;
518 int i, ret;
519 uint32_t tmp;
520 uint8_t iface_index;
521 int ntries, error;
522
523 device_set_usb_desc(self);
524 sc->sc_udev = uaa->device;
525 sc->sc_dev = self;
526 sc->sc_sent = 0;
527
528 mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev),
529 MTX_NETWORK_LOCK, MTX_DEF);
530
531 iface_index = 0;
532
533 error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
534 mtw_config, MTW_N_XFER, sc, &sc->sc_mtx);
535 if (error) {
536 device_printf(sc->sc_dev,
537 "could not allocate USB transfers, "
538 "err=%s\n",
539 usbd_errstr(error));
540 goto detach;
541 }
542 for (i = 0; i < 4; i++) {
543 sc->txd_fw[i] = (struct mtw_txd_fw *)
544 malloc(sizeof(struct mtw_txd_fw),
545 M_USBDEV, M_WAITOK | M_ZERO);
546 }
547 MTW_LOCK(sc);
548 sc->sc_idx = 0;
549 mbufq_init(&sc->sc_snd, ifqmaxlen);
550
551 /* enable WLAN core */
552 if ((error = mtw_wlan_enable(sc, 1)) != 0) {
553 device_printf(sc->sc_dev, "could not enable WLAN core\n");
554 goto detach;
555 }
556
557 /* wait for the chip to settle */
558 DELAY(100);
559 for (ntries = 0; ntries < 100; ntries++) {
560 if (mtw_read(sc, MTW_ASIC_VER, &ver) != 0) {
561 goto detach;
562 }
563 if (ver != 0 && ver != 0xffffffff)
564 break;
565 DELAY(10);
566 }
567 if (ntries == 100) {
568 device_printf(sc->sc_dev,
569 "timeout waiting for NIC to initialize\n");
570 goto detach;
571 }
572 sc->asic_ver = ver >> 16;
573 sc->asic_rev = ver & 0xffff;
574 DELAY(100);
575 if (sc->asic_ver != 0x7601) {
576 device_printf(sc->sc_dev,
577 "Your revision 0x%04x is not supported yet\n",
578 sc->asic_rev);
579 goto detach;
580 }
581
582
583 if (mtw_read(sc, MTW_MAC_VER_ID, &tmp) != 0)
584 goto detach;
585 sc->mac_rev = tmp & 0xffff;
586
587 mtw_load_microcode(sc);
588 ret = msleep(&sc->fwloading, &sc->sc_mtx, 0, "fwload", 3 * hz);
589 if (ret == EWOULDBLOCK || sc->fwloading != 1) {
590 device_printf(sc->sc_dev,
591 "timeout waiting for MCU to initialize\n");
592 goto detach;
593 }
594
595 sc->sc_srom_read = mtw_efuse_read_2;
596 /* retrieve RF rev. no and various other things from EEPROM */
597 mtw_read_eeprom(sc);
598
599 device_printf(sc->sc_dev,
600 "MAC/BBP RT%04X (rev 0x%04X), RF %s (MIMO %dT%dR), address %s\n",
601 sc->asic_ver, sc->mac_rev, mtw_get_rf(sc->rf_rev), sc->ntxchains,
602 sc->nrxchains, ether_sprintf(ic->ic_macaddr));
603 DELAY(100);
604
605 //mtw_set_leds(sc,5);
606 // mtw_mcu_radio(sc,0x31,0);
607 MTW_UNLOCK(sc);
608
609
610 ic->ic_softc = sc;
611 ic->ic_name = device_get_nameunit(self);
612 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
613 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
614
615 ic->ic_caps = IEEE80211_C_STA | /* station mode supported */
616 IEEE80211_C_MONITOR | /* monitor mode supported */
617 IEEE80211_C_IBSS |
618 IEEE80211_C_HOSTAP |
619 IEEE80211_C_WDS | /* 4-address traffic works */
620 IEEE80211_C_MBSS |
621 IEEE80211_C_SHPREAMBLE | /* short preamble supported */
622 IEEE80211_C_SHSLOT | /* short slot time supported */
623 IEEE80211_C_WME | /* WME */
624 IEEE80211_C_WPA; /* WPA1|WPA2(RSN) */
625 device_printf(sc->sc_dev, "[HT] Enabling 802.11n\n");
626 ic->ic_htcaps = IEEE80211_HTC_HT
627 | IEEE80211_HTC_AMPDU
628 | IEEE80211_HTC_AMSDU
629 | IEEE80211_HTCAP_MAXAMSDU_3839
630 | IEEE80211_HTCAP_SMPS_OFF;
631
632 ic->ic_rxstream = sc->nrxchains;
633 ic->ic_txstream = sc->ntxchains;
634
635 ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_AES_CCM |
636 IEEE80211_CRYPTO_AES_OCB | IEEE80211_CRYPTO_TKIP |
637 IEEE80211_CRYPTO_TKIPMIC;
638
639 ic->ic_flags |= IEEE80211_F_DATAPAD;
640 ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS;
641 ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
642
643 mtw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
644 ic->ic_channels);
645
646 ieee80211_ifattach(ic);
647
648 ic->ic_scan_start = mtw_scan_start;
649 ic->ic_scan_end = mtw_scan_end;
650 ic->ic_set_channel = mtw_set_channel;
651 ic->ic_getradiocaps = mtw_getradiocaps;
652 ic->ic_node_alloc = mtw_node_alloc;
653 ic->ic_newassoc = mtw_newassoc;
654 ic->ic_update_mcast = mtw_update_mcast;
655 ic->ic_updateslot = mtw_updateslot;
656 ic->ic_wme.wme_update = mtw_wme_update;
657 ic->ic_raw_xmit = mtw_raw_xmit;
658 ic->ic_update_promisc = mtw_update_promisc;
659 ic->ic_vap_create = mtw_vap_create;
660 ic->ic_vap_delete = mtw_vap_delete;
661 ic->ic_transmit = mtw_transmit;
662 ic->ic_parent = mtw_parent;
663
664 ic->ic_update_chw = mtw_update_chw;
665 ic->ic_ampdu_enable = mtw_ampdu_enable;
666
667 ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr,
668 sizeof(sc->sc_txtap), MTW_TX_RADIOTAP_PRESENT,
669 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
670 MTW_RX_RADIOTAP_PRESENT);
671 TASK_INIT(&sc->cmdq_task, 0, mtw_cmdq_cb, sc);
672 TASK_INIT(&sc->ratectl_task, 0, mtw_ratectl_cb, sc);
673 usb_callout_init_mtx(&sc->ratectl_ch, &sc->sc_mtx, 0);
674
675 if (bootverbose)
676 ieee80211_announce(ic);
677
678 return (0);
679
680 detach:
681 MTW_UNLOCK(sc);
682 mtw_detach(self);
683 return (ENXIO);
684 }
685
686 static void
687 mtw_drain_mbufq(struct mtw_softc *sc)
688 {
689 struct mbuf *m;
690 struct ieee80211_node *ni;
691
692 MTW_LOCK_ASSERT(sc, MA_OWNED);
693 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
694 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
695 m->m_pkthdr.rcvif = NULL;
696 ieee80211_free_node(ni);
697 m_freem(m);
698 }
699 }
700
701 static int
702 mtw_detach(device_t self)
703 {
704 struct mtw_softc *sc = device_get_softc(self);
705 struct ieee80211com *ic = &sc->sc_ic;
706 int i;
707 MTW_LOCK(sc);
708 mtw_reset(sc);
709 DELAY(10000);
710 sc->sc_detached = 1;
711 MTW_UNLOCK(sc);
712
713
714 /* stop all USB transfers */
715 for (i = 0; i < MTW_N_XFER; i++)
716 usbd_transfer_drain(sc->sc_xfer[i]);
717
718 MTW_LOCK(sc);
719 sc->ratectl_run = MTW_RATECTL_OFF;
720 sc->cmdq_run = sc->cmdq_key_set = MTW_CMDQ_ABORT;
721
722 /* free TX list, if any */
723 if (ic->ic_nrunning > 0)
724 for (i = 0; i < MTW_EP_QUEUES; i++)
725 mtw_unsetup_tx_list(sc, &sc->sc_epq[i]);
726
727 /* Free TX queue */
728 mtw_drain_mbufq(sc);
729 MTW_UNLOCK(sc);
730 if (sc->sc_ic.ic_softc == sc) {
731 /* drain tasks */
732 usb_callout_drain(&sc->ratectl_ch);
733 ieee80211_draintask(ic, &sc->cmdq_task);
734 ieee80211_draintask(ic, &sc->ratectl_task);
735 ieee80211_ifdetach(ic);
736 }
737 for (i = 0; i < 4; i++) {
738 free(sc->txd_fw[i], M_USBDEV);
739 }
740 firmware_unregister("/mediatek/mt7601u");
741 mtx_destroy(&sc->sc_mtx);
742
743 return (0);
744 }
745
746 static struct ieee80211vap *
747 mtw_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
748 enum ieee80211_opmode opmode, int flags,
749 const uint8_t bssid[IEEE80211_ADDR_LEN],
750 const uint8_t mac[IEEE80211_ADDR_LEN])
751 {
752 struct mtw_softc *sc = ic->ic_softc;
753 struct mtw_vap *rvp;
754 struct ieee80211vap *vap;
755 int i;
756
757 if (sc->rvp_cnt >= MTW_VAP_MAX) {
758 device_printf(sc->sc_dev, "number of VAPs maxed out\n");
759 return (NULL);
760 }
761
762 switch (opmode) {
763 case IEEE80211_M_STA:
764 /* enable s/w bmiss handling for sta mode */
765 flags |= IEEE80211_CLONE_NOBEACONS;
766 /* fall through */
767 case IEEE80211_M_IBSS:
768 case IEEE80211_M_MONITOR:
769 case IEEE80211_M_HOSTAP:
770 case IEEE80211_M_MBSS:
771 /* other than WDS vaps, only one at a time */
772 if (!TAILQ_EMPTY(&ic->ic_vaps))
773 return (NULL);
774 break;
775 case IEEE80211_M_WDS:
776 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
777 if (vap->iv_opmode != IEEE80211_M_HOSTAP)
778 continue;
779 /* WDS vaps always share the local MAC address. */
780 flags &= ~IEEE80211_CLONE_BSSID;
781 break;
782 }
783 if (vap == NULL) {
784 device_printf(sc->sc_dev,
785 "wds only supported in ap mode\n");
786 return (NULL);
787 }
788 break;
789 default:
790 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
791 return (NULL);
792 }
793
794 rvp = malloc(sizeof(struct mtw_vap), M_80211_VAP, M_WAITOK | M_ZERO);
795 vap = &rvp->vap;
796
797 if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid) !=
798 0) {
799 /* out of memory */
800 free(rvp, M_80211_VAP);
801 return (NULL);
802 }
803
804 vap->iv_update_beacon = mtw_update_beacon;
805 vap->iv_max_aid = MTW_WCID_MAX;
806
807 /*
808 * The linux rt2800 driver limits 1 stream devices to a 32KB
809 * RX AMPDU.
810 */
811 if (ic->ic_rxstream > 1)
812 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
813 else
814 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_32K;
815 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_2; /* 2uS */
816
817 /*
818  * To delete the right key from h/w, we need the wcid.
819  * Luckily, there is unused space in ieee80211_key{} (wk_pad),
820  * and the matching wcid will be written there.  So, cast
821  * some spells to remove 'const' from ieee80211_key{}.
822 */
823 vap->iv_key_delete = (void *)mtw_key_delete;
824 vap->iv_key_set = (void *)mtw_key_set;
825
826 // override state transition machine
827 rvp->newstate = vap->iv_newstate;
828 vap->iv_newstate = mtw_newstate;
829 if (opmode == IEEE80211_M_IBSS) {
830 rvp->recv_mgmt = vap->iv_recv_mgmt;
831 vap->iv_recv_mgmt = mtw_recv_mgmt;
832 }
833
834 ieee80211_ratectl_init(vap);
835 ieee80211_ratectl_setinterval(vap, 1000); // 1 second
836
837 /* complete setup */
838 ieee80211_vap_attach(vap, mtw_media_change, ieee80211_media_status,
839 mac);
840
841 /* make sure id is always unique */
842 for (i = 0; i < MTW_VAP_MAX; i++) {
843 if ((sc->rvp_bmap & 1 << i) == 0) {
844 sc->rvp_bmap |= 1 << i;
845 rvp->rvp_id = i;
846 break;
847 }
848 }
849 if (sc->rvp_cnt++ == 0)
850 ic->ic_opmode = opmode;
851
852 if (opmode == IEEE80211_M_HOSTAP)
853 sc->cmdq_run = MTW_CMDQ_GO;
854
855 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "rvp_id=%d bmap=%x rvp_cnt=%d\n",
856 rvp->rvp_id, sc->rvp_bmap, sc->rvp_cnt);
857
858 return (vap);
859 }
860
861 static void
862 mtw_vap_delete(struct ieee80211vap *vap)
863 {
864 struct mtw_vap *rvp = MTW_VAP(vap);
865 struct ieee80211com *ic;
866 struct mtw_softc *sc;
867 uint8_t rvp_id;
868
869 if (vap == NULL)
870 return;
871
872 ic = vap->iv_ic;
873 sc = ic->ic_softc;
874
875 MTW_LOCK(sc);
876 m_freem(rvp->beacon_mbuf);
877 rvp->beacon_mbuf = NULL;
878
879 rvp_id = rvp->rvp_id;
880 sc->ratectl_run &= ~(1 << rvp_id);
881 sc->rvp_bmap &= ~(1 << rvp_id);
882 mtw_set_region_4(sc, MTW_SKEY(rvp_id, 0), 0, 256);
883 mtw_set_region_4(sc, (0x7800 + (rvp_id) * 512), 0, 512);
884 --sc->rvp_cnt;
885
886 MTW_DPRINTF(sc, MTW_DEBUG_STATE,
887 "vap=%p rvp_id=%d bmap=%x rvp_cnt=%d\n", vap, rvp_id, sc->rvp_bmap,
888 sc->rvp_cnt);
889
890 MTW_UNLOCK(sc);
891
892 ieee80211_ratectl_deinit(vap);
893 ieee80211_vap_detach(vap);
894 free(rvp, M_80211_VAP);
895 }
896
897 /*
898  * A number of functions need to be called from process context.
899  * Rather than creating a taskqueue event for each of those functions,
900  * this is an all-for-one taskqueue callback function.  It guarantees
901  * that deferred functions are executed in the same order they were
902  * enqueued.
903  * '& MTW_CMDQ_MASQ' wraps the cmdq[] index.
904 */
905 static void
906 mtw_cmdq_cb(void *arg, int pending)
907 {
908 struct mtw_softc *sc = arg;
909 uint8_t i;
910 /* call cmdq[].func locked */
911 MTW_LOCK(sc);
912 for (i = sc->cmdq_exec; sc->cmdq[i].func && pending;
913 i = sc->cmdq_exec, pending--) {
914 MTW_DPRINTF(sc, MTW_DEBUG_CMD, "cmdq_exec=%d pending=%d\n", i,
915 pending);
916 if (sc->cmdq_run == MTW_CMDQ_GO) {
917 /*
918 * If arg0 is NULL, callback func needs more
919 * than one arg. So, pass ptr to cmdq struct.
920 */
921 if (sc->cmdq[i].arg0)
922 sc->cmdq[i].func(sc->cmdq[i].arg0);
923 else
924 sc->cmdq[i].func(&sc->cmdq[i]);
925 }
926 sc->cmdq[i].arg0 = NULL;
927 sc->cmdq[i].func = NULL;
928 sc->cmdq_exec++;
929 sc->cmdq_exec &= MTW_CMDQ_MASQ;
930 }
931 MTW_UNLOCK(sc);
932 }
933
934 static void
935 mtw_setup_tx_list(struct mtw_softc *sc, struct mtw_endpoint_queue *pq)
936 {
937 struct mtw_tx_data *data;
938
939 memset(pq, 0, sizeof(*pq));
940
941 STAILQ_INIT(&pq->tx_qh);
942 STAILQ_INIT(&pq->tx_fh);
943
944 for (data = &pq->tx_data[0]; data < &pq->tx_data[MTW_TX_RING_COUNT];
945 data++) {
946 data->sc = sc;
947 STAILQ_INSERT_TAIL(&pq->tx_fh, data, next);
948 }
949 pq->tx_nfree = MTW_TX_RING_COUNT;
950 }
951
952 static void
953 mtw_unsetup_tx_list(struct mtw_softc *sc, struct mtw_endpoint_queue *pq)
954 {
955 struct mtw_tx_data *data;
956 /* make sure any subsequent use of the queues will fail */
957 pq->tx_nfree = 0;
958
959 STAILQ_INIT(&pq->tx_fh);
960 STAILQ_INIT(&pq->tx_qh);
961
962 /* free up all node references and mbufs */
963 for (data = &pq->tx_data[0]; data < &pq->tx_data[MTW_TX_RING_COUNT];
964 data++) {
965 if (data->m != NULL) {
966 m_freem(data->m);
967 data->m = NULL;
968 }
969 if (data->ni != NULL) {
970 ieee80211_free_node(data->ni);
971 data->ni = NULL;
972 }
973 }
974 }
975
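/*
 * Write the firmware IVB (presumably the initial vector block) to the MCU
 * through a vendor request; it is sent once the code sections have been
 * staged.
 */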
976 static int
977 mtw_write_ivb(struct mtw_softc *sc, void *buf, uint16_t len)
978 {
979 usb_device_request_t req;
980 uint16_t actlen;
981 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
982 req.bRequest = MTW_RESET;
983 USETW(req.wValue, 0x12);
984 USETW(req.wIndex, 0);
985 USETW(req.wLength, len);
986
987 int error = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, buf,
988 0, &actlen, 1000);
989
990 return (error);
991 }
992
993 static int
994 mtw_write_cfg(struct mtw_softc *sc, uint16_t reg, uint32_t val)
995 {
996 usb_device_request_t req;
997 int error;
998
999 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1000 req.bRequest = MTW_WRITE_CFG;
1001 USETW(req.wValue, 0);
1002 USETW(req.wIndex, reg);
1003 USETW(req.wLength, 4);
1004 val = htole32(val);
1005 error = usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, &val);
1006 return (error);
1007 }
1008
1009 static int
1010 mtw_usb_dma_write(struct mtw_softc *sc, uint32_t val)
1011 {
1012 // if (sc->asic_ver == 0x7612)
1013 // return mtw_write_cfg(sc, MTW_USB_U3DMA_CFG, val);
1014 // else
1015 return (mtw_write(sc, MTW_USB_DMA_CFG, val));
1016 }
1017
1018 static void
1019 mtw_ucode_setup(struct mtw_softc *sc)
1020 {
1021
1022 mtw_usb_dma_write(sc, (MTW_USB_TX_EN | MTW_USB_RX_EN));
1023 mtw_write(sc, MTW_FCE_PSE_CTRL, 1);
1024 mtw_write(sc, MTW_TX_CPU_FCE_BASE, 0x400230);
1025 mtw_write(sc, MTW_TX_CPU_FCE_MAX_COUNT, 1);
1026 mtw_write(sc, MTW_MCU_FW_IDX, 1);
1027 mtw_write(sc, MTW_FCE_PDMA, 0x44);
1028 mtw_write(sc, MTW_FCE_SKIP_FS, 3);
1029 }
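/*
 * Stage the firmware image as 0x2c44-byte chunks (the MTW_BULK_FW_CMD
 * buffer size) in txd_fw[], save the IVB and kick off the firmware bulk
 * transfer, whose USB callback pushes the chunks to the MCU.
 */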
1030 static int
1031 mtw_ucode_write(struct mtw_softc *sc, const uint8_t *fw, const uint8_t *ivb,
1032 int32_t len, uint32_t offset)
1033 {
1034
1035 // struct usb_attach_arg *uaa = device_get_ivars(sc->sc_dev);
1036 #if 0 // firmware not tested
1037
1038 if (sc->asic_ver == 0x7612 && offset >= 0x90000)
1039 blksz = 0x800; /* MT7612 ROM Patch */
1040
1041 xfer = usbd_alloc_xfer(sc->sc_udev);
1042 if (xfer == NULL) {
1043 error = ENOMEM;
1044 goto fail;
1045 }
1046 buf = usbd_alloc_buffer(xfer, blksz + 12);
1047 if (buf == NULL) {
1048 error = ENOMEM;
1049 goto fail;
1050 }
1051 #endif
1052
1053
1054
1055 int mlen;
1056 int idx = 0;
1057
1058 mlen = 0x2c44;
1059
1060 while (len > 0) {
1061
1062 if (len < 0x2c44 && len > 0) {
1063 mlen = len;
1064 }
1065
1066 sc->txd_fw[idx]->len = htole16(mlen);
1067 sc->txd_fw[idx]->flags = htole16(MTW_TXD_DATA | MTW_TXD_MCU);
1068
1069 memcpy(&sc->txd_fw[idx]->fw, fw, mlen);
1070 // memcpy(&txd[1], fw, mlen);
1071 // memset(&txd[1] + mlen, 0, MTW_DMA_PAD);
1072 // mtw_write_cfg(sc, MTW_MCU_DMA_ADDR, offset
1073 //+sent); 1mtw_write_cfg(sc, MTW_MCU_DMA_LEN, (mlen << 16));
1074
1075 // sc->sc_fw_data[idx]->len=htole16(mlen);
1076
1077 // memcpy(tmpbuf,fw,mlen);
1078 // memset(tmpbuf+mlen,0,MTW_DMA_PAD);
1079 // memcpy(sc->sc_fw_data[idx].buf, fw, mlen);
1080
1081 fw += mlen;
1082 len -= mlen;
1083 // sent+=mlen;
1084 idx++;
1085 }
1086 sc->sc_sent = 0;
1087 memcpy(sc->sc_ivb_1, ivb, MTW_MCU_IVB_LEN);
1088
1089 usbd_transfer_start(sc->sc_xfer[7]);
1090
1091 return (0);
1092 }
1093
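/*
 * Load the MCU firmware.  If the MCU already reports MTW_MCU_READY the
 * upload is skipped; otherwise the image is fetched with firmware(9),
 * sanity-checked and handed to mtw_ucode_write().
 */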
1094 static void
1095 mtw_load_microcode(void *arg)
1096 {
1097
1098 struct mtw_softc *sc = (struct mtw_softc *)arg;
1099 const struct mtw_ucode_hdr *hdr;
1100 // onst struct mtw_ucode *fw = NULL;
1101 const char *fwname;
1102 size_t size;
1103 int error = 0;
1104 uint32_t tmp, iofs = 0x40;
1105 // int ntries;
1106 int dlen, ilen;
1107 device_printf(sc->sc_dev, "ASIC version: 0x%hx\n", sc->asic_ver);
1108 /* is firmware already running? */
1109 mtw_read_cfg(sc, MTW_MCU_DMA_ADDR, &tmp);
1110 if (tmp == MTW_MCU_READY) {
1111 return;
1112 }
1113 if (sc->asic_ver == 0x7612) {
1114 fwname = "mtw-mt7662u_rom_patch";
1115
1116 const struct firmware *firmware = firmware_get_flags(fwname,FIRMWARE_GET_NOWARN);
1117 if (firmware == NULL) {
1118 device_printf(sc->sc_dev,
1119 "failed to load firmware file %s\n",
1120 fwname);
1121 return;
1122 }
1123 size = firmware->datasize;
1124
1125 const struct mtw_ucode *fw = (const struct mtw_ucode *)
1126 firmware->data;
1127 hdr = (const struct mtw_ucode_hdr *)&fw->hdr;
1128 // memcpy(fw,(const unsigned char*)firmware->data +
1129 // 0x1e,size-0x1e);
1130 ilen = size - 0x1e;
1131
1132 mtw_ucode_setup(sc);
1133
1134 if ((error = mtw_ucode_write(sc, firmware->data, fw->ivb, ilen,
1135 0x90000)) != 0) {
1136 goto fail;
1137 }
1138 mtw_usb_dma_write(sc, 0x00e41814);
1139 }
1140
1141 fwname = "/mediatek/mt7601u.bin";
1142 iofs = 0x40;
1143 // dofs = 0;
1144 if (sc->asic_ver == 0x7612) {
1145 fwname = "mtw-mt7662u";
1146 iofs = 0x80040;
1147 // dofs = 0x110800;
1148 } else if (sc->asic_ver == 0x7610) {
1149 fwname = "mt7610u";
1150 // dofs = 0x80000;
1151 }
1152 MTW_UNLOCK(sc);
1153 const struct firmware *firmware = firmware_get_flags(fwname, FIRMWARE_GET_NOWARN);
1154
1155 if (firmware == NULL) {
1156 device_printf(sc->sc_dev,
1157 "failed to load firmware file %s\n",
1158 fwname);
1159 MTW_LOCK(sc);
1160 return;
1161 }
1162 MTW_LOCK(sc);
1163 size = firmware->datasize;
1164 MTW_DPRINTF(sc, MTW_DEBUG_FIRMWARE, "firmware size:%zu\n", size);
1165 const struct mtw_ucode *fw = (const struct mtw_ucode *)firmware->data;
1166
1167 if (size < sizeof(struct mtw_ucode_hdr)) {
1168 device_printf(sc->sc_dev, "firmware header too short\n");
1169 goto fail;
1170 }
1171
1172 hdr = (const struct mtw_ucode_hdr *)&fw->hdr;
1173
1174 if (size < sizeof(struct mtw_ucode_hdr) + le32toh(hdr->ilm_len) +
1175 le32toh(hdr->dlm_len)) {
1176 device_printf(sc->sc_dev, "firmware payload too short\n");
1177 goto fail;
1178 }
1179
1180 ilen = le32toh(hdr->ilm_len) - MTW_MCU_IVB_LEN;
1181 dlen = le32toh(hdr->dlm_len);
1182
1183 if (ilen > size || dlen > size) {
1184 device_printf(sc->sc_dev, "firmware payload too large\n");
1185 goto fail;
1186 }
1187
1188 mtw_write(sc, MTW_FCE_PDMA, 0);
1189 mtw_write(sc, MTW_FCE_PSE_CTRL, 0);
1190 mtw_ucode_setup(sc);
1191
1192 if ((error = mtw_ucode_write(sc, fw->data, fw->ivb, ilen, iofs)) != 0)
1193 device_printf(sc->sc_dev, "could not write ucode, error=%d\n",
1194 error);
1195
1196 device_printf(sc->sc_dev, "loaded firmware ver %.8x %.8x %s\n",
1197 le32toh(hdr->fw_ver), le32toh(hdr->build_ver), hdr->build_time);
1198
1199 return;
1200 fail:
1201 return;
1202 }
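/*
 * Issue a synchronous control request, retrying up to five times with a
 * short delay between attempts.
 */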
1203 static usb_error_t
1204 mtw_do_request(struct mtw_softc *sc, struct usb_device_request *req, void *data)
1205 {
1206 usb_error_t err;
1207 int ntries = 5;
1208
1209 MTW_LOCK_ASSERT(sc, MA_OWNED);
1210
1211 while (ntries--) {
1212 err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data,
1213 0, NULL, 2000); /* 2000 ms timeout */
1214 if (err == 0)
1215 break;
1216 MTW_DPRINTF(sc, MTW_DEBUG_USB,
1217 "Control request failed, %s (retrying)\n",
1218 usbd_errstr(err));
1219 mtw_delay(sc, 10);
1220 }
1221 return (err);
1222 }
1223
1224 static int
1225 mtw_read(struct mtw_softc *sc, uint16_t reg, uint32_t *val)
1226 {
1227 uint32_t tmp;
1228 int error;
1229
1230 error = mtw_read_region_1(sc, reg, (uint8_t *)&tmp, sizeof tmp);
1231 if (error == 0)
1232 *val = le32toh(tmp);
1233 else
1234 *val = 0xffffffff;
1235 return (error);
1236 }
1237
1238 static int
1239 mtw_read_region_1(struct mtw_softc *sc, uint16_t reg, uint8_t *buf, int len)
1240 {
1241 usb_device_request_t req;
1242
1243 req.bmRequestType = UT_READ_VENDOR_DEVICE;
1244 req.bRequest = MTW_READ_REGION_1;
1245 USETW(req.wValue, 0);
1246 USETW(req.wIndex, reg);
1247 USETW(req.wLength, len);
1248
1249 return (mtw_do_request(sc, &req, buf));
1250 }
1251
1252 static int
1253 mtw_write_2(struct mtw_softc *sc, uint16_t reg, uint16_t val)
1254 {
1255
1256 usb_device_request_t req;
1257 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1258 req.bRequest = MTW_WRITE_2;
1259 USETW(req.wValue, val);
1260 USETW(req.wIndex, reg);
1261 USETW(req.wLength, 0);
1262 return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, NULL));
1263 }
1264
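/*
 * 32-bit register writes are issued as two 16-bit MTW_WRITE_2 vendor
 * requests, low half first.
 */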
1265 static int
1266 mtw_write(struct mtw_softc *sc, uint16_t reg, uint32_t val)
1267 {
1268
1269 int error;
1270
1271 if ((error = mtw_write_2(sc, reg, val & 0xffff)) == 0) {
1272
1273 error = mtw_write_2(sc, reg + 2, val >> 16);
1274 }
1275
1276 return (error);
1277 }
1278
1279 static int
1280 mtw_write_region_1(struct mtw_softc *sc, uint16_t reg, const uint8_t *buf,
1281 int len)
1282 {
1283
1284 usb_device_request_t req;
1285 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1286 req.bRequest = MTW_WRITE_REGION_1;
1287 USETW(req.wValue, 0);
1288 USETW(req.wIndex, reg);
1289 USETW(req.wLength, len);
1290 return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req,
1291 __DECONST(uint8_t *, buf)));
1292 }
1293
1294 static int
1295 mtw_set_region_4(struct mtw_softc *sc, uint16_t reg, uint32_t val, int count)
1296 {
1297 int i, error = 0;
1298
1299 KASSERT((count & 3) == 0, ("mtw_set_region_4: Invalid data length.\n"));
1300 for (i = 0; i < count && error == 0; i += 4)
1301 error = mtw_write(sc, reg + i, val);
1302 return (error);
1303 }
1304
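/*
 * Read a 16-bit word from the eFUSE ROM: kick a 16-byte block fetch into
 * EFUSE_DATA0-3, poll for completion, then pick the requested word out of
 * the matching 32-bit register.
 */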
1305 static int
1306 mtw_efuse_read_2(struct mtw_softc *sc, uint16_t addr, uint16_t *val)
1307 {
1308
1309 uint32_t tmp;
1310 uint16_t reg;
1311 int error, ntries;
1312
1313 if ((error = mtw_read(sc, MTW_EFUSE_CTRL, &tmp)) != 0)
1314 return (error);
1315
1316 addr *= 2;
1317 /*
1318 * Read one 16-byte block into registers EFUSE_DATA[0-3]:
1319 * DATA0: 3 2 1 0
1320 * DATA1: 7 6 5 4
1321 * DATA2: B A 9 8
1322 * DATA3: F E D C
1323 */
1324 tmp &= ~(MTW_EFSROM_MODE_MASK | MTW_EFSROM_AIN_MASK);
1325 tmp |= (addr & ~0xf) << MTW_EFSROM_AIN_SHIFT | MTW_EFSROM_KICK;
1326 mtw_write(sc, MTW_EFUSE_CTRL, tmp);
1327 for (ntries = 0; ntries < 100; ntries++) {
1328 if ((error = mtw_read(sc, MTW_EFUSE_CTRL, &tmp)) != 0)
1329 return (error);
1330 if (!(tmp & MTW_EFSROM_KICK))
1331 break;
1332 DELAY(2);
1333 }
1334 if (ntries == 100)
1335 return (ETIMEDOUT);
1336
1337 if ((tmp & MTW_EFUSE_AOUT_MASK) == MTW_EFUSE_AOUT_MASK) {
1338 *val = 0xffff; // address not found
1339 return (0);
1340 }
1341 // determine to which 32-bit register our 16-bit word belongs
1342 reg = MTW_EFUSE_DATA0 + (addr & 0xc);
1343 if ((error = mtw_read(sc, reg, &tmp)) != 0)
1344 return (error);
1345
1346 *val = (addr & 2) ? tmp >> 16 : tmp & 0xffff;
1347 return (0);
1348 }
1349
1350 static __inline int
1351 mtw_srom_read(struct mtw_softc *sc, uint16_t addr, uint16_t *val)
1352 {
1353 /* either eFUSE ROM or EEPROM */
1354 return (sc->sc_srom_read(sc, addr, val));
1355 }
1356
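/*
 * Indirect BBP register access: wait for the BBP_CSR busy (KICK) bit to
 * clear, post the command, then poll again before latching the result.
 */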
1357 static int
1358 mtw_bbp_read(struct mtw_softc *sc, uint8_t reg, uint8_t *val)
1359 {
1360 uint32_t tmp;
1361 int ntries, error;
1362
1363 for (ntries = 0; ntries < 10; ntries++) {
1364 if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
1365 return (error);
1366 if (!(tmp & MTW_BBP_CSR_KICK))
1367 break;
1368 }
1369 if (ntries == 10)
1370 return (ETIMEDOUT);
1371
1372 tmp = MTW_BBP_CSR_READ | MTW_BBP_CSR_KICK | reg << 8;
1373 if ((error = mtw_write(sc, MTW_BBP_CSR, tmp)) != 0)
1374 return (error);
1375
1376 for (ntries = 0; ntries < 10; ntries++) {
1377 if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
1378 return (error);
1379 if (!(tmp & MTW_BBP_CSR_KICK))
1380 break;
1381 }
1382 if (ntries == 10)
1383 return (ETIMEDOUT);
1384
1385 *val = tmp & 0xff;
1386 return (0);
1387 }
1388
1389 static int
1390 mtw_bbp_write(struct mtw_softc *sc, uint8_t reg, uint8_t val)
1391 {
1392 uint32_t tmp;
1393 int ntries, error;
1394
1395 for (ntries = 0; ntries < 10; ntries++) {
1396 if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
1397 return (error);
1398 if (!(tmp & MTW_BBP_CSR_KICK))
1399 break;
1400 }
1401 if (ntries == 10)
1402 return (ETIMEDOUT);
1403
1404 tmp = MTW_BBP_CSR_KICK | reg << 8 | val;
1405 return (mtw_write(sc, MTW_BBP_CSR, tmp));
1406 }
1407
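/*
 * Send a command to the MCU: wrap the payload in a TXD flagged
 * MTW_TXD_CMD | MTW_TXD_MCU and push it through the firmware/command
 * bulk pipe.
 */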
1408 static int
1409 mtw_mcu_cmd(struct mtw_softc *sc, u_int8_t cmd, void *buf, int len)
1410 {
1411 sc->sc_idx = 0;
1412 sc->txd_fw[sc->sc_idx]->len = htole16(
1413 len + 8);
1414 sc->txd_fw[sc->sc_idx]->flags = htole16(MTW_TXD_CMD | MTW_TXD_MCU |
1415 (cmd & 0x1f) << MTW_TXD_CMD_SHIFT | (0 & 0xf));
1416
1417 memset(&sc->txd_fw[sc->sc_idx]->fw, 0, 2004);
1418 memcpy(&sc->txd_fw[sc->sc_idx]->fw, buf, len);
1419 usbd_transfer_start(sc->sc_xfer[7]);
1420 return (0);
1421 }
1422
1423 /*
1424 * Add `delta' (signed) to each 4-bit sub-word of a 32-bit word.
1425 * Used to adjust per-rate Tx power registers.
1426 */
1427 static __inline uint32_t
1428 b4inc(uint32_t b32, int8_t delta)
1429 {
1430 int8_t i, b4;
1431
1432 for (i = 0; i < 8; i++) {
1433 b4 = b32 & 0xf;
1434 b4 += delta;
1435 if (b4 < 0)
1436 b4 = 0;
1437 else if (b4 > 0xf)
1438 b4 = 0xf;
1439 b32 = b32 >> 4 | b4 << 28;
1440 }
1441 return (b32);
1442 }
1443 static void
1444 mtw_get_txpower(struct mtw_softc *sc)
1445 {
1446 uint16_t val;
1447 int i;
1448
1449 /* Read power settings for 2GHz channels. */
1450 for (i = 0; i < 14; i += 2) {
1451 mtw_srom_read(sc, MTW_EEPROM_PWR2GHZ_BASE1 + i / 2, &val);
1452 sc->txpow1[i + 0] = (int8_t)(val & 0xff);
1453 sc->txpow1[i + 1] = (int8_t)(val >> 8);
1454 mtw_srom_read(sc, MTW_EEPROM_PWR2GHZ_BASE2 + i / 2, &val);
1455 sc->txpow2[i + 0] = (int8_t)(val & 0xff);
1456 sc->txpow2[i + 1] = (int8_t)(val >> 8);
1457 }
1458 /* Fix broken Tx power entries. */
1459 for (i = 0; i < 14; i++) {
1460 if (sc->txpow1[i] < 0 || sc->txpow1[i] > 27)
1461 sc->txpow1[i] = 5;
1462 if (sc->txpow2[i] < 0 || sc->txpow2[i] > 27)
1463 sc->txpow2[i] = 5;
1464 MTW_DPRINTF(sc, MTW_DEBUG_TXPWR,
1465 "chan %d: power1=%d, power2=%d\n", mt7601_rf_chan[i].chan,
1466 sc->txpow1[i], sc->txpow2[i]);
1467 }
1468 }
1469
1470 struct ieee80211_node *
1471 mtw_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
1472 {
1473 return (malloc(sizeof(struct mtw_node), M_80211_NODE,
1474 M_NOWAIT | M_ZERO));
1475 }
1476 static int
1477 mtw_read_eeprom(struct mtw_softc *sc)
1478 {
1479 struct ieee80211com *ic = &sc->sc_ic;
1480 int8_t delta_2ghz, delta_5ghz;
1481 uint16_t val;
1482 int ridx, ant;
1483
1484 sc->sc_srom_read = mtw_efuse_read_2;
1485
1486 /* read RF information */
1487 mtw_srom_read(sc, MTW_EEPROM_CHIPID, &val);
1488 sc->rf_rev = val;
1489 mtw_srom_read(sc, MTW_EEPROM_ANTENNA, &val);
1490 sc->ntxchains = (val >> 4) & 0xf;
1491 sc->nrxchains = val & 0xf;
1492 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "EEPROM RF rev=0x%02x chains=%dT%dR\n",
1493 sc->rf_rev, sc->ntxchains, sc->nrxchains);
1494
1495 /* read ROM version */
1496 mtw_srom_read(sc, MTW_EEPROM_VERSION, &val);
1497 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "EEPROM rev=%d, FAE=%d\n", val & 0xff,
1498 val >> 8);
1499
1500 /* read MAC address */
1501 mtw_srom_read(sc, MTW_EEPROM_MAC01, &val);
1502 ic->ic_macaddr[0] = val & 0xff;
1503 ic->ic_macaddr[1] = val >> 8;
1504 mtw_srom_read(sc, MTW_EEPROM_MAC23, &val);
1505 ic->ic_macaddr[2] = val & 0xff;
1506 ic->ic_macaddr[3] = val >> 8;
1507 mtw_srom_read(sc, MTW_EEPROM_MAC45, &val);
1508 ic->ic_macaddr[4] = val & 0xff;
1509 ic->ic_macaddr[5] = val >> 8;
1510 #if 0
1511 printf("eFUSE ROM\n00: ");
1512 for (int i = 0; i < 256; i++) {
1513 if (((i % 8) == 0) && i > 0)
1514 printf("\n%02x: ", i);
1515 mtw_srom_read(sc, i, &val);
1516 printf(" %04x", val);
1517 }
1518 printf("\n");
1519 #endif
1520 /* check if RF supports automatic Tx access gain control */
1521 mtw_srom_read(sc, MTW_EEPROM_CONFIG, &val);
1522 device_printf(sc->sc_dev, "EEPROM CFG 0x%04x\n", val);
1523 if ((val & 0xff) != 0xff) {
1524 sc->ext_5ghz_lna = (val >> 3) & 1;
1525 sc->ext_2ghz_lna = (val >> 2) & 1;
1526 /* check if RF supports automatic Tx access gain control */
1527 sc->calib_2ghz = sc->calib_5ghz = (val >> 1) & 1;
1528 /* check if we have a hardware radio switch */
1529 sc->rfswitch = val & 1;
1530 }
1531
1532 /* read RF frequency offset from EEPROM */
1533 mtw_srom_read(sc, MTW_EEPROM_FREQ_OFFSET, &val);
1534 if ((val & 0xff) != 0xff)
1535 sc->rf_freq_offset = val;
1536 else
1537 sc->rf_freq_offset = 0;
1538 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "frequency offset 0x%x\n",
1539 sc->rf_freq_offset);
1540
1541 /* Read Tx power settings. */
1542 mtw_get_txpower(sc);
1543
1544 /* read Tx power compensation for each Tx rate */
1545 mtw_srom_read(sc, MTW_EEPROM_DELTAPWR, &val);
1546 delta_2ghz = delta_5ghz = 0;
1547 if ((val & 0xff) != 0xff && (val & 0x80)) {
1548 delta_2ghz = val & 0xf;
1549 if (!(val & 0x40)) /* negative number */
1550 delta_2ghz = -delta_2ghz;
1551 }
1552 val >>= 8;
1553 if ((val & 0xff) != 0xff && (val & 0x80)) {
1554 delta_5ghz = val & 0xf;
1555 if (!(val & 0x40)) /* negative number */
1556 delta_5ghz = -delta_5ghz;
1557 }
1558 MTW_DPRINTF(sc, MTW_DEBUG_ROM | MTW_DEBUG_TXPWR,
1559 "power compensation=%d (2GHz), %d (5GHz)\n", delta_2ghz,
1560 delta_5ghz);
1561
1562 for (ridx = 0; ridx < 5; ridx++) {
1563 uint32_t reg;
1564
1565 mtw_srom_read(sc, MTW_EEPROM_RPWR + ridx * 2, &val);
1566 reg = val;
1567 mtw_srom_read(sc, MTW_EEPROM_RPWR + ridx * 2 + 1, &val);
1568 reg |= (uint32_t)val << 16;
1569
1570 sc->txpow20mhz[ridx] = reg;
1571 sc->txpow40mhz_2ghz[ridx] = b4inc(reg, delta_2ghz);
1572 sc->txpow40mhz_5ghz[ridx] = b4inc(reg, delta_5ghz);
1573
1574 MTW_DPRINTF(sc, MTW_DEBUG_ROM | MTW_DEBUG_TXPWR,
1575 "ridx %d: power 20MHz=0x%08x, 40MHz/2GHz=0x%08x, "
1576 "40MHz/5GHz=0x%08x\n",
1577 ridx, sc->txpow20mhz[ridx], sc->txpow40mhz_2ghz[ridx],
1578 sc->txpow40mhz_5ghz[ridx]);
1579 }
1580
1581 /* read RSSI offsets and LNA gains from EEPROM */
1582 val = 0;
1583 mtw_srom_read(sc, MTW_EEPROM_RSSI1_2GHZ, &val);
1584 sc->rssi_2ghz[0] = val & 0xff; /* Ant A */
1585 sc->rssi_2ghz[1] = val >> 8; /* Ant B */
1586 mtw_srom_read(sc, MTW_EEPROM_RSSI2_2GHZ, &val);
1587 /*
1588 * On RT3070 chips (limited to 2 Rx chains), this ROM
1589 * field contains the Tx mixer gain for the 2GHz band.
1590 */
1591 if ((val & 0xff) != 0xff)
1592 sc->txmixgain_2ghz = val & 0x7;
1593 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "tx mixer gain=%u (2GHz)\n",
1594 sc->txmixgain_2ghz);
1595 sc->lna[2] = val >> 8; /* channel group 2 */
1596 mtw_srom_read(sc, MTW_EEPROM_RSSI1_5GHZ, &val);
1597 sc->rssi_5ghz[0] = val & 0xff; /* Ant A */
1598 sc->rssi_5ghz[1] = val >> 8; /* Ant B */
1599 mtw_srom_read(sc, MTW_EEPROM_RSSI2_5GHZ, &val);
1600 sc->rssi_5ghz[2] = val & 0xff; /* Ant C */
1601
1602 sc->lna[3] = val >> 8; /* channel group 3 */
1603
1604 mtw_srom_read(sc, MTW_EEPROM_LNA, &val);
1605 sc->lna[0] = val & 0xff; /* channel group 0 */
1606 sc->lna[1] = val >> 8; /* channel group 1 */
1607 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "LNA0 0x%x\n", sc->lna[0]);
1608
1609 /* fix broken 5GHz LNA entries */
1610 if (sc->lna[2] == 0 || sc->lna[2] == 0xff) {
1611 MTW_DPRINTF(sc, MTW_DEBUG_ROM,
1612 "invalid LNA for channel group %d\n", 2);
1613 sc->lna[2] = sc->lna[1];
1614 }
1615 if (sc->lna[3] == 0 || sc->lna[3] == 0xff) {
1616 MTW_DPRINTF(sc, MTW_DEBUG_ROM,
1617 "invalid LNA for channel group %d\n", 3);
1618 sc->lna[3] = sc->lna[1];
1619 }
1620
1621 /* fix broken RSSI offset entries */
1622 for (ant = 0; ant < 3; ant++) {
1623 if (sc->rssi_2ghz[ant] < -10 || sc->rssi_2ghz[ant] > 10) {
1624 MTW_DPRINTF(sc, MTW_DEBUG_ROM,
1625 "invalid RSSI%d offset: %d (2GHz)\n", ant + 1,
1626 sc->rssi_2ghz[ant]);
1627 sc->rssi_2ghz[ant] = 0;
1628 }
1629 if (sc->rssi_5ghz[ant] < -10 || sc->rssi_5ghz[ant] > 10) {
1630 MTW_DPRINTF(sc, MTW_DEBUG_ROM,
1631 "invalid RSSI%d offset: %d (5GHz)\n", ant + 1,
1632 sc->rssi_5ghz[ant]);
1633 sc->rssi_5ghz[ant] = 0;
1634 }
1635 }
1636 return (0);
1637 }
1638 static int
1639 mtw_media_change(if_t ifp)
1640 {
1641 struct ieee80211vap *vap = if_getsoftc(ifp);
1642 struct ieee80211com *ic = vap->iv_ic;
1643 const struct ieee80211_txparam *tp;
1644 struct mtw_softc *sc = ic->ic_softc;
1645 uint8_t rate, ridx;
1646
1647 MTW_LOCK(sc);
1648 ieee80211_media_change(ifp);
1649 //tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
1650 tp = &vap->iv_txparms[ic->ic_curmode];
1651 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
1652 struct ieee80211_node *ni;
1653 struct mtw_node *rn;
1654 /* XXX TODO: methodize with MCS rates */
1655 rate =
1656 ic->ic_sup_rates[ic->ic_curmode].rs_rates[tp->ucastrate] &
1657 IEEE80211_RATE_VAL;
1658 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) {
1659 if (rt2860_rates[ridx].rate == rate)
1660 break;
1661 }
1662 ni = ieee80211_ref_node(vap->iv_bss);
1663 rn = MTW_NODE(ni);
1664 rn->fix_ridx = ridx;
1665
1666 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "rate=%d, fix_ridx=%d\n", rate,
1667 rn->fix_ridx);
1668 ieee80211_free_node(ni);
1669 }
1670 MTW_UNLOCK(sc);
1671
1672 return (0);
1673 }
1674
1675 void
1676 mtw_set_leds(struct mtw_softc *sc, uint16_t which)
1677 {
1678 struct mtw_mcu_cmd_8 cmd;
1679 cmd.func = htole32(0x1);
1680 cmd.val = htole32(which);
1681 mtw_mcu_cmd(sc, CMD_LED_MODE, &cmd, sizeof(struct mtw_mcu_cmd_8));
1682 }
1683 static void
1684 mtw_abort_tsf_sync(struct mtw_softc *sc)
1685 {
1686 uint32_t tmp;
1687
1688 mtw_read(sc, MTW_BCN_TIME_CFG, &tmp);
1689 tmp &= ~(MTW_BCN_TX_EN | MTW_TSF_TIMER_EN | MTW_TBTT_TIMER_EN);
1690 mtw_write(sc, MTW_BCN_TIME_CFG, tmp);
1691 }
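/*
 * net80211 state-change handler: rate control is paused during the
 * transition, BSSID/slot/preamble/basic rates are programmed when entering
 * RUN, and the rate-control callout is restarted for running vaps.
 */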
1692 static int
1693 mtw_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
1694 {
1695 const struct ieee80211_txparam *tp;
1696 struct ieee80211com *ic = vap->iv_ic;
1697 struct mtw_softc *sc = ic->ic_softc;
1698 struct mtw_vap *rvp = MTW_VAP(vap);
1699 enum ieee80211_state ostate;
1700 uint32_t sta[3];
1701 uint8_t ratectl = 0;
1702 uint8_t restart_ratectl = 0;
1703 uint8_t bid = 1 << rvp->rvp_id;
1704
1705
1706 ostate = vap->iv_state;
1707 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "%s -> %s\n",
1708 ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
1709 IEEE80211_UNLOCK(ic);
1710 MTW_LOCK(sc);
1711 ratectl = sc->ratectl_run; /* remember current state */
1712 usb_callout_stop(&sc->ratectl_ch);
1713 sc->ratectl_run = MTW_RATECTL_OFF;
1714 if (ostate == IEEE80211_S_RUN) {
1715 /* turn link LED off */
1716 }
1717
1718 switch (nstate) {
1719 case IEEE80211_S_INIT:
1720 restart_ratectl = 1;
1721 if (ostate != IEEE80211_S_RUN)
1722 break;
1723
1724 ratectl &= ~bid;
1725 sc->runbmap &= ~bid;
1726
1727 /* abort TSF synchronization if there is no vap running */
1728 if (--sc->running == 0)
1729 mtw_abort_tsf_sync(sc);
1730 break;
1731
1732 case IEEE80211_S_RUN:
1733 if (!(sc->runbmap & bid)) {
1734 if (sc->running++)
1735 restart_ratectl = 1;
1736 sc->runbmap |= bid;
1737 }
1738
1739 m_freem(rvp->beacon_mbuf);
1740 rvp->beacon_mbuf = NULL;
1741
1742 switch (vap->iv_opmode) {
1743 case IEEE80211_M_HOSTAP:
1744 case IEEE80211_M_MBSS:
1745 sc->ap_running |= bid;
1746 ic->ic_opmode = vap->iv_opmode;
1747 mtw_update_beacon_cb(vap);
1748 break;
1749 case IEEE80211_M_IBSS:
1750 sc->adhoc_running |= bid;
1751 if (!sc->ap_running)
1752 ic->ic_opmode = vap->iv_opmode;
1753 mtw_update_beacon_cb(vap);
1754 break;
1755 case IEEE80211_M_STA:
1756 sc->sta_running |= bid;
1757 if (!sc->ap_running && !sc->adhoc_running)
1758 ic->ic_opmode = vap->iv_opmode;
1759
1760 /* read statistic counters (clear on read) */
1761 mtw_read_region_1(sc, MTW_TX_STA_CNT0, (uint8_t *)sta,
1762 sizeof sta);
1763
1764 break;
1765 default:
1766 ic->ic_opmode = vap->iv_opmode;
1767 break;
1768 }
1769
1770 if (vap->iv_opmode != IEEE80211_M_MONITOR) {
1771 struct ieee80211_node *ni;
1772
1773 if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) {
1774 MTW_UNLOCK(sc);
1775 IEEE80211_LOCK(ic);
1776 return (-1);
1777 }
1778 mtw_updateslot(ic);
1779 mtw_enable_mrr(sc);
1780 mtw_set_txpreamble(sc);
1781 mtw_set_basicrates(sc);
1782 ni = ieee80211_ref_node(vap->iv_bss);
1783 IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid);
1784 mtw_set_bssid(sc, sc->sc_bssid);
1785 ieee80211_free_node(ni);
1786 mtw_enable_tsf_sync(sc);
1787
1788 /* enable automatic rate adaptation */
1789 tp = &vap->iv_txparms[ieee80211_chan2mode(
1790 ic->ic_curchan)];
1791 if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE)
1792 ratectl |= bid;
1793 } else {
1794 mtw_enable_tsf_sync(sc);
1795 }
1796
1797 break;
1798 default:
1799 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "undefined state\n");
1800 break;
1801 }
1802
1803 /* restart amrr for running VAPs */
1804 if ((sc->ratectl_run = ratectl) && restart_ratectl) {
1805 usb_callout_reset(&sc->ratectl_ch, hz, mtw_ratectl_to, sc);
1806 }
1807 MTW_UNLOCK(sc);
1808 IEEE80211_LOCK(ic);
1809 return (rvp->newstate(vap, nstate, arg));
1810 }
1811
1812 static int
1813 mtw_wme_update(struct ieee80211com *ic)
1814 {
1815 struct chanAccParams chp;
1816 struct mtw_softc *sc = ic->ic_softc;
1817 const struct wmeParams *ac;
1818 int aci, error = 0;
1819 ieee80211_wme_ic_getparams(ic, &chp);
1820 ac = chp.cap_wmeParams;
1821
1822 MTW_LOCK(sc);
1823 /* update MAC TX configuration registers */
1824 for (aci = 0; aci < WME_NUM_AC; aci++) {
1825 error = mtw_write(sc, MTW_EDCA_AC_CFG(aci),
1826 ac[aci].wmep_logcwmax << 16 | ac[aci].wmep_logcwmin << 12 |
1827 ac[aci].wmep_aifsn << 8 | ac[aci].wmep_txopLimit);
1828 if (error)
1829 goto err;
1830 }
1831
1832 /* update SCH/DMA registers too */
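/*
 * The WMM_{AIFSN,CWMIN,CWMAX}_CFG registers written below pack one
 * 4-bit field per AC, ordered VO, VI, BK, BE from the highest nibble
 * down, while the two TXOP registers take one 16-bit limit per AC.
 */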
1833 error = mtw_write(sc, MTW_WMM_AIFSN_CFG,
1834 ac[WME_AC_VO].wmep_aifsn << 12 | ac[WME_AC_VI].wmep_aifsn << 8 |
1835 ac[WME_AC_BK].wmep_aifsn << 4 | ac[WME_AC_BE].wmep_aifsn);
1836 if (error)
1837 goto err;
1838 error = mtw_write(sc, MTW_WMM_CWMIN_CFG,
1839 ac[WME_AC_VO].wmep_logcwmin << 12 |
1840 ac[WME_AC_VI].wmep_logcwmin << 8 |
1841 ac[WME_AC_BK].wmep_logcwmin << 4 | ac[WME_AC_BE].wmep_logcwmin);
1842 if (error)
1843 goto err;
1844 error = mtw_write(sc, MTW_WMM_CWMAX_CFG,
1845 ac[WME_AC_VO].wmep_logcwmax << 12 |
1846 ac[WME_AC_VI].wmep_logcwmax << 8 |
1847 ac[WME_AC_BK].wmep_logcwmax << 4 | ac[WME_AC_BE].wmep_logcwmax);
1848 if (error)
1849 goto err;
1850 error = mtw_write(sc, MTW_WMM_TXOP0_CFG,
1851 ac[WME_AC_BK].wmep_txopLimit << 16 | ac[WME_AC_BE].wmep_txopLimit);
1852 if (error)
1853 goto err;
1854 error = mtw_write(sc, MTW_WMM_TXOP1_CFG,
1855 ac[WME_AC_VO].wmep_txopLimit << 16 | ac[WME_AC_VI].wmep_txopLimit);
1856
1857 err:
1858 MTW_UNLOCK(sc);
1859 if (error)
1860 MTW_DPRINTF(sc, MTW_DEBUG_USB, "WME update failed\n");
1861
1862 return (error);
1863 }
1864
1865 static int
1866 mtw_key_set(struct ieee80211vap *vap, struct ieee80211_key *k)
1867 {
1868 struct ieee80211com *ic = vap->iv_ic;
1869 struct mtw_softc *sc = ic->ic_softc;
1870 uint32_t i;
1871
1872 i = MTW_CMDQ_GET(&sc->cmdq_store);
1873 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "cmdq_store=%d\n", i);
1874 sc->cmdq[i].func = mtw_key_set_cb;
1875 sc->cmdq[i].arg0 = NULL;
1876 sc->cmdq[i].arg1 = vap;
1877 sc->cmdq[i].k = k;
1878 IEEE80211_ADDR_COPY(sc->cmdq[i].mac, k->wk_macaddr);
1879 ieee80211_runtask(ic, &sc->cmdq_task);
1880
1881 /*
1882 * Make sure the key gets set even when hostapd
1883 * calls iv_key_set() before if_init().
1884 */
1885 if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
1886 MTW_LOCK(sc);
1887 sc->cmdq_key_set = MTW_CMDQ_GO;
1888 MTW_UNLOCK(sc);
1889 }
1890
1891 return (1);
1892 }
1893 static void
1894 mtw_key_set_cb(void *arg)
1895 {
1896 struct mtw_cmdq *cmdq = arg;
1897 struct ieee80211vap *vap = cmdq->arg1;
1898 struct ieee80211_key *k = cmdq->k;
1899 struct ieee80211com *ic = vap->iv_ic;
1900 struct mtw_softc *sc = ic->ic_softc;
1901 struct ieee80211_node *ni;
1902 u_int cipher = k->wk_cipher->ic_cipher;
1903 uint32_t attr;
1904 uint16_t base;
1905 uint8_t mode, wcid, iv[8];
1906 MTW_LOCK_ASSERT(sc, MA_OWNED);
1907
1908 if (vap->iv_opmode == IEEE80211_M_HOSTAP)
1909 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, cmdq->mac);
1910 else
1911 ni = vap->iv_bss;
1912
1913 /* map net80211 cipher to RT2860 security mode */
1914 switch (cipher) {
1915 case IEEE80211_CIPHER_WEP:
1916 if (ieee80211_crypto_get_key_len(k) < 8)
1917 mode = MTW_MODE_WEP40;
1918 else
1919 mode = MTW_MODE_WEP104;
1920 break;
1921 case IEEE80211_CIPHER_TKIP:
1922 mode = MTW_MODE_TKIP;
1923 break;
1924 case IEEE80211_CIPHER_AES_CCM:
1925 mode = MTW_MODE_AES_CCMP;
1926 break;
1927 default:
1928 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "undefined case\n");
1929 return;
1930 }
1931
1932 if (k->wk_flags & IEEE80211_KEY_GROUP) {
1933 wcid = 0; /* NB: update WCID0 for group keys */
1934 base = MTW_SKEY(0, k->wk_keyix);
1935 } else {
1936 wcid = (ni != NULL) ? MTW_AID2WCID(ni->ni_associd) : 0;
1937 base = MTW_PKEY(wcid);
1938 }
1939
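/*
 * Hardware key layout used below: the key material is written at
 * 'base'; for TKIP the 8-byte Rx MIC follows at offset 16 and the
 * 8-byte Tx MIC at offset 24.
 */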
1940 if (cipher == IEEE80211_CIPHER_TKIP) {
1941 /* TODO: note the direct use of tx/rx mic offsets! ew! */
1942 mtw_write_region_1(sc, base,
1943 ieee80211_crypto_get_key_data(k), 16);
1944 /* rxmic */
1945 mtw_write_region_1(sc, base + 16,
1946 ieee80211_crypto_get_key_rxmic_data(k), 8);
1947 /* txmic */
1948 mtw_write_region_1(sc, base + 24,
1949 ieee80211_crypto_get_key_txmic_data(k), 8);
1950 } else {
1951 /* roundup len to 16-bit: XXX fix write_region_1() instead */
1952 mtw_write_region_1(sc, base, k->wk_key,
1953 (ieee80211_crypto_get_key_len(k) + 1) & ~1);
1954 }
1955
1956 if (!(k->wk_flags & IEEE80211_KEY_GROUP) ||
1957 (k->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))) {
1958 /* set initial packet number in IV+EIV */
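/*
 * The 8-byte IV/EIV block built below follows the 802.11 extended
 * IV format: for WEP only the key ID in byte 3 is set, while for
 * TKIP/CCMP the 48-bit TSC/PN is spread over bytes 0-2 and 4-7 with
 * the key index and the EXTIV flag in byte 3.
 */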
1959 if (cipher == IEEE80211_CIPHER_WEP) {
1960 memset(iv, 0, sizeof iv);
1961 iv[3] = vap->iv_def_txkey << 6;
1962 } else {
1963 if (cipher == IEEE80211_CIPHER_TKIP) {
1964 iv[0] = k->wk_keytsc >> 8;
1965 iv[1] = (iv[0] | 0x20) & 0x7f;
1966 iv[2] = k->wk_keytsc;
1967 } else { //CCMP
1968 iv[0] = k->wk_keytsc;
1969 iv[1] = k->wk_keytsc >> 8;
1970 iv[2] = 0;
1971 }
1972 iv[3] = k->wk_keyix << 6 | IEEE80211_WEP_EXTIV;
1973 iv[4] = k->wk_keytsc >> 16;
1974 iv[5] = k->wk_keytsc >> 24;
1975 iv[6] = k->wk_keytsc >> 32;
1976 iv[7] = k->wk_keytsc >> 40;
1977 }
1978 mtw_write_region_1(sc, MTW_IVEIV(wcid), iv, 8);
1979 }
1980
1981 if (k->wk_flags & IEEE80211_KEY_GROUP) {
1982 /* install group key */
1983 mtw_read(sc, MTW_SKEY_MODE_0_7, &attr);
1984 attr &= ~(0xf << (k->wk_keyix * 4));
1985 attr |= mode << (k->wk_keyix * 4);
1986 mtw_write(sc, MTW_SKEY_MODE_0_7, attr);
1987
1988 if (cipher & (IEEE80211_CIPHER_WEP)) {
1989 mtw_read(sc, MTW_WCID_ATTR(wcid + 1), &attr);
1990 attr = (attr & ~0xf) | (mode << 1);
1991 mtw_write(sc, MTW_WCID_ATTR(wcid + 1), attr);
1992
1993 mtw_set_region_4(sc, MTW_IVEIV(0), 0, 4);
1994
1995 mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
1996 attr = (attr & ~0xf) | (mode << 1);
1997 mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
1998 }
1999 } else {
2000 /* install pairwise key */
2001 mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
2002 attr = (attr & ~0xf) | (mode << 1) | MTW_RX_PKEY_EN;
2003 mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
2004 }
2005 k->wk_pad = wcid;
2006 }
2007
2008 /*
2009 * If the wlan interface is destroyed without being brought down
2010 * first, i.e. without "wlan down" or wpa_cli terminate, this
2011 * function is called after the vap is gone.  Do not dereference it.
2012 */
2013 static void
2014 mtw_key_delete_cb(void *arg)
2015 {
2016 struct mtw_cmdq *cmdq = arg;
2017 struct mtw_softc *sc = cmdq->arg1;
2018 struct ieee80211_key *k = &cmdq->key;
2019 uint32_t attr;
2020 uint8_t wcid;
2021
2022 MTW_LOCK_ASSERT(sc, MA_OWNED);
2023
2024 if (k->wk_flags & IEEE80211_KEY_GROUP) {
2025 /* remove group key */
2026 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "removing group key\n");
2027 mtw_read(sc, MTW_SKEY_MODE_0_7, &attr);
2028 attr &= ~(0xf << (k->wk_keyix * 4));
2029 mtw_write(sc, MTW_SKEY_MODE_0_7, attr);
2030 } else {
2031 /* remove pairwise key */
2032 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "removing key for wcid %x\n",
2033 k->wk_pad);
2034 /* matching wcid was written to wk_pad in mtw_key_set() */
2035 wcid = k->wk_pad;
2036 mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
2037 attr &= ~0xf;
2038 mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
2039 }
2040
2041 k->wk_pad = 0;
2042 }
2043
2044 /*
2045 * net80211 expects 0 on error; we defer the deletion and always report success
2046 */
2047 static int
2048 mtw_key_delete(struct ieee80211vap *vap, struct ieee80211_key *k)
2049 {
2050 struct ieee80211com *ic = vap->iv_ic;
2051 struct mtw_softc *sc = ic->ic_softc;
2052 struct ieee80211_key *k0;
2053 uint32_t i;
2054 if (sc->sc_flags & MTW_RUNNING)
2055 return (1);
2056
2057 /*
2058 * By the time the callback runs, the key might be gone, so make a
2059 * copy of the values needed to delete the key before deferring.
2060 * Because of a LOR with the node lock, we cannot take a lock here;
2061 * use atomics instead.
2062 */
2063 i = MTW_CMDQ_GET(&sc->cmdq_store);
2064 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "cmdq_store=%d\n", i);
2065 sc->cmdq[i].func = mtw_key_delete_cb;
2066 sc->cmdq[i].arg0 = NULL;
2067 sc->cmdq[i].arg1 = sc;
2068 k0 = &sc->cmdq[i].key;
2069 k0->wk_flags = k->wk_flags;
2070 k0->wk_keyix = k->wk_keyix;
2071 /* matching wcid was written to wk_pad in mtw_key_set() */
2072 k0->wk_pad = k->wk_pad;
2073 ieee80211_runtask(ic, &sc->cmdq_task);
2074 return (1); /* return fake success */
2075 }
2076
2077 static void
2078 mtw_ratectl_to(void *arg)
2079 {
2080 struct mtw_softc *sc = arg;
2081 /* do it in a process context, so it can sleep */
2082 ieee80211_runtask(&sc->sc_ic, &sc->ratectl_task);
2083 /* next timeout will be rescheduled in the callback task */
2084 }
2085
2086 /* ARGSUSED */
2087 static void
2088 mtw_ratectl_cb(void *arg, int pending)
2089 {
2090
2091 struct mtw_softc *sc = arg;
2092 struct ieee80211com *ic = &sc->sc_ic;
2093 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2094
2095 if (vap == NULL)
2096 return;
2097
2098 ieee80211_iterate_nodes(&ic->ic_sta, mtw_iter_func, sc);
2099
2100 usb_callout_reset(&sc->ratectl_ch, hz, mtw_ratectl_to, sc);
2101
2102
2103 }
2104
2105 static void
2106 mtw_drain_fifo(void *arg)
2107 {
2108 struct mtw_softc *sc = arg;
2109 uint32_t stat;
2110 uint16_t(*wstat)[3];
2111 uint8_t wcid, mcs, pid;
2112 int8_t retry;
2113
2114 MTW_LOCK_ASSERT(sc, MA_OWNED);
2115
2116 for (;;) {
2117 /* drain Tx status FIFO (maxsize = 16) */
2118 mtw_read(sc, MTW_TX_STAT_FIFO, &stat);
2119 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "tx stat 0x%08x\n", stat);
2120 if (!(stat & MTW_TXQ_VLD))
2121 break;
2122
2123 wcid = (stat >> MTW_TXQ_WCID_SHIFT) & 0xff;
2124
2125 /* if no ACK was requested, no feedback is available */
2126 if (!(stat & MTW_TXQ_ACKREQ) || wcid > MTW_WCID_MAX ||
2127 wcid == 0)
2128 continue;
2129
2130 /*
2131 * Even though each stat has a Tx-complete-status-like format,
2132 * the stats are polled from the device, and there is no guarantee
2133 * that the node they refer to is still around when they are read.
2134 * If we called ieee80211_ratectl_tx_update() directly here, we
2135 * could easily end up referencing an already freed node.
2136 *
2137 * To avoid such use-after-free faults, we accumulate the stats in
2138 * the softc and update the rates later with
2139 * ieee80211_ratectl_tx_update().
2140 */
2141 wstat = &(sc->wcid_stats[wcid]);
2142 (*wstat)[MTW_TXCNT]++;
2143 if (stat & MTW_TXQ_OK)
2144 (*wstat)[MTW_SUCCESS]++;
2145 else
2146 counter_u64_add(sc->sc_ic.ic_oerrors, 1);
2147 /*
2148 * Check if there were retries, ie if the Tx success rate is
2149 * different from the requested rate. Note that it works only
2150 * because we do not allow rate fallback from OFDM to CCK.
2151 */
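/*
 * Example: a frame queued at MCS 3 carries PID 4; if the status
 * reports MCS 1, then pid - 1 - mcs = 2 fallback steps, which are
 * counted below as two additional transmissions/retries.
 */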
2152 mcs = (stat >> MTW_TXQ_MCS_SHIFT) & 0x7f;
2153 pid = (stat >> MTW_TXQ_PID_SHIFT) & 0xf;
2154 if ((retry = pid - 1 - mcs) > 0) {
2155 (*wstat)[MTW_TXCNT] += retry;
2156 (*wstat)[MTW_RETRY] += retry;
2157 }
2158 }
2159 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "count=%d\n", sc->fifo_cnt);
2160
2161 sc->fifo_cnt = 0;
2162 }
2163
2164 static void
2165 mtw_iter_func(void *arg, struct ieee80211_node *ni)
2166 {
2167 struct mtw_softc *sc = arg;
2168 MTW_LOCK(sc);
2169 struct ieee80211_ratectl_tx_stats *txs = &sc->sc_txs;
2170 struct ieee80211vap *vap = ni->ni_vap;
2171 struct mtw_node *rn = MTW_NODE(ni);
2172 uint32_t sta[3];
2173 uint16_t(*wstat)[3];
2174 int error, ridx;
2175 uint8_t txrate = 0;
2176
2177 /* Special case: with a single STA vap, only the BSS node matters. */
2178 if (sc->rvp_cnt <= 1 && vap->iv_opmode == IEEE80211_M_STA &&
2179 ni != vap->iv_bss)
2180 goto fail;
2181
2182 txs->flags = IEEE80211_RATECTL_TX_STATS_NODE |
2183 IEEE80211_RATECTL_TX_STATS_RETRIES;
2184 txs->ni = ni;
2185 if (sc->rvp_cnt <= 1 &&
2186 (vap->iv_opmode == IEEE80211_M_IBSS ||
2187 vap->iv_opmode == IEEE80211_M_STA)) {
2188 /*
2189 * read statistic counters (clear on read) and update AMRR state
2190 */
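/*
 * TX_STA_CNT0/1 layout as decoded below: sta[0] bits 15:0 hold the
 * failed-TX count, sta[1] bits 31:16 the retry count and sta[1]
 * bits 15:0 the success count.
 */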
2191 error = mtw_read_region_1(sc, MTW_TX_STA_CNT0, (uint8_t *)sta,
2192 sizeof sta);
2193 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "error:%d\n", error);
2194 if (error != 0)
2195 goto fail;
2196
2197 /* count failed TX as errors */
2198 if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS,
2199 le32toh(sta[0]) & 0xffff);
2200
2201 txs->nretries = (le32toh(sta[1]) >> 16);
2202 txs->nsuccess = (le32toh(sta[1]) & 0xffff);
2203 /* nretries??? */
2204 txs->nframes = txs->nsuccess + (le32toh(sta[0]) & 0xffff);
2205
2206 MTW_DPRINTF(sc, MTW_DEBUG_RATE,
2207 "retrycnt=%d success=%d failcnt=%d\n", txs->nretries,
2208 txs->nsuccess, le32toh(sta[0]) & 0xffff);
2209 } else {
2210 wstat = &(sc->wcid_stats[MTW_AID2WCID(ni->ni_associd)]);
2211
2212 if (wstat == &(sc->wcid_stats[0]) ||
2213 wstat > &(sc->wcid_stats[MTW_WCID_MAX]))
2214 goto fail;
2215
2216 txs->nretries = (*wstat)[MTW_RETRY];
2217 txs->nsuccess = (*wstat)[MTW_SUCCESS];
2218 txs->nframes = (*wstat)[MTW_TXCNT];
2219 MTW_DPRINTF(sc, MTW_DEBUG_RATE,
2220 "wstat retrycnt=%d txcnt=%d success=%d\n", txs->nretries,
2221 txs->nframes, txs->nsuccess);
2222
2223 memset(wstat, 0, sizeof(*wstat));
2224 }
2225
2226 ieee80211_ratectl_tx_update(vap, txs);
2227 ieee80211_ratectl_rate(ni, NULL, 0);
2228 txrate = ieee80211_node_get_txrate_dot11rate(ni);
2229
2230 /* XXX TODO: methodize with MCS rates */
2231 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) {
2232 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "ni_txrate=0x%x\n",
2233 txrate);
2234 if (rt2860_rates[ridx].rate == txrate) {
2235 break;
2236 }
2237 }
2238 rn->amrr_ridx = ridx;
2239 fail:
2240 MTW_UNLOCK(sc);
2241
2242 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "rate=%d, ridx=%d\n",
2243 txrate, rn->amrr_ridx);
2244 }
2245
2246 static void
2247 mtw_newassoc_cb(void *arg)
2248 {
2249 struct mtw_cmdq *cmdq = arg;
2250 struct ieee80211_node *ni = cmdq->arg1;
2251 struct mtw_softc *sc = ni->ni_vap->iv_ic->ic_softc;
2252
2253 uint8_t wcid = cmdq->wcid;
2254
2255 MTW_LOCK_ASSERT(sc, MA_OWNED);
2256
2257 mtw_write_region_1(sc, MTW_WCID_ENTRY(wcid), ni->ni_macaddr,
2258 IEEE80211_ADDR_LEN);
2259
2260 memset(&(sc->wcid_stats[wcid]), 0, sizeof(sc->wcid_stats[wcid]));
2261 }
2262
2263 static void
2264 mtw_newassoc(struct ieee80211_node *ni, int isnew)
2265 {
2266
2267 struct mtw_node *mn = MTW_NODE(ni);
2268 struct ieee80211vap *vap = ni->ni_vap;
2269 struct ieee80211com *ic = vap->iv_ic;
2270 struct mtw_softc *sc = ic->ic_softc;
2271
2272 uint8_t rate;
2273 uint8_t ridx;
2274 uint8_t wcid;
2275 //int i;
2276 // int i,j;
2277 wcid = MTW_AID2WCID(ni->ni_associd);
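/*
 * MTW_AID2WCID() maps the association ID to the hardware WCID slot
 * that indexes the per-station state used elsewhere in the driver
 * (WCID table entry, pairwise key and Tx stats).
 */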
2278
2279 if (wcid > MTW_WCID_MAX) {
2280 device_printf(sc->sc_dev, "wcid=%d out of range\n", wcid);
2281 return;
2282 }
2283
2284 /* only interested in true associations */
2285 if (isnew && ni->ni_associd != 0) {
2286 /*
2287 * This function can be called via a timeout function,
2288 * so the work needs to be deferred.
2289 */
2290
2291 uint32_t cnt = MTW_CMDQ_GET(&sc->cmdq_store);
2292 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "cmdq_store=%d\n", cnt);
2293 sc->cmdq[cnt].func = mtw_newassoc_cb;
2294 sc->cmdq[cnt].arg0 = NULL;
2295 sc->cmdq[cnt].arg1 = ni;
2296 sc->cmdq[cnt].wcid = wcid;
2297 ieee80211_runtask(ic, &sc->cmdq_task);
2298 }
2299
2300 MTW_DPRINTF(sc, MTW_DEBUG_STATE,
2301 "new assoc isnew=%d associd=%x addr=%s\n", isnew, ni->ni_associd,
2302 ether_sprintf(ni->ni_macaddr));
2303 rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate;
2304 /* XXX TODO: methodize with MCS rates */
2305 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
2306 if (rt2860_rates[ridx].rate == rate)
2307 break;
2308 mn->mgt_ridx = ridx;
2309 MTW_DPRINTF(sc, MTW_DEBUG_STATE | MTW_DEBUG_RATE,
2310 "rate=%d, ctl_ridx=%d\n", rate, ridx);
2311 MTW_LOCK(sc);
2312 if (sc->ratectl_run != MTW_RATECTL_OFF) {
2313 usb_callout_reset(&sc->ratectl_ch, hz, &mtw_ratectl_to, sc);
2314 }
2315 MTW_UNLOCK(sc);
2316
2317 }
2318
2319 /*
2320 * Return the Rx chain with the highest RSSI for a given frame.
2321 */
2322 static __inline uint8_t
2323 mtw_maxrssi_chain(struct mtw_softc *sc, const struct mtw_rxwi *rxwi)
2324 {
2325 uint8_t rxchain = 0;
2326
2327 if (sc->nrxchains > 1) {
2328 if (rxwi->rssi[1] > rxwi->rssi[rxchain])
2329 rxchain = 1;
2330 if (sc->nrxchains > 2)
2331 if (rxwi->rssi[2] > rxwi->rssi[rxchain])
2332 rxchain = 2;
2333 }
2334 return (rxchain);
2335 }
2336 static void
2337 mtw_get_tsf(struct mtw_softc *sc, uint64_t *buf)
2338 {
2339 mtw_read_region_1(sc, MTW_TSF_TIMER_DW0, (uint8_t *)buf, sizeof(*buf));
2340 }
2341
2342 static void
2343 mtw_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype,
2344 const struct ieee80211_rx_stats *rxs, int rssi, int nf)
2345 {
2346 struct ieee80211vap *vap = ni->ni_vap;
2347 struct mtw_softc *sc = vap->iv_ic->ic_softc;
2348 struct mtw_vap *rvp = MTW_VAP(vap);
2349 uint64_t ni_tstamp, rx_tstamp;
2350
2351 rvp->recv_mgmt(ni, m, subtype, rxs, rssi, nf);
2352
2353 if (vap->iv_state == IEEE80211_S_RUN &&
2354 (subtype == IEEE80211_FC0_SUBTYPE_BEACON ||
2355 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) {
2356 ni_tstamp = le64toh(ni->ni_tstamp.tsf);
2357 MTW_LOCK(sc);
2358 mtw_get_tsf(sc, &rx_tstamp);
2359 MTW_UNLOCK(sc);
2360 rx_tstamp = le64toh(rx_tstamp);
2361
2362 if (ni_tstamp >= rx_tstamp) {
2363 MTW_DPRINTF(sc, MTW_DEBUG_RECV | MTW_DEBUG_BEACON,
2364 "ibss merge, tsf %ju tstamp %ju\n",
2365 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp);
2366 (void)ieee80211_ibss_merge(ni);
2367 }
2368 }
2369 }
2370 static void
2371 mtw_rx_frame(struct mtw_softc *sc, struct mbuf *m, uint32_t dmalen)
2372 {
2373 struct ieee80211com *ic = &sc->sc_ic;
2374 struct ieee80211_frame *wh;
2375 struct ieee80211_node *ni;
2376 struct epoch_tracker et;
2377
2378 struct mtw_rxwi *rxwi;
2379 uint32_t flags;
2380 uint16_t len, rxwisize;
2381 uint8_t ant, rssi;
2382 int8_t nf;
2383
2384 rxwisize = sizeof(struct mtw_rxwi);
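/*
 * Each frame handed to this function starts with an RXWI descriptor
 * followed by the 802.11 frame; dmalen covers both, while rxwi->len
 * gives the length of the 802.11 frame alone.
 */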
2385
2386 if (__predict_false(
2387 dmalen < rxwisize + sizeof(struct ieee80211_frame_ack))) {
2388 MTW_DPRINTF(sc, MTW_DEBUG_RECV,
2389 "payload is too short: dma length %u < %zu\n", dmalen,
2390 rxwisize + sizeof(struct ieee80211_frame_ack));
2391 goto fail;
2392 }
2393
2394 rxwi = mtod(m, struct mtw_rxwi *);
2395 len = le16toh(rxwi->len) & 0xfff;
2396 flags = le32toh(rxwi->flags);
2397 if (__predict_false(len > dmalen - rxwisize)) {
2398 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "bad RXWI length %u > %u\n",
2399 len, dmalen);
2400 goto fail;
2401 }
2402
2403 if (__predict_false(flags & (MTW_RX_CRCERR | MTW_RX_ICVERR))) {
2404 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "%s error.\n",
2405 (flags & MTW_RX_CRCERR) ? "CRC" : "ICV");
2406 goto fail;
2407 }
2408
2409 if (flags & MTW_RX_L2PAD) {
2410 MTW_DPRINTF(sc, MTW_DEBUG_RECV,
2411 "received RT2860_RX_L2PAD frame\n");
2412 len += 2;
2413 }
2414
2415 m->m_data += rxwisize;
2416 m->m_pkthdr.len = m->m_len = len;
2417
2418 wh = mtod(m, struct ieee80211_frame *);
2419 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
2420 wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
2421 m->m_flags |= M_WEP;
2422 }
2423
2424 if (len >= sizeof(struct ieee80211_frame_min)) {
2425 ni = ieee80211_find_rxnode(ic,
2426 mtod(m, struct ieee80211_frame_min *));
2427 } else
2428 ni = NULL;
2429
2430 if (ni && ni->ni_flags & IEEE80211_NODE_HT) {
2431 m->m_flags |= M_AMPDU;
2432 }
2433
2434 if (__predict_false(flags & MTW_RX_MICERR)) {
2435 /* report MIC failures to net80211 for TKIP */
2436 if (ni != NULL)
2437 ieee80211_notify_michael_failure(ni->ni_vap, wh,
2438 rxwi->keyidx);
2439 MTW_DPRINTF(sc, MTW_DEBUG_RECV,
2440 "MIC error. Someone is lying.\n");
2441 goto fail;
2442 }
2443
2444 ant = mtw_maxrssi_chain(sc, rxwi);
2445 rssi = rxwi->rssi[ant];
2446 nf = mtw_rssi2dbm(sc, rssi, ant);
2447
2448 if (__predict_false(ieee80211_radiotap_active(ic))) {
2449 struct mtw_rx_radiotap_header *tap = &sc->sc_rxtap;
2450 uint16_t phy;
2451
2452 tap->wr_flags = 0;
2453 if (flags & MTW_RX_L2PAD)
2454 tap->wr_flags |= IEEE80211_RADIOTAP_F_DATAPAD;
2455 tap->wr_antsignal = rssi;
2456 tap->wr_antenna = ant;
2457 tap->wr_dbm_antsignal = mtw_rssi2dbm(sc, rssi, ant);
2458 tap->wr_rate = 2; /* in case it can't be found below */
2459 //MTW_LOCK(sc);
2460
2461 // MTW_UNLOCK(sc);
2462 phy = le16toh(rxwi->phy);
2463 switch (phy >> MT7601_PHY_SHIFT) {
2464 case MTW_PHY_CCK:
2465 switch ((phy & MTW_PHY_MCS) & ~MTW_PHY_SHPRE) {
2466 case 0:
2467 tap->wr_rate = 2;
2468 break;
2469 case 1:
2470 tap->wr_rate = 4;
2471 break;
2472 case 2:
2473 tap->wr_rate = 11;
2474 break;
2475 case 3:
2476 tap->wr_rate = 22;
2477 break;
2478 }
2479 if (phy & MTW_PHY_SHPRE)
2480 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2481 break;
2482 case MTW_PHY_OFDM:
2483 switch (phy & MTW_PHY_MCS) {
2484 case 0:
2485 tap->wr_rate = 12;
2486 break;
2487 case 1:
2488 tap->wr_rate = 18;
2489 break;
2490 case 2:
2491 tap->wr_rate = 24;
2492 break;
2493 case 3:
2494 tap->wr_rate = 36;
2495 break;
2496 case 4:
2497 tap->wr_rate = 48;
2498 break;
2499 case 5:
2500 tap->wr_rate = 72;
2501 break;
2502 case 6:
2503 tap->wr_rate = 96;
2504 break;
2505 case 7:
2506 tap->wr_rate = 108;
2507 break;
2508 }
2509 break;
2510 }
2511 }
2512
2513 NET_EPOCH_ENTER(et);
2514 if (ni != NULL) {
2515 (void)ieee80211_input(ni, m, rssi, nf);
2516 ieee80211_free_node(ni);
2517 } else {
2518 (void)ieee80211_input_all(ic, m, rssi, nf);
2519 }
2520 NET_EPOCH_EXIT(et);
2521
2522 return;
2523
2524 fail:
2525 m_freem(m);
2526 counter_u64_add(ic->ic_ierrors, 1);
2527 }
2528
2529 static void
2530 mtw_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
2531 {
2532 struct mtw_softc *sc = usbd_xfer_softc(xfer);
2533 struct ieee80211com *ic = &sc->sc_ic;
2534 struct mbuf *m = NULL;
2535 struct mbuf *m0;
2536 uint32_t dmalen, mbuf_len;
2537 uint16_t rxwisize;
2538 int xferlen;
2539
2540 rxwisize = sizeof(struct mtw_rxwi);
2541
2542 usbd_xfer_status(xfer, &xferlen, NULL, NULL, NULL);
2543
2544 switch (USB_GET_STATE(xfer)) {
2545 case USB_ST_TRANSFERRED:
2546 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "rx done, actlen=%d\n",
2547 xferlen);
2548 if (xferlen < (int)(sizeof(uint32_t) + rxwisize +
2549 sizeof(struct mtw_rxd))) {
2550 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
2551 "xfer too short %d %d\n", xferlen,
2552 (int)(sizeof(uint32_t) + rxwisize +
2553 sizeof(struct mtw_rxd)));
2554 goto tr_setup;
2555 }
2556
2557 m = sc->rx_m;
2558 sc->rx_m = NULL;
2559
2560 /* FALLTHROUGH */
2561 case USB_ST_SETUP:
2562 tr_setup:
2563
2564 if (sc->rx_m == NULL) {
2565 sc->rx_m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
2566 MTW_MAX_RXSZ);
2567 }
2568 if (sc->rx_m == NULL) {
2569 MTW_DPRINTF(sc,
2570 MTW_DEBUG_RECV | MTW_DEBUG_RECV_DESC |
2571 MTW_DEBUG_USB,
2572 "could not allocate mbuf - idle with stall\n");
2573 counter_u64_add(ic->ic_ierrors, 1);
2574 usbd_xfer_set_stall(xfer);
2575 usbd_xfer_set_frames(xfer, 0);
2576 } else {
2577 /*
2578 * Directly load the mbuf cluster into the DMA buffer
2579 * to save a data copy. This works because there is
2580 * only one cluster.
2581 */
2582 usbd_xfer_set_frame_data(xfer, 0,
2583 mtod(sc->rx_m, caddr_t), MTW_MAX_RXSZ);
2584 usbd_xfer_set_frames(xfer, 1);
2585 }
2586 usbd_transfer_submit(xfer);
2587 break;
2588
2589 default: /* Error */
2590 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2591 "USB transfer error, %s\n", usbd_errstr(error));
2592
2593 if (error != USB_ERR_CANCELLED) {
2594 /* try to clear stall first */
2595 usbd_xfer_set_stall(xfer);
2596 if (error == USB_ERR_TIMEOUT)
2597 device_printf(sc->sc_dev, "device timeout %s\n",
2598 __func__);
2599 counter_u64_add(ic->ic_ierrors, 1);
2600 goto tr_setup;
2601 }
2602 if (sc->rx_m != NULL) {
2603 m_freem(sc->rx_m);
2604 sc->rx_m = NULL;
2605 }
2606 break;
2607 }
2608
2609 if (m == NULL)
2610 return;
2611
2612 /* inputting the frames must be done last, outside the driver lock */
2613
2614 MTW_UNLOCK(sc);
2615
2616 m->m_pkthdr.len = m->m_len = xferlen;
2617
2618 /* HW can aggregate multiple 802.11 frames in a single USB xfer */
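/*
 * Each aggregated sub-frame is preceded by a 32-bit DMA length word;
 * the loop below consumes dmalen + 8 bytes of the transfer per frame
 * (length word plus trailing descriptor/padding) and hands the
 * payload to mtw_rx_frame().
 */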
2619 for (;;) {
2620 dmalen = le32toh(*mtod(m, uint32_t *)) & 0xffff;
2621
2622 if ((dmalen >= (uint32_t)-8) || (dmalen == 0) ||
2623 ((dmalen & 3) != 0)) {
2624 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
2625 "bad DMA length %u\n", dmalen);
2626 break;
2627 }
2628 if ((dmalen + 8) > (uint32_t)xferlen) {
2629 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
2630 "bad DMA length %u > %d\n", dmalen + 8, xferlen);
2631 break;
2632 }
2633
2634 /* If it is the last one or a single frame, we won't copy. */
2635 if ((xferlen -= dmalen + 8) <= 8) {
2636 /* trim 32-bit DMA-len header */
2637 m->m_data += 4;
2638 m->m_pkthdr.len = m->m_len -= 4;
2639 mtw_rx_frame(sc, m, dmalen);
2640 m = NULL; /* don't free source buffer */
2641 break;
2642 }
2643
2644 mbuf_len = dmalen + sizeof(struct mtw_rxd);
2645 if (__predict_false(mbuf_len > MCLBYTES)) {
2646 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
2647 "payload is too big: mbuf_len %u\n", mbuf_len);
2648 counter_u64_add(ic->ic_ierrors, 1);
2649 break;
2650 }
2651
2652 /* copy aggregated frames to another mbuf */
2653 m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2654 if (__predict_false(m0 == NULL)) {
2655 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC,
2656 "could not allocate mbuf\n");
2657 counter_u64_add(ic->ic_ierrors, 1);
2658 break;
2659 }
2660 m_copydata(m, 4 /* skip 32-bit DMA-len header */, mbuf_len,
2661 mtod(m0, caddr_t));
2662 m0->m_pkthdr.len = m0->m_len = mbuf_len;
2663 mtw_rx_frame(sc, m0, dmalen);
2664
2665 /* update data ptr */
2666 m->m_data += mbuf_len + 4;
2667 m->m_pkthdr.len = m->m_len -= mbuf_len + 4;
2668 }
2669
2670 /* make sure we free the source buffer, if any */
2671 m_freem(m);
2672
2673 #ifdef IEEE80211_SUPPORT_SUPERG
2674 ieee80211_ff_age_all(ic, 100);
2675 #endif
2676 MTW_LOCK(sc);
2677 }
2678
2679 static void
2680 mtw_tx_free(struct mtw_endpoint_queue *pq, struct mtw_tx_data *data, int txerr)
2681 {
2682
2683 ieee80211_tx_complete(data->ni, data->m, txerr);
2684 data->m = NULL;
2685 data->ni = NULL;
2686
2687 STAILQ_INSERT_TAIL(&pq->tx_fh, data, next);
2688 pq->tx_nfree++;
2689 }
2690 static void
2691 mtw_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error, u_int index)
2692 {
2693 struct mtw_softc *sc = usbd_xfer_softc(xfer);
2694 struct ieee80211com *ic = &sc->sc_ic;
2695 struct mtw_tx_data *data;
2696 struct ieee80211vap *vap = NULL;
2697 struct usb_page_cache *pc;
2698 struct mtw_endpoint_queue *pq = &sc->sc_epq[index];
2699 struct mbuf *m;
2700 usb_frlength_t size;
2701 int actlen;
2702 int sumlen;
2703 usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
2704
2705 switch (USB_GET_STATE(xfer)) {
2706 case USB_ST_TRANSFERRED:
2707 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2708 "transfer complete: %d bytes @ index %d\n", actlen, index);
2709
2710 data = usbd_xfer_get_priv(xfer);
2711 mtw_tx_free(pq, data, 0);
2712 usbd_xfer_set_priv(xfer, NULL);
2713
2714 /* FALLTHROUGH */
2715 case USB_ST_SETUP:
2716 tr_setup:
2717 data = STAILQ_FIRST(&pq->tx_qh);
2718 if (data == NULL)
2719 break;
2720
2721 STAILQ_REMOVE_HEAD(&pq->tx_qh, next);
2722
2723 m = data->m;
2724
2725 size = sizeof(data->desc);
2726 if ((m->m_pkthdr.len + size + 3 + 8) > MTW_MAX_TXSZ) {
2727 MTW_DPRINTF(sc, MTW_DEBUG_XMIT_DESC | MTW_DEBUG_USB,
2728 "data overflow, %u bytes\n", m->m_pkthdr.len);
2729 mtw_tx_free(pq, data, 1);
2730 goto tr_setup;
2731 }
2732
2733 pc = usbd_xfer_get_frame(xfer, 0);
2734 usbd_copy_in(pc, 0, &data->desc, size);
2735 usbd_m_copy_in(pc, size, m, 0, m->m_pkthdr.len);
2736 size += m->m_pkthdr.len;
2737 /*
2738 * Align end on a 4-byte boundary, pad 8 bytes (CRC +
2739 * 4-byte padding), and be sure to zero those trailing
2740 * bytes:
2741 */
2742 usbd_frame_zero(pc, size, ((-size) & 3) + MTW_DMA_PAD);
2743 size += ((-size) & 3) + MTW_DMA_PAD;
2744
2745 vap = data->ni->ni_vap;
2746 if (ieee80211_radiotap_active_vap(vap)) {
2747 const struct ieee80211_frame *wh;
2748 struct mtw_tx_radiotap_header *tap = &sc->sc_txtap;
2749 struct mtw_txwi *txwi =
2750 (struct mtw_txwi *)(&data->desc +
2751 sizeof(struct mtw_txd));
2752 int has_l2pad;
2753
2754 wh = mtod(m, struct ieee80211_frame *);
2755 has_l2pad = IEEE80211_HAS_ADDR4(wh) !=
2756 IEEE80211_QOS_HAS_SEQ(wh);
2757
2758 tap->wt_flags = 0;
2759 tap->wt_rate = rt2860_rates[data->ridx].rate;
2760 tap->wt_hwqueue = index;
2761 if (le16toh(txwi->phy) & MTW_PHY_SHPRE)
2762 tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2763 if (has_l2pad)
2764 tap->wt_flags |= IEEE80211_RADIOTAP_F_DATAPAD;
2765
2766 ieee80211_radiotap_tx(vap, m);
2767 }
2768
2769 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2770 "sending frame len=%u/%u @ index %d\n", m->m_pkthdr.len,
2771 size, index);
2772
2773 usbd_xfer_set_frame_len(xfer, 0, size);
2774 usbd_xfer_set_priv(xfer, data);
2775 usbd_transfer_submit(xfer);
2776 mtw_start(sc);
2777
2778 break;
2779
2780 default:
2781 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2782 "USB transfer error, %s\n", usbd_errstr(error));
2783
2784 data = usbd_xfer_get_priv(xfer);
2785
2786 if (data != NULL) {
2787 if (data->ni != NULL)
2788 vap = data->ni->ni_vap;
2789 mtw_tx_free(pq, data, error);
2790 usbd_xfer_set_priv(xfer, NULL);
2791 }
2792
2793 if (vap == NULL)
2794 vap = TAILQ_FIRST(&ic->ic_vaps);
2795
2796 if (error != USB_ERR_CANCELLED) {
2797 if (error == USB_ERR_TIMEOUT) {
2798 device_printf(sc->sc_dev, "device timeout %s\n",
2799 __func__);
2800 uint32_t i = MTW_CMDQ_GET(&sc->cmdq_store);
2801 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2802 "cmdq_store=%d\n", i);
2803 sc->cmdq[i].func = mtw_usb_timeout_cb;
2804 sc->cmdq[i].arg0 = vap;
2805 ieee80211_runtask(ic, &sc->cmdq_task);
2806 }
2807
2808 /*
2809 * Try to clear the stall first; this is also done for
2810 * other errors.  Note that clearing the stall
2811 * introduces a 50 ms delay:
2812 */
2813 usbd_xfer_set_stall(xfer);
2814 goto tr_setup;
2815 }
2816 break;
2817 }
2818 #ifdef IEEE80211_SUPPORT_SUPERG
2819 /* XXX TODO: make this deferred rather than unlock/relock */
2820 /* XXX TODO: should only do the QoS AC this belongs to */
2821 if (pq->tx_nfree >= MTW_TX_RING_COUNT) {
2822 MTW_UNLOCK(sc);
2823 ieee80211_ff_flush_all(ic);
2824 MTW_LOCK(sc);
2825 }
2826 #endif
2827 }
2828
2829 static void
2830 mtw_fw_callback(struct usb_xfer *xfer, usb_error_t error)
2831 {
2832 struct mtw_softc *sc = usbd_xfer_softc(xfer);
2833
2834 int actlen;
2835 int ntries, tmp;
2836 // struct mtw_txd *data;
2837
2838 usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
2839 // data = usbd_xfer_get_priv(xfer);
2840 usbd_xfer_set_priv(xfer, NULL);
2841 switch (USB_GET_STATE(xfer)) {
2842
2843 case USB_ST_TRANSFERRED:
2844 sc->sc_sent += actlen;
2845 memset(sc->txd_fw[sc->sc_idx], 0, actlen);
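/*
 * The firmware image appears to be pushed in up to four stages
 * tracked by sc_idx: a full 0x2c44-byte chunk advances the stage,
 * and once the last stage has been sent the IVB is written and the
 * MCU is polled for readiness below.
 */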
2846
2847 if (actlen < 0x2c44 && sc->sc_idx == 0) {
2848 return;
2849 }
2850 if (sc->sc_idx == 3) {
2851
2852 if ((error = mtw_write_ivb(sc, sc->sc_ivb_1,
2853 MTW_MCU_IVB_LEN)) != 0) {
2854 device_printf(sc->sc_dev,
2855 "Could not write ivb error: %d\n", error);
2856 }
2857
2858 mtw_delay(sc, 10);
2859 for (ntries = 0; ntries < 100; ntries++) {
2860 if ((error = mtw_read_cfg(sc, MTW_MCU_DMA_ADDR,
2861 &tmp)) != 0) {
2862 device_printf(sc->sc_dev,
2863 "Could not read cfg error: %d\n", error);
2864
2865 }
2866 if (tmp == MTW_MCU_READY) {
2867 MTW_DPRINTF(sc, MTW_DEBUG_FIRMWARE,
2868 "mcu ready %d\n", tmp);
2869 sc->fwloading = 1;
2870 break;
2871 }
2872
2873 mtw_delay(sc, 10);
2874 }
2875 if (ntries == 100)
2876 sc->fwloading = 0;
2877 wakeup(&sc->fwloading);
2878 return;
2879 }
2880
2881 if (actlen == 0x2c44) {
2882 sc->sc_idx++;
2883 DELAY(1000);
2884 }
2885
2886 case USB_ST_SETUP: {
2887 int dlen = 0;
2888 dlen = sc->txd_fw[sc->sc_idx]->len;
2889
2890 mtw_write_cfg(sc, MTW_MCU_DMA_ADDR, 0x40 + sc->sc_sent);
2891 mtw_write_cfg(sc, MTW_MCU_DMA_LEN, (dlen << 16));
2892
2893 usbd_xfer_set_frame_len(xfer, 0, dlen);
2894 usbd_xfer_set_frame_data(xfer, 0, sc->txd_fw[sc->sc_idx], dlen);
2895
2896 // usbd_xfer_set_priv(xfer,sc->txd[sc->sc_idx]);
2897 usbd_transfer_submit(xfer);
2898 break;
2899
2900 default: /* Error */
2901 device_printf(sc->sc_dev, "%s:%d %s\n", __FILE__, __LINE__,
2902 usbd_errstr(error));
2903 sc->fwloading = 0;
2904 wakeup(&sc->fwloading);
2905 /*
2906 * Print an error message; a stall could also be
2907 * cleared here.
2908 */
2909 break;
2910 }
2911 /*
2912 * Here it is safe to do something without the private
2913 * USB mutex locked.
2914 */
2915 }
2916 return;
2917 }
2918 static void
2919 mtw_bulk_tx_callback0(struct usb_xfer *xfer, usb_error_t error)
2920 {
2921 mtw_bulk_tx_callbackN(xfer, error, 0);
2922 }
2923
2924 static void
2925 mtw_bulk_tx_callback1(struct usb_xfer *xfer, usb_error_t error)
2926 {
2927
2928
2929 mtw_bulk_tx_callbackN(xfer, error, 1);
2930 }
2931
2932 static void
2933 mtw_bulk_tx_callback2(struct usb_xfer *xfer, usb_error_t error)
2934 {
2935 mtw_bulk_tx_callbackN(xfer, error, 2);
2936 }
2937
2938 static void
2939 mtw_bulk_tx_callback3(struct usb_xfer *xfer, usb_error_t error)
2940 {
2941 mtw_bulk_tx_callbackN(xfer, error, 3);
2942 }
2943
2944 static void
2945 mtw_bulk_tx_callback4(struct usb_xfer *xfer, usb_error_t error)
2946 {
2947 mtw_bulk_tx_callbackN(xfer, error, 4);
2948 }
2949
2950 static void
2951 mtw_bulk_tx_callback5(struct usb_xfer *xfer, usb_error_t error)
2952 {
2953 mtw_bulk_tx_callbackN(xfer, error, 5);
2954 }
2955
2956 static void
2957 mtw_set_tx_desc(struct mtw_softc *sc, struct mtw_tx_data *data)
2958 {
2959 struct mbuf *m = data->m;
2960 struct ieee80211com *ic = &sc->sc_ic;
2961 struct ieee80211vap *vap = data->ni->ni_vap;
2962 struct ieee80211_frame *wh;
2963 struct mtw_txd *txd;
2964 struct mtw_txwi *txwi;
2965 uint16_t xferlen, txwisize;
2966 uint16_t mcs;
2967 uint8_t ridx = data->ridx;
2968 uint8_t pad;
2969
2970 /* get MCS code from rate index */
2971 mcs = rt2860_rates[ridx].mcs;
2972
2973 txwisize = sizeof(*txwi);
2974 xferlen = txwisize + m->m_pkthdr.len;
2975
2976 /* roundup to 32-bit alignment */
2977 xferlen = (xferlen + 3) & ~3;
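/* e.g. a 61-byte TXWI + frame becomes a 64-byte transfer length */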
2978
2979 txd = (struct mtw_txd *)&data->desc;
2980 txd->len = htole16(xferlen);
2981
2982 wh = mtod(m, struct ieee80211_frame *);
2983
2984 /*
2985 * If both are true or both are false, the header is
2986 * nicely aligned to 32 bits, so no L2 padding is needed.
2987 */
2988 if (IEEE80211_HAS_ADDR4(wh) == IEEE80211_QOS_HAS_SEQ(wh))
2989 pad = 0;
2990 else
2991 pad = 2;
2992
2993 /* setup TX Wireless Information */
2994 txwi = (struct mtw_txwi *)(txd + 1);
2995 txwi->len = htole16(m->m_pkthdr.len - pad);
2996 if (rt2860_rates[ridx].phy == IEEE80211_T_DS) {
2997 mcs |= MTW_PHY_CCK;
2998 if (ridx != MTW_RIDX_CCK1 &&
2999 (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
3000 mcs |= MTW_PHY_SHPRE;
3001 } else if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM) {
3002 mcs |= MTW_PHY_OFDM;
3003 } else if (rt2860_rates[ridx].phy == IEEE80211_T_HT) {
3004 /* XXX TODO: [adrian] set short preamble for MCS? */
3005 mcs |= MTW_PHY_HT; /* Mixed, not greenfield */
3006 }
3007 txwi->phy = htole16(mcs);
3008
3009 /* check if RTS/CTS or CTS-to-self protection is required */
3010 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3011 ((m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) ||
3012 ((ic->ic_flags & IEEE80211_F_USEPROT) &&
3013 rt2860_rates[ridx].phy == IEEE80211_T_OFDM) ||
3014 ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
3015 rt2860_rates[ridx].phy == IEEE80211_T_HT)))
3016 txwi->txop |= MTW_TX_TXOP_HT;
3017 else
3018 txwi->txop |= MTW_TX_TXOP_BACKOFF;
3019
3020 }
3021
3022 /* This function must be called locked */
3023 static int
3024 mtw_tx(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3025 {
3026 struct ieee80211com *ic = &sc->sc_ic;
3027 struct ieee80211vap *vap = ni->ni_vap;
3028 struct ieee80211_frame *wh;
3029
3030
3031 //const struct ieee80211_txparam *tp = ni->ni_txparms;
3032 struct mtw_node *rn = MTW_NODE(ni);
3033 struct mtw_tx_data *data;
3034 struct mtw_txd *txd;
3035 struct mtw_txwi *txwi;
3036 uint16_t qos;
3037 uint16_t dur;
3038 uint16_t qid;
3039 uint8_t type;
3040 uint8_t tid;
3041 uint16_t ridx;
3042 uint8_t ctl_ridx;
3043 uint16_t qflags;
3044 uint8_t xflags = 0;
3045
3046 int hasqos;
3047
3048 MTW_LOCK_ASSERT(sc, MA_OWNED);
3049
3050 wh = mtod(m, struct ieee80211_frame *);
3051 const struct ieee80211_txparam *tp = ni->ni_txparms;
3052 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3053
3054 qflags = htole16(MTW_TXD_DATA | MTW_TXD_80211 |
3055 MTW_TXD_WLAN | MTW_TXD_QSEL_HCCA);
3056
3057 if ((hasqos = IEEE80211_QOS_HAS_SEQ(wh))) {
3058 uint8_t *frm;
3059 frm = ieee80211_getqos(wh);
3060
3061
3062 //device_printf(sc->sc_dev,"JSS:frm:%d",*frm);
3063 qos = le16toh(*(const uint16_t *)frm);
3064 tid = ieee80211_gettid(wh);
3065 qid = TID_TO_WME_AC(tid);
3066 qflags |= MTW_TXD_QSEL_EDCA;
3067 } else {
3068 qos = 0;
3069 tid = 0;
3070 qid = WME_AC_BE;
3071 }
3072 if (type & IEEE80211_FC0_TYPE_MGT) {
3073 qid = 0;
3074 }
3075
3076 if (type != IEEE80211_FC0_TYPE_DATA)
3077 qflags |= htole16(MTW_TXD_WIV);
3078
3079 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3080 type != IEEE80211_FC0_TYPE_DATA || m->m_flags & M_EAPOL) {
3081 /* XXX TODO: methodize for 11n; use MCS0 for 11NA/11NG */
3082 ridx = (ic->ic_curmode == IEEE80211_MODE_11A
3083 || ic->ic_curmode == IEEE80211_MODE_11NA) ?
3084 MTW_RIDX_OFDM6 : MTW_RIDX_CCK1;
3085 if (ic->ic_curmode == IEEE80211_MODE_11NG) {
3086 ridx = 12;
3087 }
3088 ctl_ridx = rt2860_rates[ridx].ctl_ridx;
3089 } else {
3090 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3091 ridx = rn->fix_ridx;
3092 ctl_ridx = rt2860_rates[ridx].ctl_ridx;
3093 } else {
3094 ridx = rn->amrr_ridx;
3095 ctl_ridx = rt2860_rates[ridx].ctl_ridx;
3096 }
3097 }
3098
3099 if (hasqos)
3100 xflags = 0;
3101 else
3102 xflags = MTW_TX_NSEQ;
3103
3104 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3105 (!hasqos ||
3106 (qos & IEEE80211_QOS_ACKPOLICY) !=
3107 IEEE80211_QOS_ACKPOLICY_NOACK)) {
3108 xflags |= MTW_TX_ACK;
3109 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
3110 dur = rt2860_rates[ctl_ridx].sp_ack_dur;
3111 else
3112 dur = rt2860_rates[ctl_ridx].lp_ack_dur;
3113 USETW(wh->i_dur, dur);
3114 }
3115 /* reserve slots for mgmt packets, just in case */
3116 if (sc->sc_epq[qid].tx_nfree < 3) {
3117 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "tx ring %d is full\n", qid);
3118 return (-1);
3119 }
3120
3121 data = STAILQ_FIRST(&sc->sc_epq[qid].tx_fh);
3122 STAILQ_REMOVE_HEAD(&sc->sc_epq[qid].tx_fh, next);
3123 sc->sc_epq[qid].tx_nfree--;
3124
3125 txd = (struct mtw_txd *)&data->desc;
3126 txd->flags = qflags;
3127
3128 txwi = (struct mtw_txwi *)(txd + 1);
3129 txwi->xflags = xflags;
3130 txwi->wcid = (type == IEEE80211_FC0_TYPE_DATA) ?
3131
3132 MTW_AID2WCID(ni->ni_associd) :
3133 0xff;
3134
3135 /* clear leftover garbage bits */
3136 txwi->flags = 0;
3137 txwi->txop = 0;
3138
3139 data->m = m;
3140 data->ni = ni;
3141 data->ridx = ridx;
3142
3143 ieee80211_output_seqno_assign(ni, -1, m);
3144
3145 mtw_set_tx_desc(sc, data);
3146
3147 /*
3148 * The chip keeps track of two kinds of Tx stats:
3149 * * TX_STAT_FIFO, for per-WCID stats, and
3150 * * TX_STA_CNT0, for aggregate all-TX stats.
3151 *
3152 * To use the FIFO stats, we need to store the MCS in the
3153 * driver-private PacketID field so that we can tell whose stats we
3154 * are reading.  We add 1 to the MCS because a PacketID of 0 means
3155 * that we don't want any feedback in TX_STAT_FIFO.
3156 * That is exactly what we want in STA mode, since TX_STA_CNT0
3157 * already does the job there.
3158 *
3159 * FIFO stats don't count Tx with WCID 0xff, which is why this is
3160 * handled here in mtw_tx().
3161 */
3162
3163 if (sc->rvp_cnt > 1 || vap->iv_opmode == IEEE80211_M_HOSTAP ||
3164 vap->iv_opmode == IEEE80211_M_MBSS) {
3165
3166 /*
3167 * Unlike PCI based devices, we don't get any interrupts from
3168 * USB devices, so we simulate a FIFO-is-full interrupt here.
3169 * Ralink recommends draining the FIFO stats every 100 ms, but the
3170 * 16 slots fill up quickly.  To prevent overflow, increment a
3171 * counter on every FIFO stat request so we know how many slots
3172 * are left.  We do this only in HOSTAP or multi-vap mode since
3173 * FIFO stats are used only in those modes.  We just drain the
3174 * stats; AMRR itself is updated once per second by
3175 * mtw_ratectl_cb() via a callout.  Drain early to avoid overflow.
3176 */
3177 if (sc->fifo_cnt++ == 10) {
3178 /*
3179 * With multiple vaps or if_bridge, if_start() can be called
3180 * with a non-sleepable lock (tcpinp) held, so defer the work.
3181 */
3182 uint32_t i = MTW_CMDQ_GET(&sc->cmdq_store);
3183 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "cmdq_store=%d\n", i);
3184 sc->cmdq[i].func = mtw_drain_fifo;
3185 sc->cmdq[i].arg0 = sc;
3186 ieee80211_runtask(ic, &sc->cmdq_task);
3187 }
3188 }
3189
3190 STAILQ_INSERT_TAIL(&sc->sc_epq[qid].tx_qh, data, next);
3191 usbd_transfer_start(sc->sc_xfer[mtw_wme_ac_xfer_map[qid]]);
3192
3193 MTW_DPRINTF(sc, MTW_DEBUG_XMIT,
3194 "sending data frame len=%d rate=%d qid=%d\n",
3195 m->m_pkthdr.len +
3196 (int)(sizeof(struct mtw_txd) + sizeof(struct mtw_txwi)),
3197 rt2860_rates[ridx].rate, qid);
3198
3199 return (0);
3200 }
3201
3202 static int
3203 mtw_tx_mgt(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3204 {
3205 struct ieee80211com *ic = &sc->sc_ic;
3206 struct mtw_node *rn = MTW_NODE(ni);
3207 struct mtw_tx_data *data;
3208 struct ieee80211_frame *wh;
3209 struct mtw_txd *txd;
3210 struct mtw_txwi *txwi;
3211 uint8_t type;
3212 uint16_t dur;
3213 uint8_t ridx = rn->mgt_ridx;
3214 uint8_t xflags = 0;
3215 uint8_t wflags = 0;
3216
3217 MTW_LOCK_ASSERT(sc, MA_OWNED);
3218
3219 wh = mtod(m, struct ieee80211_frame *);
3220
3221 /* tell hardware to add timestamp for probe responses */
3222 if ((wh->i_fc[0] &
3223 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
3224 (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
3225 wflags |= MTW_TX_TS;
3226 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3227 xflags |= MTW_TX_ACK;
3228
3229 dur = ieee80211_ack_duration(ic->ic_rt, rt2860_rates[ridx].rate,
3230 ic->ic_flags & IEEE80211_F_SHPREAMBLE);
3231 USETW(wh->i_dur, dur);
3232 }
3233 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3234 if (sc->sc_epq[0].tx_nfree == 0)
3235 /* let caller free mbuf */
3236 return (EIO);
3237 data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
3238 STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
3239 sc->sc_epq[0].tx_nfree--;
3240
3241 txd = (struct mtw_txd *)&data->desc;
3242 txd->flags = htole16(
3243 MTW_TXD_DATA | MTW_TXD_80211 | MTW_TXD_WLAN | MTW_TXD_QSEL_EDCA);
3244 if (type != IEEE80211_FC0_TYPE_DATA)
3245 txd->flags |= htole16(MTW_TXD_WIV);
3246
3247 txwi = (struct mtw_txwi *)(txd + 1);
3248 txwi->wcid = 0xff;
3249 txwi->xflags = xflags;
3250 txwi->flags = wflags;
3251
3252 txwi->txop = 0; /* clear leftover garbage bits */
3253
3254 data->m = m;
3255 data->ni = ni;
3256 data->ridx = ridx;
3257
3258 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending mgt frame len=%d rate=%d\n",
3259 m->m_pkthdr.len +
3260 (int)(sizeof(struct mtw_txd) + sizeof(struct mtw_txwi)),
3261 rt2860_rates[ridx].rate);
3262
3263 STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);
3264
3265 usbd_transfer_start(sc->sc_xfer[MTW_BULK_TX_BE]);
3266
3267 return (0);
3268 }
3269
3270 static int
3271 mtw_sendprot(struct mtw_softc *sc, const struct mbuf *m,
3272 struct ieee80211_node *ni, int prot, int rate)
3273 {
3274 struct ieee80211com *ic = ni->ni_ic;
3275 struct mtw_tx_data *data;
3276 struct mtw_txd *txd;
3277 struct mtw_txwi *txwi;
3278 struct mbuf *mprot;
3279 int ridx;
3280 int protrate;
3281 uint8_t wflags = 0;
3282 uint8_t xflags = 0;
3283
3284 MTW_LOCK_ASSERT(sc, MA_OWNED);
3285
3286 /* check that there are free slots before allocating the mbuf */
3287 if (sc->sc_epq[0].tx_nfree == 0)
3288 /* let caller free mbuf */
3289 return (ENOBUFS);
3290
3291 mprot = ieee80211_alloc_prot(ni, m, rate, prot);
3292 if (mprot == NULL) {
3293 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
3294 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "could not allocate mbuf\n");
3295 return (ENOBUFS);
3296 }
3297
3298 protrate = ieee80211_ctl_rate(ic->ic_rt, rate);
3299 wflags = MTW_TX_FRAG;
3300 xflags = 0;
3301 if (prot == IEEE80211_PROT_RTSCTS)
3302 xflags |= MTW_TX_ACK;
3303
3304 data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
3305 STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
3306 sc->sc_epq[0].tx_nfree--;
3307
3308 txd = (struct mtw_txd *)&data->desc;
3309 txd->flags = RT2860_TX_QSEL_EDCA;
3310 txwi = (struct mtw_txwi *)(txd + 1);
3311 txwi->wcid = 0xff;
3312 txwi->flags = wflags;
3313 txwi->xflags = xflags;
3314 txwi->txop = 0; /* clear leftover garbage bits */
3315
3316 data->m = mprot;
3317 data->ni = ieee80211_ref_node(ni);
3318
3319 /* XXX TODO: methodize with MCS rates */
3320 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
3321 if (rt2860_rates[ridx].rate == protrate)
3322 break;
3323 data->ridx = ridx;
3324
3325 mtw_set_tx_desc(sc, data);
3326 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending prot len=%u rate=%u\n",
3327 m->m_pkthdr.len, rate);
3328
3329 STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);
3330
3331 usbd_transfer_start(sc->sc_xfer[0]);
3332
3333 return (0);
3334 }
3335
3336 static int
3337 mtw_tx_param(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
3338 const struct ieee80211_bpf_params *params)
3339 {
3340 struct ieee80211com *ic = ni->ni_ic;
3341 struct mtw_tx_data *data;
3342 struct mtw_txd *txd;
3343 struct mtw_txwi *txwi;
3344 uint8_t ridx;
3345 uint8_t rate;
3346 uint8_t opflags = 0;
3347 uint8_t xflags = 0;
3348 int error;
3349
3350 MTW_LOCK_ASSERT(sc, MA_OWNED);
3351
3352 KASSERT(params != NULL, ("no raw xmit params"));
3353
3354 rate = params->ibp_rate0;
3355 if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
3356 /* let caller free mbuf */
3357 return (EINVAL);
3358 }
3359
3360 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
3361 xflags |= MTW_TX_ACK;
3362 if (params->ibp_flags & (IEEE80211_BPF_RTS | IEEE80211_BPF_CTS)) {
3363 error = mtw_sendprot(sc, m, ni,
3364 params->ibp_flags & IEEE80211_BPF_RTS ?
3365 IEEE80211_PROT_RTSCTS :
3366 IEEE80211_PROT_CTSONLY,
3367 rate);
3368 if (error) {
3369 device_printf(sc->sc_dev, "%s:%d %d\n", __FILE__,
3370 __LINE__, error);
3371 return (error);
3372 }
3373 opflags |= MTW_TX_TXOP_SIFS;
3374 }
3375
3376 if (sc->sc_epq[0].tx_nfree == 0) {
3377 /* let caller free mbuf */
3378 MTW_DPRINTF(sc, MTW_DEBUG_XMIT,
3379 "sending raw frame, but tx ring is full\n");
3380 return (EIO);
3381 }
3382 data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
3383 STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
3384 sc->sc_epq[0].tx_nfree--;
3385
3386 txd = (struct mtw_txd *)&data->desc;
3387 txd->flags = htole16(
3388 MTW_TXD_DATA | MTW_TXD_80211 | MTW_TXD_WLAN | MTW_TXD_QSEL_EDCA);
3389 // txd->flags = htole16(MTW_TXD_QSEL_EDCA);
3390 txwi = (struct mtw_txwi *)(txd + 1);
3391 txwi->wcid = 0xff;
3392 txwi->xflags = xflags;
3393 txwi->txop = opflags;
3394 txwi->flags = 0; /* clear leftover garbage bits */
3395
3396 data->m = m;
3397 data->ni = ni;
3398 /* XXX TODO: methodize with MCS rates */
3399 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
3400 if (rt2860_rates[ridx].rate == rate)
3401 break;
3402 data->ridx = ridx;
3403
3404 ieee80211_output_seqno_assign(ni, -1, m);
3405
3406 mtw_set_tx_desc(sc, data);
3407
3408 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending raw frame len=%u rate=%u\n",
3409 m->m_pkthdr.len, rate);
3410
3411 STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);
3412
3413 usbd_transfer_start(sc->sc_xfer[MTW_BULK_RAW_TX]);
3414
3415 return (0);
3416 }
3417
3418 static int
3419 mtw_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3420 const struct ieee80211_bpf_params *params)
3421 {
3422 struct mtw_softc *sc = ni->ni_ic->ic_softc;
3423 int error = 0;
3424 MTW_LOCK(sc);
3425 /* prevent management frames from being sent if we're not ready */
3426 if (!(sc->sc_flags & MTW_RUNNING)) {
3427 error = ENETDOWN;
3428 goto done;
3429 }
3430
3431 if (params == NULL) {
3432 /* tx mgt packet */
3433 if ((error = mtw_tx_mgt(sc, m, ni)) != 0) {
3434 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "mgt tx failed\n");
3435 goto done;
3436 }
3437 } else {
3438 /* tx raw packet with param */
3439 if ((error = mtw_tx_param(sc, m, ni, params)) != 0) {
3440 MTW_DPRINTF(sc, MTW_DEBUG_XMIT,
3441 "tx with param failed\n");
3442 goto done;
3443 }
3444 }
3445
3446 done:
3447
3448 MTW_UNLOCK(sc);
3449
3450 if (error != 0) {
3451 if (m != NULL)
3452 m_freem(m);
3453 }
3454
3455 return (error);
3456 }
3457
3458 static int
3459 mtw_transmit(struct ieee80211com *ic, struct mbuf *m)
3460 {
3461 struct mtw_softc *sc = ic->ic_softc;
3462 int error;
3463 MTW_LOCK(sc);
3464 if ((sc->sc_flags & MTW_RUNNING) == 0) {
3465 MTW_UNLOCK(sc);
3466 return (ENXIO);
3467 }
3468 error = mbufq_enqueue(&sc->sc_snd, m);
3469 if (error) {
3470 MTW_UNLOCK(sc);
3471 return (error);
3472 }
3473 mtw_start(sc);
3474 MTW_UNLOCK(sc);
3475
3476 return (0);
3477 }
3478
3479 static void
3480 mtw_start(struct mtw_softc *sc)
3481 {
3482 struct ieee80211_node *ni;
3483 struct mbuf *m;
3484
3485 MTW_LOCK_ASSERT(sc, MA_OWNED);
3486
3487 if ((sc->sc_flags & MTW_RUNNING) == 0) {
3488
3489 return;
3490 }
3491 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
3492 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3493 if (mtw_tx(sc, m, ni) != 0) {
3494 mbufq_prepend(&sc->sc_snd, m);
3495 break;
3496 }
3497 }
3498 }
3499
3500 static void
3501 mtw_parent(struct ieee80211com *ic)
3502 {
3503
3504 struct mtw_softc *sc = ic->ic_softc;
3505
3506 MTW_LOCK(sc);
3507 if (sc->sc_detached) {
3508 MTW_UNLOCK(sc);
3509 return;
3510 }
3511
3512 if (!(sc->sc_flags & MTW_RUNNING) && ic->ic_nrunning > 0) {
3513 mtw_init_locked(sc);
3514 MTW_UNLOCK(sc);
3515 ieee80211_start_all(ic);
3516 return;
3517 }
3518 if ((sc->sc_flags & MTW_RUNNING) && ic->ic_nrunning > 0) {
3519 mtw_update_promisc_locked(sc);
3520 MTW_UNLOCK(sc);
3521 return;
3522 }
3523 if ((sc->sc_flags & MTW_RUNNING) && sc->rvp_cnt <= 1 &&
3524 ic->ic_nrunning == 0) {
3525 mtw_stop(sc);
3526 MTW_UNLOCK(sc);
3527 return;
3528 }
3529 return;
3530 }
3531
3532 static void
3533 mt7601_set_agc(struct mtw_softc *sc, uint8_t agc)
3534 {
3535 uint8_t bbp;
3536
3537 mtw_bbp_write(sc, 66, agc);
3538 mtw_bbp_write(sc, 195, 0x87);
3539 bbp = (agc & 0xf0) | 0x08;
3540 mtw_bbp_write(sc, 196, bbp);
3541 }
3542
3543 static int
3544 mtw_mcu_calibrate(struct mtw_softc *sc, int func, uint32_t val)
3545 {
3546 struct mtw_mcu_cmd_8 cmd;
3547
3548 cmd.func = htole32(func);
3549 cmd.val = htole32(val);
3550 return (mtw_mcu_cmd(sc, 31, &cmd, sizeof(struct mtw_mcu_cmd_8)));
3551 }
3552
3553 static int
3554 mtw_rf_write(struct mtw_softc *sc, uint8_t bank, uint8_t reg, uint8_t val)
3555 {
3556 uint32_t tmp;
3557 int error, ntries, shift;
3558
3559 for (ntries = 0; ntries < 10; ntries++) {
3560 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
3561 return (error);
3562 if (!(tmp & MTW_RF_CSR_KICK))
3563 break;
3564 }
3565 if (ntries == 10)
3566 return (ETIMEDOUT);
3567
3568 if (sc->asic_ver == 0x7601)
3569 shift = MT7601_BANK_SHIFT;
3570 else
3571 shift = MT7610_BANK_SHIFT;
3572
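/*
 * RF registers are accessed indirectly through MTW_RF_CSR: wait for
 * the KICK bit to clear, then write bank/register/value with KICK
 * (and WRITE for writes) set, as done below.
 */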
3573 tmp = MTW_RF_CSR_WRITE | MTW_RF_CSR_KICK | (bank & 0xf) << shift |
3574 reg << 8 | val;
3575 return (mtw_write(sc, MTW_RF_CSR, tmp));
3576 }
3577
3578 void
3579 mtw_select_chan_group(struct mtw_softc *sc, int group)
3580 {
3581 uint32_t tmp;
3582 uint8_t bbp;
3583
3584 /* Tx band 20MHz 2G */
3585 mtw_read(sc, MTW_TX_BAND_CFG, &tmp);
3586 tmp &= ~(
3587 MTW_TX_BAND_SEL_2G | MTW_TX_BAND_SEL_5G | MTW_TX_BAND_UPPER_40M);
3588 tmp |= (group == 0) ? MTW_TX_BAND_SEL_2G : MTW_TX_BAND_SEL_5G;
3589 mtw_write(sc, MTW_TX_BAND_CFG, tmp);
3590
3591 /* select 20 MHz bandwidth */
3592 mtw_bbp_read(sc, 4, &bbp);
3593 bbp &= ~0x18;
3594 bbp |= 0x40;
3595 mtw_bbp_write(sc, 4, bbp);
3596
3597 /* calibrate BBP */
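/*
 * The paired writes to BBP registers 195/196 below appear to form an
 * index/value sequence for extended BBP registers (195 selects the
 * register, 196 supplies the value); this is an assumption based on
 * the access pattern, not documented here.
 */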
3598 mtw_bbp_write(sc, 69, 0x12);
3599 mtw_bbp_write(sc, 91, 0x07);
3600 mtw_bbp_write(sc, 195, 0x23);
3601 mtw_bbp_write(sc, 196, 0x17);
3602 mtw_bbp_write(sc, 195, 0x24);
3603 mtw_bbp_write(sc, 196, 0x06);
3604 mtw_bbp_write(sc, 195, 0x81);
3605 mtw_bbp_write(sc, 196, 0x12);
3606 mtw_bbp_write(sc, 195, 0x83);
3607 mtw_bbp_write(sc, 196, 0x17);
3608 mtw_rf_write(sc, 5, 8, 0x00);
3609 // mtw_mcu_calibrate(sc, 0x6, 0x10001);
3610
3611 /* set initial AGC value */
3612 mt7601_set_agc(sc, 0x14);
3613 }
3614
3615 static int
3616 mtw_rf_read(struct mtw_softc *sc, uint8_t bank, uint8_t reg, uint8_t *val)
3617 {
3618 uint32_t tmp;
3619 int error, ntries, shift;
3620
3621 for (ntries = 0; ntries < 100; ntries++) {
3622 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
3623 return (error);
3624 if (!(tmp & MTW_RF_CSR_KICK))
3625 break;
3626 }
3627 if (ntries == 100)
3628 return (ETIMEDOUT);
3629
3630 if (sc->asic_ver == 0x7601)
3631 shift = MT7601_BANK_SHIFT;
3632 else
3633 shift = MT7610_BANK_SHIFT;
3634
3635 tmp = MTW_RF_CSR_KICK | (bank & 0xf) << shift | reg << 8;
3636 if ((error = mtw_write(sc, MTW_RF_CSR, tmp)) != 0)
3637 return (error);
3638
3639 for (ntries = 0; ntries < 100; ntries++) {
3640 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
3641 return (error);
3642 if (!(tmp & MTW_RF_CSR_KICK))
3643 break;
3644 }
3645 if (ntries == 100)
3646 return (ETIMEDOUT);
3647
3648 *val = tmp & 0xff;
3649 return (0);
3650 }
3651 static void
3652 mt7601_set_chan(struct mtw_softc *sc, u_int chan)
3653 {
3654 uint32_t tmp;
3655 uint8_t bbp, rf, txpow1;
3656 int i;
3657 /* find the settings for this channel */
3658 for (i = 0; mt7601_rf_chan[i].chan != chan; i++)
3659 ;
3660
3661 mtw_rf_write(sc, 0, 17, mt7601_rf_chan[i].r17);
3662 mtw_rf_write(sc, 0, 18, mt7601_rf_chan[i].r18);
3663 mtw_rf_write(sc, 0, 19, mt7601_rf_chan[i].r19);
3664 mtw_rf_write(sc, 0, 20, mt7601_rf_chan[i].r20);
3665
3666 /* use Tx power values from EEPROM */
3667 txpow1 = sc->txpow1[i];
3668
3669 /* Tx automatic level control */
3670 mtw_read(sc, MTW_TX_ALC_CFG0, &tmp);
3671 tmp &= ~0x3f3f;
3672 tmp |= (txpow1 & 0x3f);
3673 mtw_write(sc, MTW_TX_ALC_CFG0, tmp);
3674
3675 /* LNA */
3676 mtw_bbp_write(sc, 62, 0x37 - sc->lna[0]);
3677 mtw_bbp_write(sc, 63, 0x37 - sc->lna[0]);
3678 mtw_bbp_write(sc, 64, 0x37 - sc->lna[0]);
3679
3680 /* VCO calibration */
3681 mtw_rf_write(sc, 0, 4, 0x0a);
3682 mtw_rf_write(sc, 0, 5, 0x20);
3683 mtw_rf_read(sc, 0, 4, &rf);
3684 mtw_rf_write(sc, 0, 4, rf | 0x80);
3685
3686 /* select 20 MHz bandwidth */
3687 mtw_bbp_read(sc, 4, &bbp);
3688 bbp &= ~0x18;
3689 bbp |= 0x40;
3690 mtw_bbp_write(sc, 4, bbp);
3691 mtw_bbp_write(sc, 178, 0xff);
3692 }
3693
3694 static int
3695 mtw_set_chan(struct mtw_softc *sc, struct ieee80211_channel *c)
3696 {
3697 struct ieee80211com *ic = &sc->sc_ic;
3698 u_int chan, group;
3699
3700 chan = ieee80211_chan2ieee(ic, c);
3701 if (chan == 0 || chan == IEEE80211_CHAN_ANY)
3702 return (EINVAL);
3703
3704 /* determine channel group */
3705 if (chan <= 14)
3706 group = 0;
3707 else if (chan <= 64)
3708 group = 1;
3709 else if (chan <= 128)
3710 group = 2;
3711 else
3712 group = 3;
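/* group 0 covers the 2 GHz channels (1-14); groups 1-3 split the 5 GHz range */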
3713
3714 if (group != sc->sc_chan_group || !sc->sc_bw_calibrated)
3715 mtw_select_chan_group(sc, group);
3716
3717 sc->sc_chan_group = group;
3718
3719 /* chipset specific */
3720 if (sc->asic_ver == 0x7601)
3721 mt7601_set_chan(sc, chan);
3722
3723 DELAY(1000);
3724 return (0);
3725 }
3726
3727 static void
3728 mtw_set_channel(struct ieee80211com *ic)
3729 {
3730 struct mtw_softc *sc = ic->ic_softc;
3731
3732 MTW_LOCK(sc);
3733 mtw_set_chan(sc, ic->ic_curchan);
3734 MTW_UNLOCK(sc);
3735
3736 return;
3737 }
3738
3739 static void
3740 mtw_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans,
3741 struct ieee80211_channel chans[])
3742 {
3743 // struct mtw_softc *sc = ic->ic_softc;
3744 uint8_t bands[IEEE80211_MODE_BYTES];
3745
3746 memset(bands, 0, sizeof(bands));
3747 setbit(bands, IEEE80211_MODE_11B);
3748 setbit(bands, IEEE80211_MODE_11G);
3749 setbit(bands, IEEE80211_MODE_11NG);
3750
3751 /* Note: for now, only support HT20 channels */
3752 ieee80211_add_channels_default_2ghz(chans, maxchans, nchans, bands, 0);
3753 }
3754
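/*
 * While scanning, TSF synchronization is aborted and the BSSID filter is
 * opened up with the broadcast address so beacons and probe responses from
 * any AP are accepted; mtw_scan_end() below restores TSF sync and the
 * associated BSSID.
 */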
3755 static void
3756 mtw_scan_start(struct ieee80211com *ic)
3757 {
3758 struct mtw_softc *sc = ic->ic_softc;
3759 MTW_LOCK(sc);
3760 /* abort TSF synchronization */
3761 mtw_abort_tsf_sync(sc);
3762 mtw_set_bssid(sc, ieee80211broadcastaddr);
3763
3764 MTW_UNLOCK(sc);
3765
3766 return;
3767 }
3768
3769 static void
3770 mtw_scan_end(struct ieee80211com *ic)
3771 {
3772 struct mtw_softc *sc = ic->ic_softc;
3773
3774 MTW_LOCK(sc);
3775
3776 mtw_enable_tsf_sync(sc);
3777 mtw_set_bssid(sc, sc->sc_bssid);
3778
3779 MTW_UNLOCK(sc);
3780
3781 return;
3782 }
3783
3784 /*
3785 * Could be called from ieee80211_node_timeout()
3786 * (non-sleepable thread)
3787 */
3788 static void
3789 mtw_update_beacon(struct ieee80211vap *vap, int item)
3790 {
3791 struct ieee80211com *ic = vap->iv_ic;
3792 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off;
3793 struct ieee80211_node *ni = vap->iv_bss;
3794 struct mtw_softc *sc = ic->ic_softc;
3795 struct mtw_vap *rvp = MTW_VAP(vap);
3796 int mcast = 0;
3797 uint32_t i;
3798
3799 switch (item) {
3800 case IEEE80211_BEACON_ERP:
3801 mtw_updateslot(ic);
3802 break;
3803 case IEEE80211_BEACON_HTINFO:
3804 mtw_updateprot(ic);
3805 break;
3806 case IEEE80211_BEACON_TIM:
3807 mcast = 1; /*TODO*/
3808 break;
3809 default:
3810 break;
3811 }
3812
3813 setbit(bo->bo_flags, item);
3814 if (rvp->beacon_mbuf == NULL) {
3815 rvp->beacon_mbuf = ieee80211_beacon_alloc(ni);
3816 if (rvp->beacon_mbuf == NULL)
3817 return;
3818 }
3819 ieee80211_beacon_update(ni, rvp->beacon_mbuf, mcast);
3820
3821 i = MTW_CMDQ_GET(&sc->cmdq_store);
3822 MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i);
3823 sc->cmdq[i].func = mtw_update_beacon_cb;
3824 sc->cmdq[i].arg0 = vap;
3825 ieee80211_runtask(ic, &sc->cmdq_task);
3826
3827 return;
3828 }
3829
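/*
 * Deferred part of mtw_update_beacon(): runs from the command queue task,
 * where it is safe to issue (possibly sleeping) USB register writes, and
 * copies the TXWI plus the beacon frame into the on-chip beacon area.
 */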
3830 static void
3831 mtw_update_beacon_cb(void *arg)
3832 {
3833
3834 struct ieee80211vap *vap = arg;
3835 struct ieee80211_node *ni = vap->iv_bss;
3836 struct mtw_vap *rvp = MTW_VAP(vap);
3837 struct ieee80211com *ic = vap->iv_ic;
3838 struct mtw_softc *sc = ic->ic_softc;
3839 struct mtw_txwi txwi;
3840 struct mbuf *m;
3841 uint16_t txwisize;
3842 uint8_t ridx;
3843 if (ni->ni_chan == IEEE80211_CHAN_ANYC)
3844 return;
3845 if (ic->ic_bsschan == IEEE80211_CHAN_ANYC)
3846 return;
3847
3848 /*
3849 	 * No need to call ieee80211_beacon_update(); mtw_update_beacon()
3850 	 * takes care of the appropriate calls.
3851 */
3852 if (rvp->beacon_mbuf == NULL) {
3853 rvp->beacon_mbuf = ieee80211_beacon_alloc(ni);
3854 if (rvp->beacon_mbuf == NULL)
3855 return;
3856 }
3857 m = rvp->beacon_mbuf;
3858
3859 memset(&txwi, 0, sizeof(txwi));
3860 txwi.wcid = 0xff;
3861 txwi.len = htole16(m->m_pkthdr.len);
3862
3863 /* send beacons at the lowest available rate */
3864 ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ? MTW_RIDX_OFDM6 :
3865 MTW_RIDX_CCK1;
3866 txwi.phy = htole16(rt2860_rates[ridx].mcs);
3867 if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM)
3868 txwi.phy |= htole16(MTW_PHY_OFDM);
3869 txwi.txop = MTW_TX_TXOP_HT;
3870 txwi.flags = MTW_TX_TS;
3871 txwi.xflags = MTW_TX_NSEQ;
3872
3873 txwisize = sizeof(txwi);
3874 mtw_write_region_1(sc, MTW_BCN_BASE, (uint8_t *)&txwi, txwisize);
3875 mtw_write_region_1(sc, MTW_BCN_BASE + txwisize, mtod(m, uint8_t *),
3876 (m->m_pkthdr.len + 1) & ~1);
3877 }
3878
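/*
 * ERP/HT protection updates may be requested from a non-sleepable context,
 * so the register programming is likewise deferred to the command queue;
 * mtw_updateprot_cb() below selects RTS/CTS or CTS-to-self protection for
 * OFDM frames based on the current net80211 flags.
 */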
3879 static void
3880 mtw_updateprot(struct ieee80211com *ic)
3881 {
3882 struct mtw_softc *sc = ic->ic_softc;
3883 uint32_t i;
3884
3885 i = MTW_CMDQ_GET(&sc->cmdq_store);
3886 	MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i);
3887 sc->cmdq[i].func = mtw_updateprot_cb;
3888 sc->cmdq[i].arg0 = ic;
3889 ieee80211_runtask(ic, &sc->cmdq_task);
3890 }
3891
3892 static void
3893 mtw_updateprot_cb(void *arg)
3894 {
3895
3896 struct ieee80211com *ic = arg;
3897 struct mtw_softc *sc = ic->ic_softc;
3898 uint32_t tmp;
3899
3900 tmp = RT2860_RTSTH_EN | RT2860_PROT_NAV_SHORT | RT2860_TXOP_ALLOW_ALL;
3901 /* setup protection frame rate (MCS code) */
3902 tmp |= (ic->ic_curmode == IEEE80211_MODE_11A) ?
3903 rt2860_rates[MTW_RIDX_OFDM6].mcs | MTW_PHY_OFDM :
3904 rt2860_rates[MTW_RIDX_CCK11].mcs;
3905
3906 /* CCK frames don't require protection */
3907 mtw_write(sc, MTW_CCK_PROT_CFG, tmp);
3908 if (ic->ic_flags & IEEE80211_F_USEPROT) {
3909 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
3910 tmp |= RT2860_PROT_CTRL_RTS_CTS;
3911 else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
3912 tmp |= RT2860_PROT_CTRL_CTS;
3913 }
3914 mtw_write(sc, MTW_OFDM_PROT_CFG, tmp);
3915 }
3916
3917 static void
3918 mtw_usb_timeout_cb(void *arg)
3919 {
3920 struct ieee80211vap *vap = arg;
3921 struct mtw_softc *sc = vap->iv_ic->ic_softc;
3922
3923 MTW_LOCK_ASSERT(sc, MA_OWNED);
3924
3925 if (vap->iv_state == IEEE80211_S_SCAN) {
3926 MTW_DPRINTF(sc, MTW_DEBUG_USB | MTW_DEBUG_STATE,
3927 "timeout caused by scan\n");
3928 /* cancel bgscan */
3929 ieee80211_cancel_scan(vap);
3930 } else {
3931 MTW_DPRINTF(sc, MTW_DEBUG_USB | MTW_DEBUG_STATE,
3932 		    "timeout from unknown cause\n");
3933 }
3934 }
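/*
 * Issue the vendor-specific reset request to the device.  This is a plain
 * control transfer with no data stage (wLength = 0); the 1000 ms timeout
 * is passed straight to usbd_do_request_flags().
 */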
3935 static int mtw_reset(struct mtw_softc *sc)
3936 {
3937
3938 usb_device_request_t req;
3939 uint16_t tmp;
3940 uint16_t actlen;
3941
3942 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
3943 req.bRequest = MTW_RESET;
3944 USETW(req.wValue, 1);
3945 USETW(req.wIndex, 0);
3946 USETW(req.wLength, 0);
3947 return (usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx,
3948 &req, &tmp, 0, &actlen, 1000));
3949
3950 }
3951
3952
3953 static void
3954 mtw_update_promisc_locked(struct mtw_softc *sc)
3955 {
3956
3957 uint32_t tmp;
3958
3959 mtw_read(sc, MTW_RX_FILTR_CFG, &tmp);
3960
3961 tmp |= MTW_DROP_UC_NOME;
3962 if (sc->sc_ic.ic_promisc > 0)
3963 tmp &= ~MTW_DROP_UC_NOME;
3964
3965 mtw_write(sc, MTW_RX_FILTR_CFG, tmp);
3966
3967 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "%s promiscuous mode\n",
3968 (sc->sc_ic.ic_promisc > 0) ? "entering" : "leaving");
3969 }
3970
3971 static void
3972 mtw_update_promisc(struct ieee80211com *ic)
3973 {
3974 struct mtw_softc *sc = ic->ic_softc;
3975
3976 if ((sc->sc_flags & MTW_RUNNING) == 0)
3977 return;
3978
3979 MTW_LOCK(sc);
3980 mtw_update_promisc_locked(sc);
3981 MTW_UNLOCK(sc);
3982 }
3983
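/*
 * (Re)arm the TSF and TBTT timers.  The beacon interval field of
 * MTW_BCN_TIME_CFG is apparently expressed in 1/16 TU units, hence the
 * "ni_intval * 16" below; sync mode 1 keeps the local TSF slaved to the
 * TSF carried in received beacons (STA mode behaviour).
 */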
3984 static void
3985 mtw_enable_tsf_sync(struct mtw_softc *sc)
3986 {
3987 struct ieee80211com *ic = &sc->sc_ic;
3988 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3989 uint32_t tmp;
3990 int error;
3991 mtw_read(sc, MTW_BCN_TIME_CFG, &tmp);
3992 tmp &= ~0x1fffff;
3993 tmp |= vap->iv_bss->ni_intval * 16;
3994 tmp |= MTW_TSF_TIMER_EN | MTW_TBTT_TIMER_EN;
3995
3996 /* local TSF is always updated with remote TSF on beacon reception */
3997 tmp |= 1 << MTW_TSF_SYNC_MODE_SHIFT;
3998 error = mtw_write(sc, MTW_BCN_TIME_CFG, tmp);
3999 if (error != 0) {
4000 device_printf(sc->sc_dev, "enable_tsf_sync failed error:%d\n",
4001 error);
4002 }
4003 return;
4004 }
4005
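/*
 * Program the legacy (non-HT) rate fallback tables.  Each nibble in the
 * LG_FBK_CFG registers gives the rate to retry with after a failure at the
 * rate indexed by that nibble position; bit 3 of the nibble appears to
 * select OFDM rather than CCK encoding.  For example, the topmost nibble
 * written below is OFDM(6) = (1 << 3) | 6 = 0xe, i.e. failed 54 Mbps
 * frames are retried at 48 Mbps.
 */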
4006 static void
4007 mtw_enable_mrr(struct mtw_softc *sc)
4008 {
4009 #define CCK(mcs) (mcs)
4010
4011 #define OFDM(mcs) (1 << 3 | (mcs))
4012 mtw_write(sc, MTW_LG_FBK_CFG0,
4013 OFDM(6) << 28 | /* 54->48 */
4014 OFDM(5) << 24 | /* 48->36 */
4015 OFDM(4) << 20 | /* 36->24 */
4016 OFDM(3) << 16 | /* 24->18 */
4017 OFDM(2) << 12 | /* 18->12 */
4018 OFDM(1) << 8 | /* 12-> 9 */
4019 OFDM(0) << 4 | /* 9-> 6 */
4020 OFDM(0)); /* 6-> 6 */
4021
4022 mtw_write(sc, MTW_LG_FBK_CFG1,
4023 CCK(2) << 12 | /* 11->5.5 */
4024 CCK(1) << 8 | /* 5.5-> 2 */
4025 CCK(0) << 4 | /* 2-> 1 */
4026 CCK(0)); /* 1-> 1 */
4027 #undef OFDM
4028 #undef CCK
4029 }
4030
4031 static void
4032 mtw_set_txpreamble(struct mtw_softc *sc)
4033 {
4034 struct ieee80211com *ic = &sc->sc_ic;
4035 uint32_t tmp;
4036
4037 mtw_read(sc, MTW_AUTO_RSP_CFG, &tmp);
4038 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4039 tmp |= MTW_CCK_SHORT_EN;
4040 else
4041 tmp &= ~MTW_CCK_SHORT_EN;
4042 mtw_write(sc, MTW_AUTO_RSP_CFG, tmp);
4043 }
4044
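/*
 * Select the basic (response) rate set for the current PHY mode.  The
 * values written below are bitmasks over the legacy rates, presumably one
 * bit per rate from 1 Mbps upward: 0x003 covers the two 11b mandatory
 * rates, 0x150 the 11a mandatory OFDM rates and 0x17f the usual 11g
 * CCK+OFDM basic set.
 */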
4045 static void
4046 mtw_set_basicrates(struct mtw_softc *sc)
4047 {
4048 struct ieee80211com *ic = &sc->sc_ic;
4049
4050 /* set basic rates mask */
4051 if (ic->ic_curmode == IEEE80211_MODE_11B)
4052 mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x003);
4053 else if (ic->ic_curmode == IEEE80211_MODE_11A)
4054 mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x150);
4055 else /* 11g */
4056 mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x17f);
4057 }
4058
4059 static void
4060 mtw_set_bssid(struct mtw_softc *sc, const uint8_t *bssid)
4061 {
4062 mtw_write(sc, MTW_MAC_BSSID_DW0,
4063 bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24);
4064 mtw_write(sc, MTW_MAC_BSSID_DW1, bssid[4] | bssid[5] << 8);
4065 }
4066
4067 static void
4068 mtw_set_macaddr(struct mtw_softc *sc, const uint8_t *addr)
4069 {
4070 mtw_write(sc, MTW_MAC_ADDR_DW0,
4071 addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
4072 mtw_write(sc, MTW_MAC_ADDR_DW1, addr[4] | addr[5] << 8 | 0xff << 16);
4073 }
4074
4075 static void
4076 mtw_updateslot(struct ieee80211com *ic)
4077 {
4078
4079 struct mtw_softc *sc = ic->ic_softc;
4080 uint32_t i;
4081
4082 i = MTW_CMDQ_GET(&sc->cmdq_store);
4083 MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i);
4084 sc->cmdq[i].func = mtw_updateslot_cb;
4085 sc->cmdq[i].arg0 = ic;
4086 ieee80211_runtask(ic, &sc->cmdq_task);
4087
4088 return;
4089 }
4090
4091 /* ARGSUSED */
4092 static void
4093 mtw_updateslot_cb(void *arg)
4094 {
4095 struct ieee80211com *ic = arg;
4096 struct mtw_softc *sc = ic->ic_softc;
4097 uint32_t tmp;
4098 mtw_read(sc, MTW_BKOFF_SLOT_CFG, &tmp);
4099 tmp &= ~0xff;
4100 tmp |= IEEE80211_GET_SLOTTIME(ic);
4101 mtw_write(sc, MTW_BKOFF_SLOT_CFG, tmp);
4102 }
4103
4104 static void
4105 mtw_update_mcast(struct ieee80211com *ic)
4106 {
4107 }
4108
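/*
 * Convert a raw RSSI value reported by the hardware into dBm using the
 * per-chain RSSI offset and LNA gain read from the ROM.  As a purely
 * hypothetical example, with rssi = 40, rssi_2ghz[rxchain] = 2 and
 * lna[0] = 10 on a 2 GHz channel, delta = 2 - 10 = -8 and the result is
 * -12 - (-8) - 40 = -44 dBm.
 */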
4109 static int8_t
4110 mtw_rssi2dbm(struct mtw_softc *sc, uint8_t rssi, uint8_t rxchain)
4111 {
4112 struct ieee80211com *ic = &sc->sc_ic;
4113 struct ieee80211_channel *c = ic->ic_curchan;
4114 int delta;
4115
4116 if (IEEE80211_IS_CHAN_5GHZ(c)) {
4117 u_int chan = ieee80211_chan2ieee(ic, c);
4118 delta = sc->rssi_5ghz[rxchain];
4119
4120 /* determine channel group */
4121 if (chan <= 64)
4122 delta -= sc->lna[1];
4123 else if (chan <= 128)
4124 delta -= sc->lna[2];
4125 else
4126 delta -= sc->lna[3];
4127 } else
4128 delta = sc->rssi_2ghz[rxchain] - sc->lna[0];
4129
4130 return (-12 - delta - rssi);
4131 }
4132 static int
4133 mt7601_bbp_init(struct mtw_softc *sc)
4134 {
4135 uint8_t bbp;
4136 int i, error, ntries;
4137
4138 /* wait for BBP to wake up */
4139 for (ntries = 0; ntries < 20; ntries++) {
4140 if ((error = mtw_bbp_read(sc, 0, &bbp)) != 0)
4141 return (error);
4142 if (bbp != 0 && bbp != 0xff)
4143 break;
4144 }
4145
4146 if (ntries == 20)
4147 return (ETIMEDOUT);
4148
4149 mtw_bbp_read(sc, 3, &bbp);
4150 mtw_bbp_write(sc, 3, 0);
4151 mtw_bbp_read(sc, 105, &bbp);
4152 mtw_bbp_write(sc, 105, 0);
4153
4154 /* initialize BBP registers to default values */
4155 for (i = 0; i < nitems(mt7601_def_bbp); i++) {
4156 if ((error = mtw_bbp_write(sc, mt7601_def_bbp[i].reg,
4157 mt7601_def_bbp[i].val)) != 0)
4158 return (error);
4159 }
4160
4161 sc->sc_bw_calibrated = 0;
4162
4163 return (0);
4164 }
4165
4166 static int
4167 mt7601_rf_init(struct mtw_softc *sc)
4168 {
4169 int i, error;
4170
4171 /* RF bank 0 */
4172 for (i = 0; i < nitems(mt7601_rf_bank0); i++) {
4173 error = mtw_rf_write(sc, 0, mt7601_rf_bank0[i].reg,
4174 mt7601_rf_bank0[i].val);
4175 if (error != 0)
4176 return (error);
4177 }
4178 /* RF bank 4 */
4179 for (i = 0; i < nitems(mt7601_rf_bank4); i++) {
4180 error = mtw_rf_write(sc, 4, mt7601_rf_bank4[i].reg,
4181 mt7601_rf_bank4[i].val);
4182 if (error != 0)
4183 return (error);
4184 }
4185 /* RF bank 5 */
4186 for (i = 0; i < nitems(mt7601_rf_bank5); i++) {
4187 error = mtw_rf_write(sc, 5, mt7601_rf_bank5[i].reg,
4188 mt7601_rf_bank5[i].val);
4189 if (error != 0)
4190 return (error);
4191 }
4192 return (0);
4193 }
4194
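/*
 * Bring the MAC and DMA engines online: enable MAC TX, wait for the WPDMA
 * engine to go idle, then enable TX/RX DMA, USB RX bulk aggregation and
 * the RX filter appropriate for the current operating mode before finally
 * enabling MAC RX.
 */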
4195 static int
4196 mtw_txrx_enable(struct mtw_softc *sc)
4197 {
4198 struct ieee80211com *ic = &sc->sc_ic;
4199 uint32_t tmp;
4200 int error, ntries;
4201 mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_TX_EN);
4202 for (ntries = 0; ntries < 200; ntries++) {
4203 if ((error = mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp)) != 0) {
4204 return (error);
4205 }
4206 if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
4207 break;
4208 mtw_delay(sc, 50);
4209 }
4210 if (ntries == 200) {
4211 return (ETIMEDOUT);
4212 }
4213
4214 DELAY(50);
4215
4216 tmp |= MTW_RX_DMA_EN | MTW_TX_DMA_EN | MTW_TX_WB_DDONE;
4217 mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);
4218
4219 /* enable Rx bulk aggregation (set timeout and limit) */
4220 tmp = MTW_USB_TX_EN | MTW_USB_RX_EN | MTW_USB_RX_AGG_EN |
4221 MTW_USB_RX_AGG_TO(128) | MTW_USB_RX_AGG_LMT(2);
4222 mtw_write(sc, MTW_USB_DMA_CFG, tmp);
4223
4224 /* set Rx filter */
4225 tmp = MTW_DROP_CRC_ERR | MTW_DROP_PHY_ERR;
4226 if (ic->ic_opmode != IEEE80211_M_MONITOR) {
4227 tmp |= MTW_DROP_UC_NOME | MTW_DROP_DUPL | MTW_DROP_CTS |
4228 MTW_DROP_BA | MTW_DROP_ACK | MTW_DROP_VER_ERR |
4229 MTW_DROP_CTRL_RSV | MTW_DROP_CFACK | MTW_DROP_CFEND;
4230 if (ic->ic_opmode == IEEE80211_M_STA)
4231 tmp |= MTW_DROP_RTS | MTW_DROP_PSPOLL;
4232 }
4233 mtw_write(sc, MTW_RX_FILTR_CFG, tmp);
4234
4235 mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_RX_EN | MTW_MAC_TX_EN);
4236 return (0);
4237 }
4238 static int
4239 mt7601_rxdc_cal(struct mtw_softc *sc)
4240 {
4241 uint32_t tmp;
4242 uint8_t bbp;
4243 int ntries;
4244
4245 mtw_read(sc, MTW_MAC_SYS_CTRL, &tmp);
4246 mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_RX_EN);
4247 mtw_bbp_write(sc, 158, 0x8d);
4248 mtw_bbp_write(sc, 159, 0xfc);
4249 mtw_bbp_write(sc, 158, 0x8c);
4250 mtw_bbp_write(sc, 159, 0x4c);
4251
4252 for (ntries = 0; ntries < 20; ntries++) {
4253 DELAY(300);
4254 mtw_bbp_write(sc, 158, 0x8c);
4255 mtw_bbp_read(sc, 159, &bbp);
4256 if (bbp == 0x0c)
4257 break;
4258 }
4259
4260 if (ntries == 20)
4261 return (ETIMEDOUT);
4262
4263 mtw_write(sc, MTW_MAC_SYS_CTRL, 0);
4264 mtw_bbp_write(sc, 158, 0x8d);
4265 mtw_bbp_write(sc, 159, 0xe0);
4266 mtw_write(sc, MTW_MAC_SYS_CTRL, tmp);
4267 return (0);
4268 }
4269
4270 static int
4271 mt7601_r49_read(struct mtw_softc *sc, uint8_t flag, int8_t *val)
4272 {
4273 uint8_t bbp;
4274
4275 mtw_bbp_read(sc, 47, &bbp);
4276 bbp = 0x90;
4277 mtw_bbp_write(sc, 47, bbp);
4278 bbp &= ~0x0f;
4279 bbp |= flag;
4280 mtw_bbp_write(sc, 47, bbp);
4281 return (mtw_bbp_read(sc, 49, val));
4282 }
4283
4284 static int
4285 mt7601_rf_temperature(struct mtw_softc *sc, int8_t *val)
4286 {
4287 uint32_t rfb, rfs;
4288 uint8_t bbp;
4289 int ntries;
4290
4291 mtw_read(sc, MTW_RF_BYPASS0, &rfb);
4292 mtw_read(sc, MTW_RF_SETTING0, &rfs);
4293 mtw_write(sc, MTW_RF_BYPASS0, 0);
4294 mtw_write(sc, MTW_RF_SETTING0, 0x10);
4295 mtw_write(sc, MTW_RF_BYPASS0, 0x10);
4296
4297 mtw_bbp_read(sc, 47, &bbp);
4298 bbp &= ~0x7f;
4299 bbp |= 0x10;
4300 mtw_bbp_write(sc, 47, bbp);
4301
4302 mtw_bbp_write(sc, 22, 0x40);
4303
4304 for (ntries = 0; ntries < 10; ntries++) {
4305 mtw_bbp_read(sc, 47, &bbp);
4306 if ((bbp & 0x10) == 0)
4307 break;
4308 }
4309 if (ntries == 10)
4310 return (ETIMEDOUT);
4311
4312 mt7601_r49_read(sc, MT7601_R47_TEMP, val);
4313
4314 mtw_bbp_write(sc, 22, 0);
4315
4316 mtw_bbp_read(sc, 21, &bbp);
4317 bbp |= 0x02;
4318 mtw_bbp_write(sc, 21, bbp);
4319 bbp &= ~0x02;
4320 mtw_bbp_write(sc, 21, bbp);
4321
4322 mtw_write(sc, MTW_RF_BYPASS0, 0);
4323 mtw_write(sc, MTW_RF_SETTING0, rfs);
4324 mtw_write(sc, MTW_RF_BYPASS0, rfb);
4325 return (0);
4326 }
4327
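/*
 * One-time RF setup and calibration for the MT7601: program the default
 * RF registers, set the ROM frequency offset, read the chip temperature
 * and then run a fixed sequence of MCU calibration commands interleaved
 * with RX DC offset calibration.  The numeric calibration opcodes are
 * used as-is and are not otherwise documented here.
 */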
4328 static int
4329 mt7601_rf_setup(struct mtw_softc *sc)
4330 {
4331 uint32_t tmp;
4332 uint8_t rf;
4333 int error;
4334
4335 if (sc->sc_rf_calibrated)
4336 return (0);
4337
4338 /* init RF registers */
4339 if ((error = mt7601_rf_init(sc)) != 0)
4340 return (error);
4341
4342 /* init frequency offset */
4343 mtw_rf_write(sc, 0, 12, sc->rf_freq_offset);
4344 mtw_rf_read(sc, 0, 12, &rf);
4345
4346 /* read temperature */
4347 mt7601_rf_temperature(sc, &rf);
4348 sc->bbp_temp = rf;
4349 device_printf(sc->sc_dev, "BBP temp 0x%x\n", rf);
4350
4351 mtw_rf_read(sc, 0, 7, &rf);
4352 if ((error = mtw_mcu_calibrate(sc, 0x1, 0)) != 0)
4353 return (error);
4354 mtw_delay(sc, 100);
4355 mtw_rf_read(sc, 0, 7, &rf);
4356
4357 /* Calibrate VCO RF 0/4 */
4358 mtw_rf_write(sc, 0, 4, 0x0a);
4359 mtw_rf_write(sc, 0, 4, 0x20);
4360 mtw_rf_read(sc, 0, 4, &rf);
4361 mtw_rf_write(sc, 0, 4, rf | 0x80);
4362
4363 if ((error = mtw_mcu_calibrate(sc, 0x9, 0)) != 0)
4364 return (error);
4365 if ((error = mt7601_rxdc_cal(sc)) != 0)
4366 return (error);
4367 if ((error = mtw_mcu_calibrate(sc, 0x6, 1)) != 0)
4368 return (error);
4369 if ((error = mtw_mcu_calibrate(sc, 0x6, 0)) != 0)
4370 return (error);
4371 if ((error = mtw_mcu_calibrate(sc, 0x4, 0)) != 0)
4372 return (error);
4373 if ((error = mtw_mcu_calibrate(sc, 0x5, 0)) != 0)
4374 return (error);
4375
4376 mtw_read(sc, MTW_LDO_CFG0, &tmp);
4377 tmp &= ~(1 << 4);
4378 tmp |= (1 << 2);
4379 mtw_write(sc, MTW_LDO_CFG0, tmp);
4380
4381 if ((error = mtw_mcu_calibrate(sc, 0x8, 0)) != 0)
4382 return (error);
4383 if ((error = mt7601_rxdc_cal(sc)) != 0)
4384 return (error);
4385
4386 sc->sc_rf_calibrated = 1;
4387 return (0);
4388 }
4389
4390 static void
4391 mtw_set_txrts(struct mtw_softc *sc)
4392 {
4393 uint32_t tmp;
4394
4395 /* set RTS threshold */
4396 mtw_read(sc, MTW_TX_RTS_CFG, &tmp);
4397 tmp &= ~0xffff00;
4398 tmp |= 0x1000 << MTW_RTS_THRES_SHIFT;
4399 mtw_write(sc, MTW_TX_RTS_CFG, tmp);
4400 }
4401 static int
4402 mtw_mcu_radio(struct mtw_softc *sc, int func, uint32_t val)
4403 {
4404 struct mtw_mcu_cmd_16 cmd;
4405
4406 cmd.r1 = htole32(func);
4407 cmd.r2 = htole32(val);
4408 cmd.r3 = 0;
4409 cmd.r4 = 0;
4410 return (mtw_mcu_cmd(sc, 20, &cmd, sizeof(struct mtw_mcu_cmd_16)));
4411 }
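/*
 * Bring the interface up with the softc lock held: reset the MAC/BBP,
 * load the default MAC register values, clear the WCID/key tables,
 * initialize the RF and BBP, run the calibrations, tune to the current
 * channel and finally start the USB transfers and enable TX/RX.
 */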
4412 static void
4413 mtw_init_locked(struct mtw_softc *sc)
4414 {
4415
4416 struct ieee80211com *ic = &sc->sc_ic;
4417 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4418 uint32_t tmp;
4419 int i, error, ridx, ntries;
4420 if (ic->ic_nrunning > 1)
4421 return;
4422 mtw_stop(sc);
4423
4424 for (i = 0; i != MTW_EP_QUEUES; i++)
4425 mtw_setup_tx_list(sc, &sc->sc_epq[i]);
4426
4427 for (ntries = 0; ntries < 100; ntries++) {
4428 if ((error = mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp)) != 0)
4429 goto fail;
4430 if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
4431 break;
4432 DELAY(1000);
4433 }
4434 if (ntries == 100) {
4435 device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
4436 error = ETIMEDOUT;
4437 goto fail;
4438 }
4439 tmp &= 0xff0;
4440 tmp |= MTW_TX_WB_DDONE;
4441 mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);
4442
4443 mtw_set_leds(sc, MTW_LED_MODE_ON);
4444 /* reset MAC and baseband */
4445 mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_BBP_HRST | MTW_MAC_SRST);
4446 mtw_write(sc, MTW_USB_DMA_CFG, 0);
4447 mtw_write(sc, MTW_MAC_SYS_CTRL, 0);
4448
4449 /* init MAC values */
4450 if (sc->asic_ver == 0x7601) {
4451 for (i = 0; i < nitems(mt7601_def_mac); i++)
4452 mtw_write(sc, mt7601_def_mac[i].reg,
4453 mt7601_def_mac[i].val);
4454 }
4455
4456 /* wait while MAC is busy */
4457 for (ntries = 0; ntries < 100; ntries++) {
4458 if ((error = mtw_read(sc, MTW_MAC_STATUS_REG, &tmp)) != 0)
4459 goto fail;
4460 if (!(tmp & (MTW_RX_STATUS_BUSY | MTW_TX_STATUS_BUSY)))
4461 break;
4462 DELAY(1000);
4463 }
4464 if (ntries == 100) {
4465 error = ETIMEDOUT;
4466 goto fail;
4467 }
4468
4469 /* set MAC address */
4470
4471 mtw_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);
4472
4473 /* clear WCID attribute table */
4474 mtw_set_region_4(sc, MTW_WCID_ATTR(0), 1, 8 * 32);
4475
4476 mtw_write(sc, 0x1648, 0x00830083);
4477 mtw_read(sc, MTW_FCE_L2_STUFF, &tmp);
4478 tmp &= ~MTW_L2S_WR_MPDU_LEN_EN;
4479 mtw_write(sc, MTW_FCE_L2_STUFF, tmp);
4480
4481 /* RTS config */
4482 mtw_set_txrts(sc);
4483
4484 /* clear Host to MCU mailbox */
4485 mtw_write(sc, MTW_BBP_CSR, 0);
4486 mtw_write(sc, MTW_H2M_MAILBOX, 0);
4487
4488 /* clear RX WCID search table */
4489 mtw_set_region_4(sc, MTW_WCID_ENTRY(0), 0xffffffff, 512);
4490
4491 /* abort TSF synchronization */
4492 mtw_abort_tsf_sync(sc);
4493
4494 mtw_read(sc, MTW_US_CYC_CNT, &tmp);
4495 tmp = (tmp & ~0xff);
4496 if (sc->asic_ver == 0x7601)
4497 tmp |= 0x1e;
4498 mtw_write(sc, MTW_US_CYC_CNT, tmp);
4499
4500 /* clear shared key table */
4501 mtw_set_region_4(sc, MTW_SKEY(0, 0), 0, 8 * 32);
4502
4503 /* clear IV/EIV table */
4504 mtw_set_region_4(sc, MTW_IVEIV(0), 0, 8 * 32);
4505
4506 /* clear shared key mode */
4507 mtw_write(sc, MTW_SKEY_MODE_0_7, 0);
4508 mtw_write(sc, MTW_SKEY_MODE_8_15, 0);
4509
4510 /* txop truncation */
4511 mtw_write(sc, MTW_TXOP_CTRL_CFG, 0x0000583f);
4512
4513 /* init Tx power for all Tx rates */
4514 for (ridx = 0; ridx < 5; ridx++) {
4515 if (sc->txpow20mhz[ridx] == 0xffffffff)
4516 continue;
4517 mtw_write(sc, MTW_TX_PWR_CFG(ridx), sc->txpow20mhz[ridx]);
4518 }
4519 mtw_write(sc, MTW_TX_PWR_CFG7, 0);
4520 mtw_write(sc, MTW_TX_PWR_CFG9, 0);
4521
4522 mtw_read(sc, MTW_CMB_CTRL, &tmp);
4523 tmp &= ~(1 << 18 | 1 << 14);
4524 mtw_write(sc, MTW_CMB_CTRL, tmp);
4525
4526 /* clear USB DMA */
4527 mtw_write(sc, MTW_USB_DMA_CFG,
4528 MTW_USB_TX_EN | MTW_USB_RX_EN | MTW_USB_RX_AGG_EN |
4529 MTW_USB_TX_CLEAR | MTW_USB_TXOP_HALT | MTW_USB_RX_WL_DROP);
4530 mtw_delay(sc, 50);
4531 mtw_read(sc, MTW_USB_DMA_CFG, &tmp);
4532 tmp &= ~(MTW_USB_TX_CLEAR | MTW_USB_TXOP_HALT | MTW_USB_RX_WL_DROP);
4533 mtw_write(sc, MTW_USB_DMA_CFG, tmp);
4534
4535 /* enable radio */
4536 mtw_mcu_radio(sc, 0x31, 0);
4537
4538 /* init RF registers */
4539 if (sc->asic_ver == 0x7601)
4540 mt7601_rf_init(sc);
4541
4542 /* init baseband registers */
4543 if (sc->asic_ver == 0x7601)
4544 error = mt7601_bbp_init(sc);
4545
4546 if (error != 0) {
4547 device_printf(sc->sc_dev, "could not initialize BBP\n");
4548 goto fail;
4549 }
4550
4551 /* setup and calibrate RF */
4552 error = mt7601_rf_setup(sc);
4553
4554 if (error != 0) {
4555 device_printf(sc->sc_dev, "could not initialize RF\n");
4556 goto fail;
4557 }
4558
4559 /* select default channel */
4560 mtw_set_chan(sc, ic->ic_curchan);
4561
4562 /* setup initial protection mode */
4563 mtw_updateprot_cb(ic);
4564
4565 sc->sc_flags |= MTW_RUNNING;
4566 sc->cmdq_run = MTW_CMDQ_GO;
4567 for (i = 0; i != MTW_N_XFER; i++)
4568 usbd_xfer_set_stall(sc->sc_xfer[i]);
4569
4570 usbd_transfer_start(sc->sc_xfer[MTW_BULK_RX]);
4571
4572 error = mtw_txrx_enable(sc);
4573 if (error != 0) {
4574 goto fail;
4575 }
4576
4577 return;
4578
4579 fail:
4580
4581 mtw_stop(sc);
4582 return;
4583 }
4584
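/*
 * Stop the interface: drain the USB transfers (with the lock temporarily
 * dropped), free any pending RX mbuf, then quiesce the DMA engines, MAC
 * TX/RX, RTS retries and the packet buffer before returning.
 */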
4585 static void
4586 mtw_stop(void *arg)
4587 {
4588 struct mtw_softc *sc = (struct mtw_softc *)arg;
4589 uint32_t tmp;
4590 int i, ntries, error;
4591
4592 MTW_LOCK_ASSERT(sc, MA_OWNED);
4593
4594 sc->sc_flags &= ~MTW_RUNNING;
4595
4596 sc->ratectl_run = MTW_RATECTL_OFF;
4597 sc->cmdq_run = sc->cmdq_key_set;
4598
4599 MTW_UNLOCK(sc);
4600
4601 for (i = 0; i < MTW_N_XFER; i++)
4602 usbd_transfer_drain(sc->sc_xfer[i]);
4603
4604 MTW_LOCK(sc);
4605
4606 mtw_drain_mbufq(sc);
4607
4608 if (sc->rx_m != NULL) {
4609 m_free(sc->rx_m);
4610 sc->rx_m = NULL;
4611 }
4612
4613 /* Disable Tx/Rx DMA. */
4614 mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp);
4615 tmp &= ~(MTW_RX_DMA_EN | MTW_TX_DMA_EN);
4616 mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);
4617 // mtw_usb_dma_write(sc, 0);
4618
4619 for (ntries = 0; ntries < 100; ntries++) {
4620 if (mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp) != 0)
4621 break;
4622 if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
4623 break;
4624 DELAY(10);
4625 }
4626 if (ntries == 100) {
4627 device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
4628 }
4629
4630 /* stop MAC Tx/Rx */
4631 mtw_read(sc, MTW_MAC_SYS_CTRL, &tmp);
4632 tmp &= ~(MTW_MAC_RX_EN | MTW_MAC_TX_EN);
4633 mtw_write(sc, MTW_MAC_SYS_CTRL, tmp);
4634
4635 /* disable RTS retry */
4636 mtw_read(sc, MTW_TX_RTS_CFG, &tmp);
4637 tmp &= ~0xff;
4638 mtw_write(sc, MTW_TX_RTS_CFG, tmp);
4639
4640 /* US_CYC_CFG */
4641 mtw_read(sc, MTW_US_CYC_CNT, &tmp);
4642 tmp = (tmp & ~0xff);
4643 mtw_write(sc, MTW_US_CYC_CNT, tmp);
4644
4645 /* stop PBF */
4646 mtw_read(sc, MTW_PBF_CFG, &tmp);
4647 tmp &= ~0x3;
4648 mtw_write(sc, MTW_PBF_CFG, tmp);
4649
4650 /* wait for pending Tx to complete */
4651 for (ntries = 0; ntries < 100; ntries++) {
4652 if ((error = mtw_read(sc, MTW_TXRXQ_PCNT, &tmp)) != 0)
4653 break;
4654 if ((tmp & MTW_TX2Q_PCNT_MASK) == 0)
4655 break;
4656 }
4657
4658 }
4659
4660 static void
4661 mtw_delay(struct mtw_softc *sc, u_int ms)
4662 {
4663 usb_pause_mtx(mtx_owned(&sc->sc_mtx) ? &sc->sc_mtx : NULL,
4664 USB_MS_TO_TICKS(ms));
4665 }
4666
4667 static void
4668 mtw_update_chw(struct ieee80211com *ic)
4669 {
4670
4671 printf("%s: TODO\n", __func__);
4672 }
4673
4674 static int
4675 mtw_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
4676 {
4677
4678 /* For now, no A-MPDU TX support in the driver */
4679 return (0);
4680 }
4681
4682 static device_method_t mtw_methods[] = {
4683 /* Device interface */
4684 DEVMETHOD(device_probe, mtw_match),
4685 DEVMETHOD(device_attach, mtw_attach),
4686 DEVMETHOD(device_detach, mtw_detach), DEVMETHOD_END
4687 };
4688
4689 static driver_t mtw_driver = { .name = "mtw",
4690 .methods = mtw_methods,
4691 .size = sizeof(struct mtw_softc) };
4692
4693 DRIVER_MODULE(mtw, uhub, mtw_driver, mtw_driver_loaded, NULL);
4694 MODULE_DEPEND(mtw, wlan, 1, 1, 1);
4695 MODULE_DEPEND(mtw, usb, 1, 1, 1);
4696 MODULE_DEPEND(mtw, firmware, 1, 1, 1);
4697 MODULE_VERSION(mtw, 1);
4698 USB_PNP_HOST_INFO(mtw_devs);
4699