1 /*- 2 * Copyright (c) 2008-2010 Damien Bergamini <damien.bergamini@free.fr> 3 * Copyright (c) 2013-2014 Kevin Lo 4 * Copyright (c) 2021 James Hastings 5 * Ported to FreeBSD by Jesper Schmitz Mouridsen jsm@FreeBSD.org 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /* 21 * MediaTek MT7601U 802.11b/g/n WLAN. 22 */ 23 24 #include "opt_wlan.h" 25 26 #include <sys/param.h> 27 #include <sys/systm.h> 28 #include <sys/bus.h> 29 #include <sys/endian.h> 30 #include <sys/eventhandler.h> 31 #include <sys/firmware.h> 32 #include <sys/kdb.h> 33 #include <sys/kernel.h> 34 #include <sys/linker.h> 35 #include <sys/lock.h> 36 #include <sys/malloc.h> 37 #include <sys/mbuf.h> 38 #include <sys/module.h> 39 #include <sys/mutex.h> 40 #include <sys/socket.h> 41 #include <sys/sockio.h> 42 #include <sys/sysctl.h> 43 44 #include <net/bpf.h> 45 #include <net/ethernet.h> 46 #include <net/if.h> 47 #include <net/if_arp.h> 48 #include <net/if_dl.h> 49 #include <net/if_media.h> 50 #include <net/if_types.h> 51 #include <net/if_var.h> 52 #include <net80211/ieee80211_var.h> 53 #include <net80211/ieee80211_radiotap.h> 54 #include <net80211/ieee80211_ratectl.h> 55 #include <net80211/ieee80211_regdomain.h> 56 #include <netinet/if_ether.h> 57 #include <netinet/in.h> 58 #include <netinet/in_systm.h> 59 #include <netinet/in_var.h> 60 #include <netinet/ip.h> 61 62 #include <dev/usb/usb.h> 63 #include <dev/usb/usbdi.h> 64 65 #include "usbdevs.h" 66 67 #define USB_DEBUG_VAR mtw_debug 68 #include <dev/usb/usb_debug.h> 69 #include <dev/usb/usb_msctest.h> 70 71 #include "if_mtwreg.h" 72 #include "if_mtwvar.h" 73 74 #define MTW_DEBUG 75 76 #ifdef MTW_DEBUG 77 int mtw_debug; 78 static SYSCTL_NODE(_hw_usb, OID_AUTO, mtw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 79 "USB mtw"); 80 SYSCTL_INT(_hw_usb_mtw, OID_AUTO, debug, CTLFLAG_RWTUN, &mtw_debug, 0, 81 "mtw debug level"); 82 83 enum { 84 MTW_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ 85 MTW_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ 86 MTW_DEBUG_RECV = 0x00000004, /* basic recv operation */ 87 MTW_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ 88 MTW_DEBUG_STATE = 0x00000010, /* 802.11 state transitions */ 89 MTW_DEBUG_RATE = 0x00000020, /* rate adaptation */ 90 MTW_DEBUG_USB = 0x00000040, /* usb requests */ 91 MTW_DEBUG_FIRMWARE = 0x00000080, /* firmware(9) loading debug */ 92 MTW_DEBUG_BEACON = 0x00000100, /* beacon handling */ 93 MTW_DEBUG_INTR = 0x00000200, /* ISR */ 94 MTW_DEBUG_TEMP = 0x00000400, /* temperature calibration */ 95 MTW_DEBUG_ROM = 0x00000800, /* various ROM info */ 96 MTW_DEBUG_KEY = 0x00001000, /* crypto keys management */ 97 MTW_DEBUG_TXPWR = 0x00002000, /* dump Tx power values */ 98 MTW_DEBUG_RSSI = 0x00004000, /* dump RSSI lookups */ 99 MTW_DEBUG_RESET = 0x00008000, /* initialization progress */ 100 MTW_DEBUG_CALIB = 0x00010000, /* calibration 
progress */ 101 MTW_DEBUG_CMD = 0x00020000, /* command queue */ 102 MTW_DEBUG_ANY = 0xffffffff 103 }; 104 105 #define MTW_DPRINTF(_sc, _m, ...) \ 106 do { \ 107 if (mtw_debug & (_m)) \ 108 device_printf((_sc)->sc_dev, __VA_ARGS__); \ 109 } while (0) 110 111 #else 112 #define MTW_DPRINTF(_sc, _m, ...) \ 113 do { \ 114 (void)_sc; \ 115 } while (0) 116 #endif 117 118 #define IEEE80211_HAS_ADDR4(wh) IEEE80211_IS_DSTODS(wh) 119 120 /* NB: "11" is the maximum number of padding bytes needed for Tx */ 121 #define MTW_MAX_TXSZ \ 122 (sizeof(struct mtw_txd) + sizeof(struct mtw_txwi) + MCLBYTES + 11) 123 124 /* 125 * Because of LOR in mtw_key_delete(), use atomic instead. 126 * '& MTW_CMDQ_MASQ' is to loop cmdq[]. 127 */ 128 #define MTW_CMDQ_GET(c) (atomic_fetchadd_32((c), 1) & MTW_CMDQ_MASQ) 129 130 static const STRUCT_USB_HOST_ID mtw_devs[] = { 131 #define MTW_DEV(v, p) \ 132 { \ 133 USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) \ 134 } 135 MTW_DEV(EDIMAX, MT7601U), 136 MTW_DEV(RALINK, MT7601U), 137 MTW_DEV(XIAOMI, MT7601U) 138 }; 139 #undef MTW_DEV 140 141 static device_probe_t mtw_match; 142 static device_attach_t mtw_attach; 143 static device_detach_t mtw_detach; 144 145 static usb_callback_t mtw_bulk_rx_callback; 146 static usb_callback_t mtw_bulk_tx_callback0; 147 static usb_callback_t mtw_bulk_tx_callback1; 148 static usb_callback_t mtw_bulk_tx_callback2; 149 static usb_callback_t mtw_bulk_tx_callback3; 150 static usb_callback_t mtw_bulk_tx_callback4; 151 static usb_callback_t mtw_bulk_tx_callback5; 152 static usb_callback_t mtw_fw_callback; 153 154 static void mtw_autoinst(void *, struct usb_device *, struct usb_attach_arg *); 155 static int mtw_driver_loaded(struct module *, int, void *); 156 static void mtw_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error, 157 u_int index); 158 static struct ieee80211vap *mtw_vap_create(struct ieee80211com *, 159 const char[IFNAMSIZ], int, enum ieee80211_opmode, int, 160 const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]); 161 static void mtw_vap_delete(struct ieee80211vap *); 162 static void mtw_cmdq_cb(void *, int); 163 static void mtw_setup_tx_list(struct mtw_softc *, struct mtw_endpoint_queue *); 164 static void mtw_unsetup_tx_list(struct mtw_softc *, 165 struct mtw_endpoint_queue *); 166 static void mtw_load_microcode(void *arg); 167 168 static usb_error_t mtw_do_request(struct mtw_softc *, 169 struct usb_device_request *, void *); 170 static int mtw_read(struct mtw_softc *, uint16_t, uint32_t *); 171 static int mtw_read_region_1(struct mtw_softc *, uint16_t, uint8_t *, int); 172 static int mtw_write_2(struct mtw_softc *, uint16_t, uint16_t); 173 static int mtw_write(struct mtw_softc *, uint16_t, uint32_t); 174 static int mtw_write_region_1(struct mtw_softc *, uint16_t, uint8_t *, int); 175 static int mtw_set_region_4(struct mtw_softc *, uint16_t, uint32_t, int); 176 static int mtw_efuse_read_2(struct mtw_softc *, uint16_t, uint16_t *); 177 static int mtw_bbp_read(struct mtw_softc *, uint8_t, uint8_t *); 178 static int mtw_bbp_write(struct mtw_softc *, uint8_t, uint8_t); 179 static int mtw_mcu_cmd(struct mtw_softc *sc, uint8_t cmd, void *buf, int len); 180 static void mtw_get_txpower(struct mtw_softc *); 181 static int mtw_read_eeprom(struct mtw_softc *); 182 static struct ieee80211_node *mtw_node_alloc(struct ieee80211vap *, 183 const uint8_t mac[IEEE80211_ADDR_LEN]); 184 static int mtw_media_change(if_t); 185 static int mtw_newstate(struct ieee80211vap *, enum ieee80211_state, int); 186 static int mtw_wme_update(struct 
ieee80211com *); 187 static void mtw_key_set_cb(void *); 188 static int mtw_key_set(struct ieee80211vap *, struct ieee80211_key *); 189 static void mtw_key_delete_cb(void *); 190 static int mtw_key_delete(struct ieee80211vap *, struct ieee80211_key *); 191 static void mtw_ratectl_to(void *); 192 static void mtw_ratectl_cb(void *, int); 193 static void mtw_drain_fifo(void *); 194 static void mtw_iter_func(void *, struct ieee80211_node *); 195 static void mtw_newassoc_cb(void *); 196 static void mtw_newassoc(struct ieee80211_node *, int); 197 static int mtw_mcu_radio(struct mtw_softc *sc, int func, uint32_t val); 198 static void mtw_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, 199 const struct ieee80211_rx_stats *, int, int); 200 static void mtw_rx_frame(struct mtw_softc *, struct mbuf *, uint32_t); 201 static void mtw_tx_free(struct mtw_endpoint_queue *pq, struct mtw_tx_data *, 202 int); 203 static void mtw_set_tx_desc(struct mtw_softc *, struct mtw_tx_data *); 204 static int mtw_tx(struct mtw_softc *, struct mbuf *, struct ieee80211_node *); 205 static int mtw_tx_mgt(struct mtw_softc *, struct mbuf *, 206 struct ieee80211_node *); 207 static int mtw_sendprot(struct mtw_softc *, const struct mbuf *, 208 struct ieee80211_node *, int, int); 209 static int mtw_tx_param(struct mtw_softc *, struct mbuf *, 210 struct ieee80211_node *, const struct ieee80211_bpf_params *); 211 static int mtw_raw_xmit(struct ieee80211_node *, struct mbuf *, 212 const struct ieee80211_bpf_params *); 213 static int mtw_transmit(struct ieee80211com *, struct mbuf *); 214 static void mtw_start(struct mtw_softc *); 215 static void mtw_parent(struct ieee80211com *); 216 static void mtw_select_chan_group(struct mtw_softc *, int); 217 218 static int mtw_set_chan(struct mtw_softc *, struct ieee80211_channel *); 219 static void mtw_set_channel(struct ieee80211com *); 220 static void mtw_getradiocaps(struct ieee80211com *, int, int *, 221 struct ieee80211_channel[]); 222 static void mtw_scan_start(struct ieee80211com *); 223 static void mtw_scan_end(struct ieee80211com *); 224 static void mtw_update_beacon(struct ieee80211vap *, int); 225 static void mtw_update_beacon_cb(void *); 226 static void mtw_updateprot(struct ieee80211com *); 227 static void mtw_updateprot_cb(void *); 228 static void mtw_usb_timeout_cb(void *); 229 static int mtw_reset(struct mtw_softc *sc); 230 static void mtw_enable_tsf_sync(struct mtw_softc *); 231 232 233 static void mtw_enable_mrr(struct mtw_softc *); 234 static void mtw_set_txpreamble(struct mtw_softc *); 235 static void mtw_set_basicrates(struct mtw_softc *); 236 static void mtw_set_leds(struct mtw_softc *, uint16_t); 237 static void mtw_set_bssid(struct mtw_softc *, const uint8_t *); 238 static void mtw_set_macaddr(struct mtw_softc *, const uint8_t *); 239 static void mtw_updateslot(struct ieee80211com *); 240 static void mtw_updateslot_cb(void *); 241 static void mtw_update_mcast(struct ieee80211com *); 242 static int8_t mtw_rssi2dbm(struct mtw_softc *, uint8_t, uint8_t); 243 static void mtw_update_promisc_locked(struct mtw_softc *); 244 static void mtw_update_promisc(struct ieee80211com *); 245 static int mtw_txrx_enable(struct mtw_softc *); 246 static void mtw_init_locked(struct mtw_softc *); 247 static void mtw_stop(void *); 248 static void mtw_delay(struct mtw_softc *, u_int); 249 static void mtw_update_chw(struct ieee80211com *ic); 250 static int mtw_ampdu_enable(struct ieee80211_node *ni, 251 struct ieee80211_tx_ampdu *tap); 252 253 static eventhandler_tag mtw_etag; 254 255 
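/*
 * Illustrative sketch only (kept out of the build): the deferred-command
 * pattern used throughout this driver.  A producer reserves a ring slot
 * with MTW_CMDQ_GET(&sc->cmdq_store), fills in the callback and argument,
 * and schedules cmdq_task; mtw_cmdq_cb() later runs the entry in process
 * context.  See mtw_key_set()/mtw_key_delete() below for real users; the
 * function and callback names here are hypothetical.
 */
#if 0
static void
mtw_example_defer(struct mtw_softc *sc, void *arg)
{
	uint32_t i;

	i = MTW_CMDQ_GET(&sc->cmdq_store);	/* atomic ring index */
	sc->cmdq[i].func = mtw_example_cb;	/* hypothetical callback */
	sc->cmdq[i].arg0 = arg;			/* handed to func by mtw_cmdq_cb() */
	ieee80211_runtask(&sc->sc_ic, &sc->cmdq_task);
}
#endif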
static const struct { 256 uint8_t reg; 257 uint8_t val; 258 } mt7601_rf_bank0[] = { MT7601_BANK0_RF }, 259 mt7601_rf_bank4[] = { MT7601_BANK4_RF }, 260 mt7601_rf_bank5[] = { MT7601_BANK5_RF }; 261 static const struct { 262 uint32_t reg; 263 uint32_t val; 264 } mt7601_def_mac[] = { MT7601_DEF_MAC }; 265 static const struct { 266 uint8_t reg; 267 uint8_t val; 268 } mt7601_def_bbp[] = { MT7601_DEF_BBP }; 269 270 271 static const struct { 272 u_int chan; 273 uint8_t r17, r18, r19, r20; 274 } mt7601_rf_chan[] = { MT7601_RF_CHAN }; 275 276 277 static const struct usb_config mtw_config[MTW_N_XFER] = { 278 [MTW_BULK_RX] = { 279 .type = UE_BULK, 280 .endpoint = UE_ADDR_ANY, 281 .direction = UE_DIR_IN, 282 .bufsize = MTW_MAX_RXSZ, 283 .flags = {.pipe_bof = 1, 284 .short_xfer_ok = 1,}, 285 .callback = mtw_bulk_rx_callback, 286 }, 287 [MTW_BULK_TX_BE] = { 288 .type = UE_BULK, 289 .endpoint = UE_ADDR_ANY, 290 .direction = UE_DIR_OUT, 291 .bufsize = MTW_MAX_TXSZ, 292 .flags = {.pipe_bof = 1, 293 .force_short_xfer = 0,}, 294 .callback = mtw_bulk_tx_callback0, 295 .timeout = 5000, /* ms */ 296 }, 297 [MTW_BULK_TX_BK] = { 298 .type = UE_BULK, 299 .endpoint = UE_ADDR_ANY, 300 .direction = UE_DIR_OUT, 301 .bufsize = MTW_MAX_TXSZ, 302 .flags = {.pipe_bof = 1, 303 .force_short_xfer = 1,}, 304 .callback = mtw_bulk_tx_callback1, 305 .timeout = 5000, /* ms */ 306 }, 307 [MTW_BULK_TX_VI] = { 308 .type = UE_BULK, 309 .endpoint = UE_ADDR_ANY, 310 .direction = UE_DIR_OUT, 311 .bufsize = MTW_MAX_TXSZ, 312 .flags = {.pipe_bof = 1, 313 .force_short_xfer = 1,}, 314 .callback = mtw_bulk_tx_callback2, 315 .timeout = 5000, /* ms */ 316 }, 317 [MTW_BULK_TX_VO] = { 318 .type = UE_BULK, 319 .endpoint = UE_ADDR_ANY, 320 .direction = UE_DIR_OUT, 321 .bufsize = MTW_MAX_TXSZ, 322 .flags = {.pipe_bof = 1, 323 .force_short_xfer = 1,}, 324 .callback = mtw_bulk_tx_callback3, 325 .timeout = 5000, /* ms */ 326 }, 327 [MTW_BULK_TX_HCCA] = { 328 .type = UE_BULK, 329 .endpoint = UE_ADDR_ANY, 330 .direction = UE_DIR_OUT, 331 .bufsize = MTW_MAX_TXSZ, 332 .flags = {.pipe_bof = 1, 333 .force_short_xfer = 1, .no_pipe_ok = 1,}, 334 .callback = mtw_bulk_tx_callback4, 335 .timeout = 5000, /* ms */ 336 }, 337 [MTW_BULK_TX_PRIO] = { 338 .type = UE_BULK, 339 .endpoint = UE_ADDR_ANY, 340 .direction = UE_DIR_OUT, 341 .bufsize = MTW_MAX_TXSZ, 342 .flags = {.pipe_bof = 1, 343 .force_short_xfer = 1, .no_pipe_ok = 1,}, 344 .callback = mtw_bulk_tx_callback5, 345 .timeout = 5000, /* ms */ 346 }, 347 348 [MTW_BULK_FW_CMD] = { 349 .type = UE_BULK, 350 .endpoint = UE_ADDR_ANY, 351 .direction = UE_DIR_OUT, 352 .bufsize = 0x2c44, 353 .flags = {.pipe_bof = 1, 354 .force_short_xfer = 1, .no_pipe_ok = 1,}, 355 .callback = mtw_fw_callback, 356 357 }, 358 359 [MTW_BULK_RAW_TX] = { 360 .type = UE_BULK, 361 .ep_index = 0, 362 .endpoint = UE_ADDR_ANY, 363 .direction = UE_DIR_OUT, 364 .bufsize = MTW_MAX_TXSZ, 365 .flags = {.pipe_bof = 1, 366 .force_short_xfer = 1, .no_pipe_ok = 1,}, 367 .callback = mtw_bulk_tx_callback0, 368 .timeout = 5000, /* ms */ 369 }, 370 371 }; 372 static uint8_t mtw_wme_ac_xfer_map[4] = { 373 [WME_AC_BE] = MTW_BULK_TX_BE, 374 [WME_AC_BK] = MTW_BULK_TX_BK, 375 [WME_AC_VI] = MTW_BULK_TX_VI, 376 [WME_AC_VO] = MTW_BULK_TX_VO, 377 }; 378 static void 379 mtw_autoinst(void *arg, struct usb_device *udev, struct usb_attach_arg *uaa) 380 { 381 struct usb_interface *iface; 382 struct usb_interface_descriptor *id; 383 384 if (uaa->dev_state != UAA_DEV_READY) 385 return; 386 387 iface = usbd_get_iface(udev, 0); 388 if (iface == NULL) 389 return; 390 id = 
iface->idesc; 391 if (id == NULL || id->bInterfaceClass != UICLASS_MASS) 392 return; 393 if (usbd_lookup_id_by_uaa(mtw_devs, sizeof(mtw_devs), uaa)) 394 return; 395 396 if (usb_msc_eject(udev, 0, MSC_EJECT_STOPUNIT) == 0) 397 uaa->dev_state = UAA_DEV_EJECTING; 398 } 399 400 static int 401 mtw_driver_loaded(struct module *mod, int what, void *arg) 402 { 403 switch (what) { 404 case MOD_LOAD: 405 mtw_etag = EVENTHANDLER_REGISTER(usb_dev_configured, 406 mtw_autoinst, NULL, EVENTHANDLER_PRI_ANY); 407 break; 408 case MOD_UNLOAD: 409 EVENTHANDLER_DEREGISTER(usb_dev_configured, mtw_etag); 410 break; 411 default: 412 return (EOPNOTSUPP); 413 } 414 return (0); 415 } 416 417 static const char * 418 mtw_get_rf(int rev) 419 { 420 switch (rev) { 421 case MT7601_RF_7601: 422 return ("MT7601"); 423 case MT7610_RF_7610: 424 return ("MT7610"); 425 case MT7612_RF_7612: 426 return ("MT7612"); 427 } 428 return ("unknown"); 429 } 430 static int 431 mtw_wlan_enable(struct mtw_softc *sc, int enable) 432 { 433 uint32_t tmp; 434 int error = 0; 435 436 if (enable) { 437 mtw_read(sc, MTW_WLAN_CTRL, &tmp); 438 if (sc->asic_ver == 0x7612) 439 tmp &= ~0xfffff000; 440 441 tmp &= ~MTW_WLAN_CLK_EN; 442 tmp |= MTW_WLAN_EN; 443 mtw_write(sc, MTW_WLAN_CTRL, tmp); 444 mtw_delay(sc, 2); 445 446 tmp |= MTW_WLAN_CLK_EN; 447 if (sc->asic_ver == 0x7612) { 448 tmp |= (MTW_WLAN_RESET | MTW_WLAN_RESET_RF); 449 } 450 mtw_write(sc, MTW_WLAN_CTRL, tmp); 451 mtw_delay(sc, 2); 452 453 mtw_read(sc, MTW_OSC_CTRL, &tmp); 454 tmp |= MTW_OSC_EN; 455 mtw_write(sc, MTW_OSC_CTRL, tmp); 456 tmp |= MTW_OSC_CAL_REQ; 457 mtw_write(sc, MTW_OSC_CTRL, tmp); 458 } else { 459 mtw_read(sc, MTW_WLAN_CTRL, &tmp); 460 tmp &= ~(MTW_WLAN_CLK_EN | MTW_WLAN_EN); 461 mtw_write(sc, MTW_WLAN_CTRL, tmp); 462 463 mtw_read(sc, MTW_OSC_CTRL, &tmp); 464 tmp &= ~MTW_OSC_EN; 465 mtw_write(sc, MTW_OSC_CTRL, tmp); 466 } 467 return (error); 468 } 469 470 static int 471 mtw_read_cfg(struct mtw_softc *sc, uint16_t reg, uint32_t *val) 472 { 473 usb_device_request_t req; 474 uint32_t tmp; 475 uint16_t actlen; 476 int error; 477 478 req.bmRequestType = UT_READ_VENDOR_DEVICE; 479 req.bRequest = MTW_READ_CFG; 480 USETW(req.wValue, 0); 481 USETW(req.wIndex, reg); 482 USETW(req.wLength, 4); 483 error = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, &tmp, 0, 484 &actlen, 1000); 485 486 if (error == 0) 487 *val = le32toh(tmp); 488 else 489 *val = 0xffffffff; 490 return (error); 491 } 492 493 static int 494 mtw_match(device_t self) 495 { 496 struct usb_attach_arg *uaa = device_get_ivars(self); 497 498 if (uaa->usb_mode != USB_MODE_HOST) 499 return (ENXIO); 500 if (uaa->info.bConfigIndex != 0) 501 return (ENXIO); 502 if (uaa->info.bIfaceIndex != 0) 503 return (ENXIO); 504 505 return (usbd_lookup_id_by_uaa(mtw_devs, sizeof(mtw_devs), uaa)); 506 } 507 508 static int 509 mtw_attach(device_t self) 510 { 511 struct mtw_softc *sc = device_get_softc(self); 512 struct usb_attach_arg *uaa = device_get_ivars(self); 513 struct ieee80211com *ic = &sc->sc_ic; 514 uint32_t ver; 515 int i, ret; 516 // uint32_t tmp; 517 uint8_t iface_index; 518 int ntries, error; 519 520 device_set_usb_desc(self); 521 sc->sc_udev = uaa->device; 522 sc->sc_dev = self; 523 sc->sc_sent = 0; 524 525 mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), 526 MTX_NETWORK_LOCK, MTX_DEF); 527 528 iface_index = 0; 529 530 error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, 531 mtw_config, MTW_N_XFER, sc, &sc->sc_mtx); 532 if (error) { 533 device_printf(sc->sc_dev, 534 "could not allocate USB transfers, " 
	    "err=%s\n", usbd_errstr(error));
		goto detach;
	}
	for (i = 0; i < 4; i++) {
		sc->txd_fw[i] = (struct mtw_txd_fw *)
		    malloc(sizeof(struct mtw_txd_fw),
		    M_USBDEV, M_NOWAIT | M_ZERO);
	}
	MTW_LOCK(sc);
	sc->sc_idx = 0;
	mbufq_init(&sc->sc_snd, ifqmaxlen);

	/* enable WLAN core */
	if ((error = mtw_wlan_enable(sc, 1)) != 0) {
		device_printf(sc->sc_dev, "could not enable WLAN core\n");
		goto detach;
	}

	/* wait for the chip to settle */
	DELAY(100);
	for (ntries = 0; ntries < 100; ntries++) {
		if (mtw_read(sc, MTW_ASIC_VER, &ver) != 0) {
			goto detach;
		}
		if (ver != 0 && ver != 0xffffffff)
			break;
		DELAY(10);
	}
	if (ntries == 100) {
		device_printf(sc->sc_dev,
		    "timeout waiting for NIC to initialize\n");
		goto detach;
	}
	sc->asic_ver = ver >> 16;
	sc->asic_rev = ver & 0xffff;
	DELAY(100);
	if (sc->asic_ver != 0x7601) {
		device_printf(sc->sc_dev,
		    "revision 0x%04x is not supported yet\n",
		    sc->asic_rev);
		goto detach;
	}

	mtw_load_microcode(sc);
	ret = msleep(&sc->fwloading, &sc->sc_mtx, 0, "fwload", 3 * hz);
	if (ret == EWOULDBLOCK || sc->fwloading != 1) {
		device_printf(sc->sc_dev,
		    "timeout waiting for MCU to initialize\n");
		goto detach;
	}

	sc->sc_srom_read = mtw_efuse_read_2;
	/* retrieve RF rev. no and various other things from EEPROM */
	mtw_read_eeprom(sc);

	device_printf(sc->sc_dev,
	    "MAC/BBP RT%04X (rev 0x%04X), RF %s (MIMO %dT%dR), address %s\n",
	    sc->asic_ver, sc->mac_rev, mtw_get_rf(sc->rf_rev), sc->ntxchains,
	    sc->nrxchains, ether_sprintf(ic->ic_macaddr));
	DELAY(100);

	//mtw_set_leds(sc,5);
	// mtw_mcu_radio(sc,0x31,0);
	MTW_UNLOCK(sc);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(self);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	ic->ic_caps = IEEE80211_C_STA |		/* station mode supported */
	    IEEE80211_C_MONITOR |		/* monitor mode supported */
	    IEEE80211_C_IBSS |
	    IEEE80211_C_HOSTAP |
	    IEEE80211_C_WDS |			/* 4-address traffic works */
	    IEEE80211_C_MBSS |
	    IEEE80211_C_SHPREAMBLE |		/* short preamble supported */
	    IEEE80211_C_SHSLOT |		/* short slot time supported */
	    IEEE80211_C_WME |			/* WME */
	    IEEE80211_C_WPA;			/* WPA1|WPA2(RSN) */
	device_printf(sc->sc_dev, "[HT] Enabling 802.11n\n");
	ic->ic_htcaps = IEEE80211_HTC_HT
	    | IEEE80211_HTC_AMPDU
	    | IEEE80211_HTC_AMSDU
	    | IEEE80211_HTCAP_MAXAMSDU_3839
	    | IEEE80211_HTCAP_SMPS_OFF;

	ic->ic_rxstream = sc->nrxchains;
	ic->ic_txstream = sc->ntxchains;

	ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_AES_CCM |
	    IEEE80211_CRYPTO_AES_OCB | IEEE80211_CRYPTO_TKIP |
	    IEEE80211_CRYPTO_TKIPMIC;

	ic->ic_flags |= IEEE80211_F_DATAPAD;
	ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS;

	mtw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	ieee80211_ifattach(ic);

	ic->ic_scan_start = mtw_scan_start;
	ic->ic_scan_end = mtw_scan_end;
	ic->ic_set_channel = mtw_set_channel;
	ic->ic_getradiocaps = mtw_getradiocaps;
	ic->ic_node_alloc = mtw_node_alloc;
	ic->ic_newassoc = mtw_newassoc;
	ic->ic_update_mcast = mtw_update_mcast;
	ic->ic_updateslot = mtw_updateslot;
	ic->ic_wme.wme_update = mtw_wme_update;
	ic->ic_raw_xmit = mtw_raw_xmit;
	ic->ic_update_promisc = mtw_update_promisc;
ic->ic_vap_create = mtw_vap_create; 651 ic->ic_vap_delete = mtw_vap_delete; 652 ic->ic_transmit = mtw_transmit; 653 ic->ic_parent = mtw_parent; 654 655 ic->ic_update_chw = mtw_update_chw; 656 ic->ic_ampdu_enable = mtw_ampdu_enable; 657 658 ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, 659 sizeof(sc->sc_txtap), MTW_TX_RADIOTAP_PRESENT, 660 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 661 MTW_RX_RADIOTAP_PRESENT); 662 TASK_INIT(&sc->cmdq_task, 0, mtw_cmdq_cb, sc); 663 TASK_INIT(&sc->ratectl_task, 0, mtw_ratectl_cb, sc); 664 usb_callout_init_mtx(&sc->ratectl_ch, &sc->sc_mtx, 0); 665 666 if (bootverbose) 667 ieee80211_announce(ic); 668 669 return (0); 670 671 detach: 672 MTW_UNLOCK(sc); 673 mtw_detach(self); 674 return (ENXIO); 675 } 676 677 static void 678 mtw_drain_mbufq(struct mtw_softc *sc) 679 { 680 struct mbuf *m; 681 struct ieee80211_node *ni; 682 683 MTW_LOCK_ASSERT(sc, MA_OWNED); 684 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { 685 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 686 m->m_pkthdr.rcvif = NULL; 687 ieee80211_free_node(ni); 688 m_freem(m); 689 } 690 } 691 692 static int 693 mtw_detach(device_t self) 694 { 695 struct mtw_softc *sc = device_get_softc(self); 696 struct ieee80211com *ic = &sc->sc_ic; 697 int i; 698 MTW_LOCK(sc); 699 mtw_reset(sc); 700 DELAY(10000); 701 sc->sc_detached = 1; 702 MTW_UNLOCK(sc); 703 704 705 /* stop all USB transfers */ 706 for (i = 0; i < MTW_N_XFER; i++) 707 usbd_transfer_drain(sc->sc_xfer[i]); 708 709 MTW_LOCK(sc); 710 sc->ratectl_run = MTW_RATECTL_OFF; 711 sc->cmdq_run = sc->cmdq_key_set = MTW_CMDQ_ABORT; 712 713 /* free TX list, if any */ 714 if (ic->ic_nrunning > 0) 715 for (i = 0; i < MTW_EP_QUEUES; i++) 716 mtw_unsetup_tx_list(sc, &sc->sc_epq[i]); 717 718 /* Free TX queue */ 719 mtw_drain_mbufq(sc); 720 MTW_UNLOCK(sc); 721 if (sc->sc_ic.ic_softc == sc) { 722 /* drain tasks */ 723 usb_callout_drain(&sc->ratectl_ch); 724 ieee80211_draintask(ic, &sc->cmdq_task); 725 ieee80211_draintask(ic, &sc->ratectl_task); 726 ieee80211_ifdetach(ic); 727 } 728 for (i = 0; i < 4; i++) { 729 free(sc->txd_fw[i], M_USBDEV); 730 } 731 firmware_unregister("/mediatek/mt7601u"); 732 mtx_destroy(&sc->sc_mtx); 733 734 return (0); 735 } 736 737 static struct ieee80211vap * 738 mtw_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 739 enum ieee80211_opmode opmode, int flags, 740 const uint8_t bssid[IEEE80211_ADDR_LEN], 741 const uint8_t mac[IEEE80211_ADDR_LEN]) 742 { 743 struct mtw_softc *sc = ic->ic_softc; 744 struct mtw_vap *rvp; 745 struct ieee80211vap *vap; 746 int i; 747 748 if (sc->rvp_cnt >= MTW_VAP_MAX) { 749 device_printf(sc->sc_dev, "number of VAPs maxed out\n"); 750 return (NULL); 751 } 752 753 switch (opmode) { 754 case IEEE80211_M_STA: 755 /* enable s/w bmiss handling for sta mode */ 756 flags |= IEEE80211_CLONE_NOBEACONS; 757 /* fall though */ 758 case IEEE80211_M_IBSS: 759 case IEEE80211_M_MONITOR: 760 case IEEE80211_M_HOSTAP: 761 case IEEE80211_M_MBSS: 762 /* other than WDS vaps, only one at a time */ 763 if (!TAILQ_EMPTY(&ic->ic_vaps)) 764 return (NULL); 765 break; 766 case IEEE80211_M_WDS: 767 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 768 if (vap->iv_opmode != IEEE80211_M_HOSTAP) 769 continue; 770 /* WDS vap's always share the local mac address. 
*/ 771 flags &= ~IEEE80211_CLONE_BSSID; 772 break; 773 } 774 if (vap == NULL) { 775 device_printf(sc->sc_dev, 776 "wds only supported in ap mode\n"); 777 return (NULL); 778 } 779 break; 780 default: 781 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); 782 return (NULL); 783 } 784 785 rvp = malloc(sizeof(struct mtw_vap), M_80211_VAP, M_WAITOK | M_ZERO); 786 vap = &rvp->vap; 787 788 if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid) != 789 0) { 790 /* out of memory */ 791 free(rvp, M_80211_VAP); 792 return (NULL); 793 } 794 795 vap->iv_update_beacon = mtw_update_beacon; 796 vap->iv_max_aid = MTW_WCID_MAX; 797 798 /* 799 * The linux rt2800 driver limits 1 stream devices to a 32KB 800 * RX AMPDU. 801 */ 802 if (ic->ic_rxstream > 1) 803 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; 804 else 805 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; 806 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_2; /* 2uS */ 807 808 /* 809 * To delete the right key from h/w, we need wcid. 810 * Luckily, there is unused space in ieee80211_key{}, wk_pad, 811 * and matching wcid will be written into there. So, cast 812 * some spells to remove 'const' from ieee80211_key{} 813 */ 814 vap->iv_key_delete = (void *)mtw_key_delete; 815 vap->iv_key_set = (void *)mtw_key_set; 816 817 // override state transition machine 818 rvp->newstate = vap->iv_newstate; 819 vap->iv_newstate = mtw_newstate; 820 if (opmode == IEEE80211_M_IBSS) { 821 rvp->recv_mgmt = vap->iv_recv_mgmt; 822 vap->iv_recv_mgmt = mtw_recv_mgmt; 823 } 824 825 ieee80211_ratectl_init(vap); 826 ieee80211_ratectl_setinterval(vap, 1000); // 1 second 827 828 /* complete setup */ 829 ieee80211_vap_attach(vap, mtw_media_change, ieee80211_media_status, 830 mac); 831 832 /* make sure id is always unique */ 833 for (i = 0; i < MTW_VAP_MAX; i++) { 834 if ((sc->rvp_bmap & 1 << i) == 0) { 835 sc->rvp_bmap |= 1 << i; 836 rvp->rvp_id = i; 837 break; 838 } 839 } 840 if (sc->rvp_cnt++ == 0) 841 ic->ic_opmode = opmode; 842 843 if (opmode == IEEE80211_M_HOSTAP) 844 sc->cmdq_run = MTW_CMDQ_GO; 845 846 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "rvp_id=%d bmap=%x rvp_cnt=%d\n", 847 rvp->rvp_id, sc->rvp_bmap, sc->rvp_cnt); 848 849 return (vap); 850 } 851 852 static void 853 mtw_vap_delete(struct ieee80211vap *vap) 854 { 855 struct mtw_vap *rvp = MTW_VAP(vap); 856 struct ieee80211com *ic; 857 struct mtw_softc *sc; 858 uint8_t rvp_id; 859 860 if (vap == NULL) 861 return; 862 863 ic = vap->iv_ic; 864 sc = ic->ic_softc; 865 866 MTW_LOCK(sc); 867 m_freem(rvp->beacon_mbuf); 868 rvp->beacon_mbuf = NULL; 869 870 rvp_id = rvp->rvp_id; 871 sc->ratectl_run &= ~(1 << rvp_id); 872 sc->rvp_bmap &= ~(1 << rvp_id); 873 mtw_set_region_4(sc, MTW_SKEY(rvp_id, 0), 0, 256); 874 mtw_set_region_4(sc, (0x7800 + (rvp_id) * 512), 0, 512); 875 --sc->rvp_cnt; 876 877 MTW_DPRINTF(sc, MTW_DEBUG_STATE, 878 "vap=%p rvp_id=%d bmap=%x rvp_cnt=%d\n", vap, rvp_id, sc->rvp_bmap, 879 sc->rvp_cnt); 880 881 MTW_UNLOCK(sc); 882 883 ieee80211_ratectl_deinit(vap); 884 ieee80211_vap_detach(vap); 885 free(rvp, M_80211_VAP); 886 } 887 888 /* 889 * There are numbers of functions need to be called in context thread. 890 * Rather than creating taskqueue event for each of those functions, 891 * here is all-for-one taskqueue callback function. This function 892 * guarantees deferred functions are executed in the same order they 893 * were enqueued. 894 * '& MTW_CMDQ_MASQ' is to loop cmdq[]. 
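 * Producers reserve a slot with MTW_CMDQ_GET(&sc->cmdq_store), fill in
 * cmdq[i].func/arg0 and schedule cmdq_task; this callback then consumes
 * entries in order starting at cmdq_exec, so cmdq_store and cmdq_exec
 * act as the ring's write and read indices.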
895 */ 896 static void 897 mtw_cmdq_cb(void *arg, int pending) 898 { 899 struct mtw_softc *sc = arg; 900 uint8_t i; 901 /* call cmdq[].func locked */ 902 MTW_LOCK(sc); 903 for (i = sc->cmdq_exec; sc->cmdq[i].func && pending; 904 i = sc->cmdq_exec, pending--) { 905 MTW_DPRINTF(sc, MTW_DEBUG_CMD, "cmdq_exec=%d pending=%d\n", i, 906 pending); 907 if (sc->cmdq_run == MTW_CMDQ_GO) { 908 /* 909 * If arg0 is NULL, callback func needs more 910 * than one arg. So, pass ptr to cmdq struct. 911 */ 912 if (sc->cmdq[i].arg0) 913 sc->cmdq[i].func(sc->cmdq[i].arg0); 914 else 915 sc->cmdq[i].func(&sc->cmdq[i]); 916 } 917 sc->cmdq[i].arg0 = NULL; 918 sc->cmdq[i].func = NULL; 919 sc->cmdq_exec++; 920 sc->cmdq_exec &= MTW_CMDQ_MASQ; 921 } 922 MTW_UNLOCK(sc); 923 } 924 925 static void 926 mtw_setup_tx_list(struct mtw_softc *sc, struct mtw_endpoint_queue *pq) 927 { 928 struct mtw_tx_data *data; 929 930 memset(pq, 0, sizeof(*pq)); 931 932 STAILQ_INIT(&pq->tx_qh); 933 STAILQ_INIT(&pq->tx_fh); 934 935 for (data = &pq->tx_data[0]; data < &pq->tx_data[MTW_TX_RING_COUNT]; 936 data++) { 937 data->sc = sc; 938 STAILQ_INSERT_TAIL(&pq->tx_fh, data, next); 939 } 940 pq->tx_nfree = MTW_TX_RING_COUNT; 941 } 942 943 static void 944 mtw_unsetup_tx_list(struct mtw_softc *sc, struct mtw_endpoint_queue *pq) 945 { 946 struct mtw_tx_data *data; 947 /* make sure any subsequent use of the queues will fail */ 948 pq->tx_nfree = 0; 949 950 STAILQ_INIT(&pq->tx_fh); 951 STAILQ_INIT(&pq->tx_qh); 952 953 /* free up all node references and mbufs */ 954 for (data = &pq->tx_data[0]; data < &pq->tx_data[MTW_TX_RING_COUNT]; 955 data++) { 956 if (data->m != NULL) { 957 m_freem(data->m); 958 data->m = NULL; 959 } 960 if (data->ni != NULL) { 961 ieee80211_free_node(data->ni); 962 data->ni = NULL; 963 } 964 } 965 } 966 967 static int 968 mtw_write_ivb(struct mtw_softc *sc, void *buf, uint16_t len) 969 { 970 usb_device_request_t req; 971 uint16_t actlen; 972 req.bmRequestType = UT_WRITE_VENDOR_DEVICE; 973 req.bRequest = MTW_RESET; 974 USETW(req.wValue, 0x12); 975 USETW(req.wIndex, 0); 976 USETW(req.wLength, len); 977 978 int error = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, buf, 979 0, &actlen, 1000); 980 981 return (error); 982 } 983 984 static int 985 mtw_write_cfg(struct mtw_softc *sc, uint16_t reg, uint32_t val) 986 { 987 usb_device_request_t req; 988 int error; 989 990 req.bmRequestType = UT_WRITE_VENDOR_DEVICE; 991 req.bRequest = MTW_WRITE_CFG; 992 USETW(req.wValue, 0); 993 USETW(req.wIndex, reg); 994 USETW(req.wLength, 4); 995 val = htole32(val); 996 error = usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, &val); 997 return (error); 998 } 999 1000 static int 1001 mtw_usb_dma_write(struct mtw_softc *sc, uint32_t val) 1002 { 1003 // if (sc->asic_ver == 0x7612) 1004 // return mtw_write_cfg(sc, MTW_USB_U3DMA_CFG, val); 1005 // else 1006 return (mtw_write(sc, MTW_USB_DMA_CFG, val)); 1007 } 1008 1009 static void 1010 mtw_ucode_setup(struct mtw_softc *sc) 1011 { 1012 1013 mtw_usb_dma_write(sc, (MTW_USB_TX_EN | MTW_USB_RX_EN)); 1014 mtw_write(sc, MTW_FCE_PSE_CTRL, 1); 1015 mtw_write(sc, MTW_TX_CPU_FCE_BASE, 0x400230); 1016 mtw_write(sc, MTW_TX_CPU_FCE_MAX_COUNT, 1); 1017 mtw_write(sc, MTW_MCU_FW_IDX, 1); 1018 mtw_write(sc, MTW_FCE_PDMA, 0x44); 1019 mtw_write(sc, MTW_FCE_SKIP_FS, 3); 1020 } 1021 static int 1022 mtw_ucode_write(struct mtw_softc *sc, const uint8_t *fw, const uint8_t *ivb, 1023 int32_t len, uint32_t offset) 1024 { 1025 1026 // struct usb_attach_arg *uaa = device_get_ivars(sc->sc_dev); 1027 #if 0 // firmware not tested 1028 
1029 if (sc->asic_ver == 0x7612 && offset >= 0x90000) 1030 blksz = 0x800; /* MT7612 ROM Patch */ 1031 1032 xfer = usbd_alloc_xfer(sc->sc_udev); 1033 if (xfer == NULL) { 1034 error = ENOMEM; 1035 goto fail; 1036 } 1037 buf = usbd_alloc_buffer(xfer, blksz + 12); 1038 if (buf == NULL) { 1039 error = ENOMEM; 1040 goto fail; 1041 } 1042 #endif 1043 1044 1045 1046 int mlen; 1047 int idx = 0; 1048 1049 mlen = 0x2c44; 1050 1051 while (len > 0) { 1052 1053 if (len < 0x2c44 && len > 0) { 1054 mlen = len; 1055 } 1056 1057 sc->txd_fw[idx]->len = htole16(mlen); 1058 sc->txd_fw[idx]->flags = htole16(MTW_TXD_DATA | MTW_TXD_MCU); 1059 1060 memcpy(&sc->txd_fw[idx]->fw, fw, mlen); 1061 // memcpy(&txd[1], fw, mlen); 1062 // memset(&txd[1] + mlen, 0, MTW_DMA_PAD); 1063 // mtw_write_cfg(sc, MTW_MCU_DMA_ADDR, offset 1064 //+sent); 1mtw_write_cfg(sc, MTW_MCU_DMA_LEN, (mlen << 16)); 1065 1066 // sc->sc_fw_data[idx]->len=htole16(mlen); 1067 1068 // memcpy(tmpbuf,fw,mlen); 1069 // memset(tmpbuf+mlen,0,MTW_DMA_PAD); 1070 // memcpy(sc->sc_fw_data[idx].buf, fw, mlen); 1071 1072 fw += mlen; 1073 len -= mlen; 1074 // sent+=mlen; 1075 idx++; 1076 } 1077 sc->sc_sent = 0; 1078 memcpy(sc->sc_ivb_1, ivb, MTW_MCU_IVB_LEN); 1079 1080 usbd_transfer_start(sc->sc_xfer[7]); 1081 1082 return (0); 1083 } 1084 1085 static void 1086 mtw_load_microcode(void *arg) 1087 { 1088 1089 struct mtw_softc *sc = (struct mtw_softc *)arg; 1090 const struct mtw_ucode_hdr *hdr; 1091 // onst struct mtw_ucode *fw = NULL; 1092 const char *fwname; 1093 size_t size; 1094 int error = 0; 1095 uint32_t tmp, iofs = 0x40; 1096 // int ntries; 1097 int dlen, ilen; 1098 device_printf(sc->sc_dev, "version:0x%hx\n", sc->asic_ver); 1099 /* is firmware already running? */ 1100 mtw_read_cfg(sc, MTW_MCU_DMA_ADDR, &tmp); 1101 if (tmp == MTW_MCU_READY) { 1102 return; 1103 } 1104 if (sc->asic_ver == 0x7612) { 1105 fwname = "mtw-mt7662u_rom_patch"; 1106 1107 const struct firmware *firmware = firmware_get_flags(fwname,FIRMWARE_GET_NOWARN); 1108 if (firmware == NULL) { 1109 device_printf(sc->sc_dev, 1110 "failed loadfirmware of file %s (error %d)\n", 1111 fwname, error); 1112 return; 1113 } 1114 size = firmware->datasize; 1115 1116 const struct mtw_ucode *fw = (const struct mtw_ucode *) 1117 firmware->data; 1118 hdr = (const struct mtw_ucode_hdr *)&fw->hdr; 1119 // memcpy(fw,(const unsigned char*)firmware->data + 1120 // 0x1e,size-0x1e); 1121 ilen = size - 0x1e; 1122 1123 mtw_ucode_setup(sc); 1124 1125 if ((error = mtw_ucode_write(sc, firmware->data, fw->ivb, ilen, 1126 0x90000)) != 0) { 1127 goto fail; 1128 } 1129 mtw_usb_dma_write(sc, 0x00e41814); 1130 } 1131 1132 fwname = "/mediatek/mt7601u.bin"; 1133 iofs = 0x40; 1134 // dofs = 0; 1135 if (sc->asic_ver == 0x7612) { 1136 fwname = "mtw-mt7662u"; 1137 iofs = 0x80040; 1138 // dofs = 0x110800; 1139 } else if (sc->asic_ver == 0x7610) { 1140 fwname = "mt7610u"; 1141 // dofs = 0x80000; 1142 } 1143 MTW_UNLOCK(sc); 1144 const struct firmware *firmware = firmware_get_flags(fwname, FIRMWARE_GET_NOWARN); 1145 1146 if (firmware == NULL) { 1147 device_printf(sc->sc_dev, 1148 "failed loadfirmware of file %s (error %d)\n", fwname, 1149 error); 1150 MTW_LOCK(sc); 1151 return; 1152 } 1153 MTW_LOCK(sc); 1154 size = firmware->datasize; 1155 MTW_DPRINTF(sc, MTW_DEBUG_FIRMWARE, "firmware size:%zu\n", size); 1156 const struct mtw_ucode *fw = (const struct mtw_ucode *)firmware->data; 1157 1158 if (size < sizeof(struct mtw_ucode_hdr)) { 1159 device_printf(sc->sc_dev, "firmware header too short\n"); 1160 goto fail; 1161 } 1162 1163 hdr = (const 
struct mtw_ucode_hdr *)&fw->hdr;

	if (size < sizeof(struct mtw_ucode_hdr) + le32toh(hdr->ilm_len) +
	    le32toh(hdr->dlm_len)) {
		device_printf(sc->sc_dev, "firmware payload too short\n");
		goto fail;
	}

	ilen = le32toh(hdr->ilm_len) - MTW_MCU_IVB_LEN;
	dlen = le32toh(hdr->dlm_len);

	if (ilen > size || dlen > size) {
		device_printf(sc->sc_dev, "firmware payload too large\n");
		goto fail;
	}

	mtw_write(sc, MTW_FCE_PDMA, 0);
	mtw_write(sc, MTW_FCE_PSE_CTRL, 0);
	mtw_ucode_setup(sc);

	if ((error = mtw_ucode_write(sc, fw->data, fw->ivb, ilen, iofs)) != 0)
		device_printf(sc->sc_dev, "could not write ucode, error=%d\n",
		    error);

	device_printf(sc->sc_dev, "loaded firmware ver %.8x %.8x %s\n",
	    le32toh(hdr->fw_ver), le32toh(hdr->build_ver), hdr->build_time);

	return;
fail:
	return;
}

static usb_error_t
mtw_do_request(struct mtw_softc *sc, struct usb_device_request *req, void *data)
{
	usb_error_t err;
	int ntries = 5;

	MTW_LOCK_ASSERT(sc, MA_OWNED);

	while (ntries--) {
		err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req,
		    data, 0, NULL, 2000);	/* timeout in ms */
		if (err == 0)
			break;
		MTW_DPRINTF(sc, MTW_DEBUG_USB,
		    "Control request failed, %s (retrying)\n",
		    usbd_errstr(err));
		mtw_delay(sc, 10);
	}
	return (err);
}

static int
mtw_read(struct mtw_softc *sc, uint16_t reg, uint32_t *val)
{
	uint32_t tmp;
	int error;

	error = mtw_read_region_1(sc, reg, (uint8_t *)&tmp, sizeof tmp);
	if (error == 0)
		*val = le32toh(tmp);
	else
		*val = 0xffffffff;
	return (error);
}

static int
mtw_read_region_1(struct mtw_softc *sc, uint16_t reg, uint8_t *buf, int len)
{
	usb_device_request_t req;

	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = MTW_READ_REGION_1;
	USETW(req.wValue, 0);
	USETW(req.wIndex, reg);
	USETW(req.wLength, len);

	return (mtw_do_request(sc, &req, buf));
}

static int
mtw_write_2(struct mtw_softc *sc, uint16_t reg, uint16_t val)
{
	usb_device_request_t req;

	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = MTW_WRITE_2;
	USETW(req.wValue, val);
	USETW(req.wIndex, reg);
	USETW(req.wLength, 0);
	return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, NULL));
}

static int
mtw_write(struct mtw_softc *sc, uint16_t reg, uint32_t val)
{
	int error;

	if ((error = mtw_write_2(sc, reg, val & 0xffff)) == 0)
		error = mtw_write_2(sc, reg + 2, val >> 16);

	return (error);
}

static int
mtw_write_region_1(struct mtw_softc *sc, uint16_t reg, uint8_t *buf, int len)
{
	usb_device_request_t req;

	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = MTW_WRITE_REGION_1;
	USETW(req.wValue, 0);
	USETW(req.wIndex, reg);
	USETW(req.wLength, len);
	return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, buf));
}

static int
mtw_set_region_4(struct mtw_softc *sc, uint16_t reg, uint32_t val, int count)
{
	int i, error = 0;

	KASSERT((count & 3) == 0, ("mtw_set_region_4: Invalid data length.\n"));
	for (i = 0; i < count && error == 0; i += 4)
		error = mtw_write(sc, reg + i, val);
	return (error);
}

static int
mtw_efuse_read_2(struct mtw_softc *sc, uint16_t addr, uint16_t *val) 1296 { 1297 1298 uint32_t tmp; 1299 uint16_t reg; 1300 int error, ntries; 1301 1302 if ((error = mtw_read(sc, MTW_EFUSE_CTRL, &tmp)) != 0) 1303 return (error); 1304 1305 addr *= 2; 1306 /* 1307 * Read one 16-byte block into registers EFUSE_DATA[0-3]: 1308 * DATA0: 3 2 1 0 1309 * DATA1: 7 6 5 4 1310 * DATA2: B A 9 8 1311 * DATA3: F E D C 1312 */ 1313 tmp &= ~(MTW_EFSROM_MODE_MASK | MTW_EFSROM_AIN_MASK); 1314 tmp |= (addr & ~0xf) << MTW_EFSROM_AIN_SHIFT | MTW_EFSROM_KICK; 1315 mtw_write(sc, MTW_EFUSE_CTRL, tmp); 1316 for (ntries = 0; ntries < 100; ntries++) { 1317 if ((error = mtw_read(sc, MTW_EFUSE_CTRL, &tmp)) != 0) 1318 return (error); 1319 if (!(tmp & MTW_EFSROM_KICK)) 1320 break; 1321 DELAY(2); 1322 } 1323 if (ntries == 100) 1324 return (ETIMEDOUT); 1325 1326 if ((tmp & MTW_EFUSE_AOUT_MASK) == MTW_EFUSE_AOUT_MASK) { 1327 *val = 0xffff; // address not found 1328 return (0); 1329 } 1330 // determine to which 32-bit register our 16-bit word belongs 1331 reg = MTW_EFUSE_DATA0 + (addr & 0xc); 1332 if ((error = mtw_read(sc, reg, &tmp)) != 0) 1333 return (error); 1334 1335 *val = (addr & 2) ? tmp >> 16 : tmp & 0xffff; 1336 return (0); 1337 } 1338 1339 static __inline int 1340 mtw_srom_read(struct mtw_softc *sc, uint16_t addr, uint16_t *val) 1341 { 1342 /* either eFUSE ROM or EEPROM */ 1343 return (sc->sc_srom_read(sc, addr, val)); 1344 } 1345 1346 static int 1347 mtw_bbp_read(struct mtw_softc *sc, uint8_t reg, uint8_t *val) 1348 { 1349 uint32_t tmp; 1350 int ntries, error; 1351 1352 for (ntries = 0; ntries < 10; ntries++) { 1353 if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0) 1354 return (error); 1355 if (!(tmp & MTW_BBP_CSR_KICK)) 1356 break; 1357 } 1358 if (ntries == 10) 1359 return (ETIMEDOUT); 1360 1361 tmp = MTW_BBP_CSR_READ | MTW_BBP_CSR_KICK | reg << 8; 1362 if ((error = mtw_write(sc, MTW_BBP_CSR, tmp)) != 0) 1363 return (error); 1364 1365 for (ntries = 0; ntries < 10; ntries++) { 1366 if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0) 1367 return (error); 1368 if (!(tmp & MTW_BBP_CSR_KICK)) 1369 break; 1370 } 1371 if (ntries == 10) 1372 return (ETIMEDOUT); 1373 1374 *val = tmp & 0xff; 1375 return (0); 1376 } 1377 1378 static int 1379 mtw_bbp_write(struct mtw_softc *sc, uint8_t reg, uint8_t val) 1380 { 1381 uint32_t tmp; 1382 int ntries, error; 1383 1384 for (ntries = 0; ntries < 10; ntries++) { 1385 if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0) 1386 return (error); 1387 if (!(tmp & MTW_BBP_CSR_KICK)) 1388 break; 1389 } 1390 if (ntries == 10) 1391 return (ETIMEDOUT); 1392 1393 tmp = MTW_BBP_CSR_KICK | reg << 8 | val; 1394 return (mtw_write(sc, MTW_BBP_CSR, tmp)); 1395 } 1396 1397 static int 1398 mtw_mcu_cmd(struct mtw_softc *sc, u_int8_t cmd, void *buf, int len) 1399 { 1400 sc->sc_idx = 0; 1401 sc->txd_fw[sc->sc_idx]->len = htole16( 1402 len + 8); 1403 sc->txd_fw[sc->sc_idx]->flags = htole16(MTW_TXD_CMD | MTW_TXD_MCU | 1404 (cmd & 0x1f) << MTW_TXD_CMD_SHIFT | (0 & 0xf)); 1405 1406 memset(&sc->txd_fw[sc->sc_idx]->fw, 0, 2004); 1407 memcpy(&sc->txd_fw[sc->sc_idx]->fw, buf, len); 1408 usbd_transfer_start(sc->sc_xfer[7]); 1409 return (0); 1410 } 1411 1412 /* 1413 * Add `delta' (signed) to each 4-bit sub-word of a 32-bit word. 1414 * Used to adjust per-rate Tx power registers. 
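 * For example, with delta = 2 every nibble is raised by 2 and clamped to
 * the range [0, 0xf], so b4inc(0x0f321234, 2) returns 0x2f543456.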
1415 */ 1416 static __inline uint32_t 1417 b4inc(uint32_t b32, int8_t delta) 1418 { 1419 int8_t i, b4; 1420 1421 for (i = 0; i < 8; i++) { 1422 b4 = b32 & 0xf; 1423 b4 += delta; 1424 if (b4 < 0) 1425 b4 = 0; 1426 else if (b4 > 0xf) 1427 b4 = 0xf; 1428 b32 = b32 >> 4 | b4 << 28; 1429 } 1430 return (b32); 1431 } 1432 static void 1433 mtw_get_txpower(struct mtw_softc *sc) 1434 { 1435 uint16_t val; 1436 int i; 1437 1438 /* Read power settings for 2GHz channels. */ 1439 for (i = 0; i < 14; i += 2) { 1440 mtw_srom_read(sc, MTW_EEPROM_PWR2GHZ_BASE1 + i / 2, &val); 1441 sc->txpow1[i + 0] = (int8_t)(val & 0xff); 1442 sc->txpow1[i + 1] = (int8_t)(val >> 8); 1443 mtw_srom_read(sc, MTW_EEPROM_PWR2GHZ_BASE2 + i / 2, &val); 1444 sc->txpow2[i + 0] = (int8_t)(val & 0xff); 1445 sc->txpow2[i + 1] = (int8_t)(val >> 8); 1446 } 1447 /* Fix broken Tx power entries. */ 1448 for (i = 0; i < 14; i++) { 1449 if (sc->txpow1[i] < 0 || sc->txpow1[i] > 27) 1450 sc->txpow1[i] = 5; 1451 if (sc->txpow2[i] < 0 || sc->txpow2[i] > 27) 1452 sc->txpow2[i] = 5; 1453 MTW_DPRINTF(sc, MTW_DEBUG_TXPWR, 1454 "chan %d: power1=%d, power2=%d\n", mt7601_rf_chan[i].chan, 1455 sc->txpow1[i], sc->txpow2[i]); 1456 } 1457 } 1458 1459 struct ieee80211_node * 1460 mtw_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1461 { 1462 return (malloc(sizeof(struct mtw_node), M_80211_NODE, 1463 M_NOWAIT | M_ZERO)); 1464 } 1465 static int 1466 mtw_read_eeprom(struct mtw_softc *sc) 1467 { 1468 struct ieee80211com *ic = &sc->sc_ic; 1469 int8_t delta_2ghz, delta_5ghz; 1470 uint16_t val; 1471 int ridx, ant; 1472 1473 sc->sc_srom_read = mtw_efuse_read_2; 1474 1475 /* read RF information */ 1476 mtw_srom_read(sc, MTW_EEPROM_CHIPID, &val); 1477 sc->rf_rev = val; 1478 mtw_srom_read(sc, MTW_EEPROM_ANTENNA, &val); 1479 sc->ntxchains = (val >> 4) & 0xf; 1480 sc->nrxchains = val & 0xf; 1481 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "EEPROM RF rev=0x%02x chains=%dT%dR\n", 1482 sc->rf_rev, sc->ntxchains, sc->nrxchains); 1483 1484 /* read ROM version */ 1485 mtw_srom_read(sc, MTW_EEPROM_VERSION, &val); 1486 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "EEPROM rev=%d, FAE=%d\n", val & 0xff, 1487 val >> 8); 1488 1489 /* read MAC address */ 1490 mtw_srom_read(sc, MTW_EEPROM_MAC01, &val); 1491 ic->ic_macaddr[0] = val & 0xff; 1492 ic->ic_macaddr[1] = val >> 8; 1493 mtw_srom_read(sc, MTW_EEPROM_MAC23, &val); 1494 ic->ic_macaddr[2] = val & 0xff; 1495 ic->ic_macaddr[3] = val >> 8; 1496 mtw_srom_read(sc, MTW_EEPROM_MAC45, &val); 1497 ic->ic_macaddr[4] = val & 0xff; 1498 ic->ic_macaddr[5] = val >> 8; 1499 #if 0 1500 printf("eFUSE ROM\n00: "); 1501 for (int i = 0; i < 256; i++) { 1502 if (((i % 8) == 0) && i > 0) 1503 printf("\n%02x: ", i); 1504 mtw_srom_read(sc, i, &val); 1505 printf(" %04x", val); 1506 } 1507 printf("\n"); 1508 #endif 1509 /* check if RF supports automatic Tx access gain control */ 1510 mtw_srom_read(sc, MTW_EEPROM_CONFIG, &val); 1511 device_printf(sc->sc_dev, "EEPROM CFG 0x%04x\n", val); 1512 if ((val & 0xff) != 0xff) { 1513 sc->ext_5ghz_lna = (val >> 3) & 1; 1514 sc->ext_2ghz_lna = (val >> 2) & 1; 1515 /* check if RF supports automatic Tx access gain control */ 1516 sc->calib_2ghz = sc->calib_5ghz = (val >> 1) & 1; 1517 /* check if we have a hardware radio switch */ 1518 sc->rfswitch = val & 1; 1519 } 1520 1521 /* read RF frequency offset from EEPROM */ 1522 mtw_srom_read(sc, MTW_EEPROM_FREQ_OFFSET, &val); 1523 if ((val & 0xff) != 0xff) 1524 sc->rf_freq_offset = val; 1525 else 1526 sc->rf_freq_offset = 0; 1527 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "frequency 
offset 0x%x\n", 1528 sc->rf_freq_offset); 1529 1530 /* Read Tx power settings. */ 1531 mtw_get_txpower(sc); 1532 1533 /* read Tx power compensation for each Tx rate */ 1534 mtw_srom_read(sc, MTW_EEPROM_DELTAPWR, &val); 1535 delta_2ghz = delta_5ghz = 0; 1536 if ((val & 0xff) != 0xff && (val & 0x80)) { 1537 delta_2ghz = val & 0xf; 1538 if (!(val & 0x40)) /* negative number */ 1539 delta_2ghz = -delta_2ghz; 1540 } 1541 val >>= 8; 1542 if ((val & 0xff) != 0xff && (val & 0x80)) { 1543 delta_5ghz = val & 0xf; 1544 if (!(val & 0x40)) /* negative number */ 1545 delta_5ghz = -delta_5ghz; 1546 } 1547 MTW_DPRINTF(sc, MTW_DEBUG_ROM | MTW_DEBUG_TXPWR, 1548 "power compensation=%d (2GHz), %d (5GHz)\n", delta_2ghz, 1549 delta_5ghz); 1550 1551 for (ridx = 0; ridx < 5; ridx++) { 1552 uint32_t reg; 1553 1554 mtw_srom_read(sc, MTW_EEPROM_RPWR + ridx * 2, &val); 1555 reg = val; 1556 mtw_srom_read(sc, MTW_EEPROM_RPWR + ridx * 2 + 1, &val); 1557 reg |= (uint32_t)val << 16; 1558 1559 sc->txpow20mhz[ridx] = reg; 1560 sc->txpow40mhz_2ghz[ridx] = b4inc(reg, delta_2ghz); 1561 sc->txpow40mhz_5ghz[ridx] = b4inc(reg, delta_5ghz); 1562 1563 MTW_DPRINTF(sc, MTW_DEBUG_ROM | MTW_DEBUG_TXPWR, 1564 "ridx %d: power 20MHz=0x%08x, 40MHz/2GHz=0x%08x, " 1565 "40MHz/5GHz=0x%08x\n", 1566 ridx, sc->txpow20mhz[ridx], sc->txpow40mhz_2ghz[ridx], 1567 sc->txpow40mhz_5ghz[ridx]); 1568 } 1569 1570 /* read RSSI offsets and LNA gains from EEPROM */ 1571 val = 0; 1572 mtw_srom_read(sc, MTW_EEPROM_RSSI1_2GHZ, &val); 1573 sc->rssi_2ghz[0] = val & 0xff; /* Ant A */ 1574 sc->rssi_2ghz[1] = val >> 8; /* Ant B */ 1575 mtw_srom_read(sc, MTW_EEPROM_RSSI2_2GHZ, &val); 1576 /* 1577 * On RT3070 chips (limited to 2 Rx chains), this ROM 1578 * field contains the Tx mixer gain for the 2GHz band. 1579 */ 1580 if ((val & 0xff) != 0xff) 1581 sc->txmixgain_2ghz = val & 0x7; 1582 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "tx mixer gain=%u (2GHz)\n", 1583 sc->txmixgain_2ghz); 1584 sc->lna[2] = val >> 8; /* channel group 2 */ 1585 mtw_srom_read(sc, MTW_EEPROM_RSSI1_5GHZ, &val); 1586 sc->rssi_5ghz[0] = val & 0xff; /* Ant A */ 1587 sc->rssi_5ghz[1] = val >> 8; /* Ant B */ 1588 mtw_srom_read(sc, MTW_EEPROM_RSSI2_5GHZ, &val); 1589 sc->rssi_5ghz[2] = val & 0xff; /* Ant C */ 1590 1591 sc->lna[3] = val >> 8; /* channel group 3 */ 1592 1593 mtw_srom_read(sc, MTW_EEPROM_LNA, &val); 1594 sc->lna[0] = val & 0xff; /* channel group 0 */ 1595 sc->lna[1] = val >> 8; /* channel group 1 */ 1596 MTW_DPRINTF(sc, MTW_DEBUG_ROM, "LNA0 0x%x\n", sc->lna[0]); 1597 1598 /* fix broken 5GHz LNA entries */ 1599 if (sc->lna[2] == 0 || sc->lna[2] == 0xff) { 1600 MTW_DPRINTF(sc, MTW_DEBUG_ROM, 1601 "invalid LNA for channel group %d\n", 2); 1602 sc->lna[2] = sc->lna[1]; 1603 } 1604 if (sc->lna[3] == 0 || sc->lna[3] == 0xff) { 1605 MTW_DPRINTF(sc, MTW_DEBUG_ROM, 1606 "invalid LNA for channel group %d\n", 3); 1607 sc->lna[3] = sc->lna[1]; 1608 } 1609 1610 /* fix broken RSSI offset entries */ 1611 for (ant = 0; ant < 3; ant++) { 1612 if (sc->rssi_2ghz[ant] < -10 || sc->rssi_2ghz[ant] > 10) { 1613 MTW_DPRINTF(sc, MTW_DEBUG_ROM, 1614 "invalid RSSI%d offset: %d (2GHz)\n", ant + 1, 1615 sc->rssi_2ghz[ant]); 1616 sc->rssi_2ghz[ant] = 0; 1617 } 1618 if (sc->rssi_5ghz[ant] < -10 || sc->rssi_5ghz[ant] > 10) { 1619 MTW_DPRINTF(sc, MTW_DEBUG_ROM, 1620 "invalid RSSI%d offset: %d (5GHz)\n", ant + 1, 1621 sc->rssi_5ghz[ant]); 1622 sc->rssi_5ghz[ant] = 0; 1623 } 1624 } 1625 return (0); 1626 } 1627 static int 1628 mtw_media_change(if_t ifp) 1629 { 1630 struct ieee80211vap *vap = if_getsoftc(ifp); 1631 struct ieee80211com 
*ic = vap->iv_ic; 1632 const struct ieee80211_txparam *tp; 1633 struct mtw_softc *sc = ic->ic_softc; 1634 uint8_t rate, ridx; 1635 1636 MTW_LOCK(sc); 1637 ieee80211_media_change(ifp); 1638 //tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; 1639 tp = &vap->iv_txparms[ic->ic_curmode]; 1640 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { 1641 struct ieee80211_node *ni; 1642 struct mtw_node *rn; 1643 /* XXX TODO: methodize with MCS rates */ 1644 rate = 1645 ic->ic_sup_rates[ic->ic_curmode].rs_rates[tp->ucastrate] & 1646 IEEE80211_RATE_VAL; 1647 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) { 1648 if (rt2860_rates[ridx].rate == rate) 1649 break; 1650 } 1651 ni = ieee80211_ref_node(vap->iv_bss); 1652 rn = MTW_NODE(ni); 1653 rn->fix_ridx = ridx; 1654 1655 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "rate=%d, fix_ridx=%d\n", rate, 1656 rn->fix_ridx); 1657 ieee80211_free_node(ni); 1658 } 1659 MTW_UNLOCK(sc); 1660 1661 return (0); 1662 } 1663 1664 void 1665 mtw_set_leds(struct mtw_softc *sc, uint16_t which) 1666 { 1667 struct mtw_mcu_cmd_8 cmd; 1668 cmd.func = htole32(0x1); 1669 cmd.val = htole32(which); 1670 mtw_mcu_cmd(sc, CMD_LED_MODE, &cmd, sizeof(struct mtw_mcu_cmd_8)); 1671 } 1672 static void 1673 mtw_abort_tsf_sync(struct mtw_softc *sc) 1674 { 1675 uint32_t tmp; 1676 1677 mtw_read(sc, MTW_BCN_TIME_CFG, &tmp); 1678 tmp &= ~(MTW_BCN_TX_EN | MTW_TSF_TIMER_EN | MTW_TBTT_TIMER_EN); 1679 mtw_write(sc, MTW_BCN_TIME_CFG, tmp); 1680 } 1681 static int 1682 mtw_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1683 { 1684 const struct ieee80211_txparam *tp; 1685 struct ieee80211com *ic = vap->iv_ic; 1686 struct mtw_softc *sc = ic->ic_softc; 1687 struct mtw_vap *rvp = MTW_VAP(vap); 1688 enum ieee80211_state ostate; 1689 uint32_t sta[3]; 1690 uint8_t ratectl = 0; 1691 uint8_t restart_ratectl = 0; 1692 uint8_t bid = 1 << rvp->rvp_id; 1693 1694 1695 ostate = vap->iv_state; 1696 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "%s -> %s\n", 1697 ieee80211_state_name[ostate], ieee80211_state_name[nstate]); 1698 IEEE80211_UNLOCK(ic); 1699 MTW_LOCK(sc); 1700 ratectl = sc->ratectl_run; /* remember current state */ 1701 usb_callout_stop(&sc->ratectl_ch); 1702 sc->ratectl_run = MTW_RATECTL_OFF; 1703 if (ostate == IEEE80211_S_RUN) { 1704 /* turn link LED off */ 1705 } 1706 1707 switch (nstate) { 1708 case IEEE80211_S_INIT: 1709 restart_ratectl = 1; 1710 if (ostate != IEEE80211_S_RUN) 1711 break; 1712 1713 ratectl &= ~bid; 1714 sc->runbmap &= ~bid; 1715 1716 /* abort TSF synchronization if there is no vap running */ 1717 if (--sc->running == 0) 1718 mtw_abort_tsf_sync(sc); 1719 break; 1720 1721 case IEEE80211_S_RUN: 1722 if (!(sc->runbmap & bid)) { 1723 if (sc->running++) 1724 restart_ratectl = 1; 1725 sc->runbmap |= bid; 1726 } 1727 1728 m_freem(rvp->beacon_mbuf); 1729 rvp->beacon_mbuf = NULL; 1730 1731 switch (vap->iv_opmode) { 1732 case IEEE80211_M_HOSTAP: 1733 case IEEE80211_M_MBSS: 1734 sc->ap_running |= bid; 1735 ic->ic_opmode = vap->iv_opmode; 1736 mtw_update_beacon_cb(vap); 1737 break; 1738 case IEEE80211_M_IBSS: 1739 sc->adhoc_running |= bid; 1740 if (!sc->ap_running) 1741 ic->ic_opmode = vap->iv_opmode; 1742 mtw_update_beacon_cb(vap); 1743 break; 1744 case IEEE80211_M_STA: 1745 sc->sta_running |= bid; 1746 if (!sc->ap_running && !sc->adhoc_running) 1747 ic->ic_opmode = vap->iv_opmode; 1748 1749 /* read statistic counters (clear on read) */ 1750 mtw_read_region_1(sc, MTW_TX_STA_CNT0, (uint8_t *)sta, 1751 sizeof sta); 1752 1753 break; 1754 default: 1755 ic->ic_opmode = vap->iv_opmode; 1756 break; 1757 
} 1758 1759 if (vap->iv_opmode != IEEE80211_M_MONITOR) { 1760 struct ieee80211_node *ni; 1761 1762 if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) { 1763 MTW_UNLOCK(sc); 1764 IEEE80211_LOCK(ic); 1765 return (-1); 1766 } 1767 mtw_updateslot(ic); 1768 mtw_enable_mrr(sc); 1769 mtw_set_txpreamble(sc); 1770 mtw_set_basicrates(sc); 1771 ni = ieee80211_ref_node(vap->iv_bss); 1772 IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid); 1773 mtw_set_bssid(sc, sc->sc_bssid); 1774 ieee80211_free_node(ni); 1775 mtw_enable_tsf_sync(sc); 1776 1777 /* enable automatic rate adaptation */ 1778 tp = &vap->iv_txparms[ieee80211_chan2mode( 1779 ic->ic_curchan)]; 1780 if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) 1781 ratectl |= bid; 1782 } else { 1783 mtw_enable_tsf_sync(sc); 1784 } 1785 1786 break; 1787 default: 1788 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "undefined state\n"); 1789 break; 1790 } 1791 1792 /* restart amrr for running VAPs */ 1793 if ((sc->ratectl_run = ratectl) && restart_ratectl) { 1794 usb_callout_reset(&sc->ratectl_ch, hz, mtw_ratectl_to, sc); 1795 } 1796 MTW_UNLOCK(sc); 1797 IEEE80211_LOCK(ic); 1798 return (rvp->newstate(vap, nstate, arg)); 1799 } 1800 1801 static int 1802 mtw_wme_update(struct ieee80211com *ic) 1803 { 1804 struct chanAccParams chp; 1805 struct mtw_softc *sc = ic->ic_softc; 1806 const struct wmeParams *ac; 1807 int aci, error = 0; 1808 ieee80211_wme_ic_getparams(ic, &chp); 1809 ac = chp.cap_wmeParams; 1810 1811 MTW_LOCK(sc); 1812 /* update MAC TX configuration registers */ 1813 for (aci = 0; aci < WME_NUM_AC; aci++) { 1814 error = mtw_write(sc, MTW_EDCA_AC_CFG(aci), 1815 ac[aci].wmep_logcwmax << 16 | ac[aci].wmep_logcwmin << 12 | 1816 ac[aci].wmep_aifsn << 8 | ac[aci].wmep_txopLimit); 1817 if (error) 1818 goto err; 1819 } 1820 1821 /* update SCH/DMA registers too */ 1822 error = mtw_write(sc, MTW_WMM_AIFSN_CFG, 1823 ac[WME_AC_VO].wmep_aifsn << 12 | ac[WME_AC_VI].wmep_aifsn << 8 | 1824 ac[WME_AC_BK].wmep_aifsn << 4 | ac[WME_AC_BE].wmep_aifsn); 1825 if (error) 1826 goto err; 1827 error = mtw_write(sc, MTW_WMM_CWMIN_CFG, 1828 ac[WME_AC_VO].wmep_logcwmin << 12 | 1829 ac[WME_AC_VI].wmep_logcwmin << 8 | 1830 ac[WME_AC_BK].wmep_logcwmin << 4 | ac[WME_AC_BE].wmep_logcwmin); 1831 if (error) 1832 goto err; 1833 error = mtw_write(sc, MTW_WMM_CWMAX_CFG, 1834 ac[WME_AC_VO].wmep_logcwmax << 12 | 1835 ac[WME_AC_VI].wmep_logcwmax << 8 | 1836 ac[WME_AC_BK].wmep_logcwmax << 4 | ac[WME_AC_BE].wmep_logcwmax); 1837 if (error) 1838 goto err; 1839 error = mtw_write(sc, MTW_WMM_TXOP0_CFG, 1840 ac[WME_AC_BK].wmep_txopLimit << 16 | ac[WME_AC_BE].wmep_txopLimit); 1841 if (error) 1842 goto err; 1843 error = mtw_write(sc, MTW_WMM_TXOP1_CFG, 1844 ac[WME_AC_VO].wmep_txopLimit << 16 | ac[WME_AC_VI].wmep_txopLimit); 1845 1846 err: 1847 MTW_UNLOCK(sc); 1848 if (error) 1849 MTW_DPRINTF(sc, MTW_DEBUG_USB, "WME update failed\n"); 1850 1851 return (error); 1852 } 1853 1854 static int 1855 mtw_key_set(struct ieee80211vap *vap, struct ieee80211_key *k) 1856 { 1857 struct ieee80211com *ic = vap->iv_ic; 1858 struct mtw_softc *sc = ic->ic_softc; 1859 uint32_t i; 1860 1861 i = MTW_CMDQ_GET(&sc->cmdq_store); 1862 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "cmdq_store=%d\n", i); 1863 sc->cmdq[i].func = mtw_key_set_cb; 1864 sc->cmdq[i].arg0 = NULL; 1865 sc->cmdq[i].arg1 = vap; 1866 sc->cmdq[i].k = k; 1867 IEEE80211_ADDR_COPY(sc->cmdq[i].mac, k->wk_macaddr); 1868 ieee80211_runtask(ic, &sc->cmdq_task); 1869 1870 /* 1871 * To make sure key will be set when hostapd 1872 * calls iv_key_set() before if_init(). 
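 * (mtw_cmdq_cb() only executes queued entries while cmdq_run is
 * MTW_CMDQ_GO; setting cmdq_key_set here flags that a key is already
 * pending when the interface is brought up.)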
1873 */ 1874 if (vap->iv_opmode == IEEE80211_M_HOSTAP) { 1875 MTW_LOCK(sc); 1876 sc->cmdq_key_set = MTW_CMDQ_GO; 1877 MTW_UNLOCK(sc); 1878 } 1879 1880 return (1); 1881 } 1882 static void 1883 mtw_key_set_cb(void *arg) 1884 { 1885 struct mtw_cmdq *cmdq = arg; 1886 struct ieee80211vap *vap = cmdq->arg1; 1887 struct ieee80211_key *k = cmdq->k; 1888 struct ieee80211com *ic = vap->iv_ic; 1889 struct mtw_softc *sc = ic->ic_softc; 1890 struct ieee80211_node *ni; 1891 u_int cipher = k->wk_cipher->ic_cipher; 1892 uint32_t attr; 1893 uint16_t base; 1894 uint8_t mode, wcid, iv[8]; 1895 MTW_LOCK_ASSERT(sc, MA_OWNED); 1896 1897 if (vap->iv_opmode == IEEE80211_M_HOSTAP) 1898 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, cmdq->mac); 1899 else 1900 ni = vap->iv_bss; 1901 1902 /* map net80211 cipher to RT2860 security mode */ 1903 switch (cipher) { 1904 case IEEE80211_CIPHER_WEP: 1905 if (k->wk_keylen < 8) 1906 mode = MTW_MODE_WEP40; 1907 else 1908 mode = MTW_MODE_WEP104; 1909 break; 1910 case IEEE80211_CIPHER_TKIP: 1911 mode = MTW_MODE_TKIP; 1912 break; 1913 case IEEE80211_CIPHER_AES_CCM: 1914 mode = MTW_MODE_AES_CCMP; 1915 break; 1916 default: 1917 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "undefined case\n"); 1918 return; 1919 } 1920 1921 if (k->wk_flags & IEEE80211_KEY_GROUP) { 1922 wcid = 0; /* NB: update WCID0 for group keys */ 1923 base = MTW_SKEY(0, k->wk_keyix); 1924 } else { 1925 wcid = (ni != NULL) ? MTW_AID2WCID(ni->ni_associd) : 0; 1926 base = MTW_PKEY(wcid); 1927 } 1928 1929 if (cipher == IEEE80211_CIPHER_TKIP) { 1930 mtw_write_region_1(sc, base, k->wk_key, 16); 1931 mtw_write_region_1(sc, base + 16, &k->wk_key[24], 8); 1932 mtw_write_region_1(sc, base + 24, &k->wk_key[16], 8); 1933 } else { 1934 /* roundup len to 16-bit: XXX fix write_region_1() instead */ 1935 mtw_write_region_1(sc, base, k->wk_key, 1936 (k->wk_keylen + 1) & ~1); 1937 } 1938 1939 if (!(k->wk_flags & IEEE80211_KEY_GROUP) || 1940 (k->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))) { 1941 /* set initial packet number in IV+EIV */ 1942 if (cipher == IEEE80211_CIPHER_WEP) { 1943 memset(iv, 0, sizeof iv); 1944 iv[3] = vap->iv_def_txkey << 6; 1945 } else { 1946 if (cipher == IEEE80211_CIPHER_TKIP) { 1947 iv[0] = k->wk_keytsc >> 8; 1948 iv[1] = (iv[0] | 0x20) & 0x7f; 1949 iv[2] = k->wk_keytsc; 1950 } else { //CCMP 1951 iv[0] = k->wk_keytsc; 1952 iv[1] = k->wk_keytsc >> 8; 1953 iv[2] = 0; 1954 } 1955 iv[3] = k->wk_keyix << 6 | IEEE80211_WEP_EXTIV; 1956 iv[4] = k->wk_keytsc >> 16; 1957 iv[5] = k->wk_keytsc >> 24; 1958 iv[6] = k->wk_keytsc >> 32; 1959 iv[7] = k->wk_keytsc >> 40; 1960 } 1961 mtw_write_region_1(sc, MTW_IVEIV(wcid), iv, 8); 1962 } 1963 1964 if (k->wk_flags & IEEE80211_KEY_GROUP) { 1965 /* install group key */ 1966 mtw_read(sc, MTW_SKEY_MODE_0_7, &attr); 1967 attr &= ~(0xf << (k->wk_keyix * 4)); 1968 attr |= mode << (k->wk_keyix * 4); 1969 mtw_write(sc, MTW_SKEY_MODE_0_7, attr); 1970 1971 if (cipher & (IEEE80211_CIPHER_WEP)) { 1972 mtw_read(sc, MTW_WCID_ATTR(wcid + 1), &attr); 1973 attr = (attr & ~0xf) | (mode << 1); 1974 mtw_write(sc, MTW_WCID_ATTR(wcid + 1), attr); 1975 1976 mtw_set_region_4(sc, MTW_IVEIV(0), 0, 4); 1977 1978 mtw_read(sc, MTW_WCID_ATTR(wcid), &attr); 1979 attr = (attr & ~0xf) | (mode << 1); 1980 mtw_write(sc, MTW_WCID_ATTR(wcid), attr); 1981 } 1982 } else { 1983 /* install pairwise key */ 1984 mtw_read(sc, MTW_WCID_ATTR(wcid), &attr); 1985 attr = (attr & ~0xf) | (mode << 1) | MTW_RX_PKEY_EN; 1986 mtw_write(sc, MTW_WCID_ATTR(wcid), attr); 1987 } 1988 k->wk_pad = wcid; 1989 } 1990 1991 /* 1992 * If wlan is 
destroyed without being brought down i.e. without 1993 * wlan down or wpa_cli terminate, this function is called after 1994 * vap is gone. Don't refer it. 1995 */ 1996 static void 1997 mtw_key_delete_cb(void *arg) 1998 { 1999 struct mtw_cmdq *cmdq = arg; 2000 struct mtw_softc *sc = cmdq->arg1; 2001 struct ieee80211_key *k = &cmdq->key; 2002 uint32_t attr; 2003 uint8_t wcid; 2004 2005 MTW_LOCK_ASSERT(sc, MA_OWNED); 2006 2007 if (k->wk_flags & IEEE80211_KEY_GROUP) { 2008 /* remove group key */ 2009 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "removing group key\n"); 2010 mtw_read(sc, MTW_SKEY_MODE_0_7, &attr); 2011 attr &= ~(0xf << (k->wk_keyix * 4)); 2012 mtw_write(sc, MTW_SKEY_MODE_0_7, attr); 2013 } else { 2014 /* remove pairwise key */ 2015 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "removing key for wcid %x\n", 2016 k->wk_pad); 2017 /* matching wcid was written to wk_pad in mtw_key_set() */ 2018 wcid = k->wk_pad; 2019 mtw_read(sc, MTW_WCID_ATTR(wcid), &attr); 2020 attr &= ~0xf; 2021 mtw_write(sc, MTW_WCID_ATTR(wcid), attr); 2022 } 2023 2024 k->wk_pad = 0; 2025 } 2026 2027 /* 2028 * return 0 on error 2029 */ 2030 static int 2031 mtw_key_delete(struct ieee80211vap *vap, struct ieee80211_key *k) 2032 { 2033 struct ieee80211com *ic = vap->iv_ic; 2034 struct mtw_softc *sc = ic->ic_softc; 2035 struct ieee80211_key *k0; 2036 uint32_t i; 2037 if (sc->sc_flags & MTW_RUNNING) 2038 return (1); 2039 2040 /* 2041 * When called back, key might be gone. So, make a copy 2042 * of some values need to delete keys before deferring. 2043 * But, because of LOR with node lock, cannot use lock here. 2044 * So, use atomic instead. 2045 */ 2046 i = MTW_CMDQ_GET(&sc->cmdq_store); 2047 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "cmdq_store=%d\n", i); 2048 sc->cmdq[i].func = mtw_key_delete_cb; 2049 sc->cmdq[i].arg0 = NULL; 2050 sc->cmdq[i].arg1 = sc; 2051 k0 = &sc->cmdq[i].key; 2052 k0->wk_flags = k->wk_flags; 2053 k0->wk_keyix = k->wk_keyix; 2054 /* matching wcid was written to wk_pad in mtw_key_set() */ 2055 k0->wk_pad = k->wk_pad; 2056 ieee80211_runtask(ic, &sc->cmdq_task); 2057 return (1); /* return fake success */ 2058 } 2059 2060 static void 2061 mtw_ratectl_to(void *arg) 2062 { 2063 struct mtw_softc *sc = arg; 2064 /* do it in a process context, so it can go sleep */ 2065 ieee80211_runtask(&sc->sc_ic, &sc->ratectl_task); 2066 /* next timeout will be rescheduled in the callback task */ 2067 } 2068 2069 /* ARGSUSED */ 2070 static void 2071 mtw_ratectl_cb(void *arg, int pending) 2072 { 2073 2074 struct mtw_softc *sc = arg; 2075 struct ieee80211com *ic = &sc->sc_ic; 2076 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2077 2078 if (vap == NULL) 2079 return; 2080 2081 ieee80211_iterate_nodes(&ic->ic_sta, mtw_iter_func, sc); 2082 2083 usb_callout_reset(&sc->ratectl_ch, hz, mtw_ratectl_to, sc); 2084 2085 2086 } 2087 2088 static void 2089 mtw_drain_fifo(void *arg) 2090 { 2091 struct mtw_softc *sc = arg; 2092 uint32_t stat; 2093 uint16_t(*wstat)[3]; 2094 uint8_t wcid, mcs, pid; 2095 int8_t retry; 2096 2097 MTW_LOCK_ASSERT(sc, MA_OWNED); 2098 2099 for (;;) { 2100 /* drain Tx status FIFO (maxsize = 16) */ 2101 mtw_read(sc, MTW_TX_STAT_FIFO, &stat); 2102 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "tx stat 0x%08x\n", stat); 2103 if (!(stat & MTW_TXQ_VLD)) 2104 break; 2105 2106 wcid = (stat >> MTW_TXQ_WCID_SHIFT) & 0xff; 2107 2108 /* if no ACK was requested, no feedback is available */ 2109 if (!(stat & MTW_TXQ_ACKREQ) || wcid > MTW_WCID_MAX || 2110 wcid == 0) 2111 continue; 2112 2113 /* 2114 * Even though each stat is Tx-complete-status like format, 2115 * the 
device can poll stats. Because there is no guarantee 2116 * that the referring node is still around when read the stats. 2117 * So that, if we use ieee80211_ratectl_tx_update(), we will 2118 * have hard time not to refer already freed node. 2119 * 2120 * To eliminate such page faults, we poll stats in softc. 2121 * Then, update the rates later with 2122 * ieee80211_ratectl_tx_update(). 2123 */ 2124 wstat = &(sc->wcid_stats[wcid]); 2125 (*wstat)[MTW_TXCNT]++; 2126 if (stat & MTW_TXQ_OK) 2127 (*wstat)[MTW_SUCCESS]++; 2128 else 2129 counter_u64_add(sc->sc_ic.ic_oerrors, 1); 2130 /* 2131 * Check if there were retries, ie if the Tx success rate is 2132 * different from the requested rate. Note that it works only 2133 * because we do not allow rate fallback from OFDM to CCK. 2134 */ 2135 mcs = (stat >> MTW_TXQ_MCS_SHIFT) & 0x7f; 2136 pid = (stat >> MTW_TXQ_PID_SHIFT) & 0xf; 2137 if ((retry = pid - 1 - mcs) > 0) { 2138 (*wstat)[MTW_TXCNT] += retry; 2139 (*wstat)[MTW_RETRY] += retry; 2140 } 2141 } 2142 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "count=%d\n", sc->fifo_cnt); 2143 2144 sc->fifo_cnt = 0; 2145 } 2146 2147 static void 2148 mtw_iter_func(void *arg, struct ieee80211_node *ni) 2149 { 2150 struct mtw_softc *sc = arg; 2151 MTW_LOCK(sc); 2152 struct ieee80211_ratectl_tx_stats *txs = &sc->sc_txs; 2153 struct ieee80211vap *vap = ni->ni_vap; 2154 struct mtw_node *rn = MTW_NODE(ni); 2155 uint32_t sta[3]; 2156 uint16_t(*wstat)[3]; 2157 int error, ridx; 2158 2159 2160 /* Check for special case */ 2161 if (sc->rvp_cnt <= 1 && vap->iv_opmode == IEEE80211_M_STA && 2162 ni != vap->iv_bss) 2163 goto fail; 2164 2165 txs->flags = IEEE80211_RATECTL_TX_STATS_NODE | 2166 IEEE80211_RATECTL_TX_STATS_RETRIES; 2167 txs->ni = ni; 2168 if (sc->rvp_cnt <= 1 && 2169 (vap->iv_opmode == IEEE80211_M_IBSS || 2170 vap->iv_opmode == IEEE80211_M_STA)) { 2171 /* 2172 * read statistic counters (clear on read) and update AMRR state 2173 */ 2174 error = mtw_read_region_1(sc, MTW_TX_STA_CNT0, (uint8_t *)sta, 2175 sizeof sta); 2176 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "error:%d\n", error); 2177 if (error != 0) 2178 goto fail; 2179 2180 /* count failed TX as errors */ 2181 if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS, 2182 le32toh(sta[0]) & 0xffff); 2183 2184 txs->nretries = (le32toh(sta[1]) >> 16); 2185 txs->nsuccess = (le32toh(sta[1]) & 0xffff); 2186 /* nretries??? 
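 * (sta[0] carries the fail count and sta[1] the retry and success
 * counts, so nframes below is successes plus failures, while
 * hardware retries are only reported through nretries.)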
*/ 2187 txs->nframes = txs->nsuccess + (le32toh(sta[0]) & 0xffff); 2188 2189 MTW_DPRINTF(sc, MTW_DEBUG_RATE, 2190 "retrycnt=%d success=%d failcnt=%d\n", txs->nretries, 2191 txs->nsuccess, le32toh(sta[0]) & 0xffff); 2192 } else { 2193 wstat = &(sc->wcid_stats[MTW_AID2WCID(ni->ni_associd)]); 2194 2195 if (wstat == &(sc->wcid_stats[0]) || 2196 wstat > &(sc->wcid_stats[MTW_WCID_MAX])) 2197 goto fail; 2198 2199 txs->nretries = (*wstat)[MTW_RETRY]; 2200 txs->nsuccess = (*wstat)[MTW_SUCCESS]; 2201 txs->nframes = (*wstat)[MTW_TXCNT]; 2202 MTW_DPRINTF(sc, MTW_DEBUG_RATE, 2203 "wstat retrycnt=%d txcnt=%d success=%d\n", txs->nretries, 2204 txs->nframes, txs->nsuccess); 2205 2206 memset(wstat, 0, sizeof(*wstat)); 2207 } 2208 2209 ieee80211_ratectl_tx_update(vap, txs); 2210 ieee80211_ratectl_rate(ni, NULL, 0); 2211 /* XXX TODO: methodize with MCS rates */ 2212 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) { 2213 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "ni_txrate=0x%x\n", 2214 ni->ni_txrate); 2215 if (rt2860_rates[ridx].rate == ni->ni_txrate) { 2216 break; 2217 } 2218 } 2219 rn->amrr_ridx = ridx; 2220 fail: 2221 MTW_UNLOCK(sc); 2222 2223 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "rate=%d, ridx=%d\n", 2224 ni->ni_txrate, rn->amrr_ridx); 2225 } 2226 2227 static void 2228 mtw_newassoc_cb(void *arg) 2229 { 2230 struct mtw_cmdq *cmdq = arg; 2231 struct ieee80211_node *ni = cmdq->arg1; 2232 struct mtw_softc *sc = ni->ni_vap->iv_ic->ic_softc; 2233 2234 uint8_t wcid = cmdq->wcid; 2235 2236 MTW_LOCK_ASSERT(sc, MA_OWNED); 2237 2238 mtw_write_region_1(sc, MTW_WCID_ENTRY(wcid), ni->ni_macaddr, 2239 IEEE80211_ADDR_LEN); 2240 2241 memset(&(sc->wcid_stats[wcid]), 0, sizeof(sc->wcid_stats[wcid])); 2242 } 2243 2244 static void 2245 mtw_newassoc(struct ieee80211_node *ni, int isnew) 2246 { 2247 2248 struct mtw_node *mn = MTW_NODE(ni); 2249 struct ieee80211vap *vap = ni->ni_vap; 2250 struct ieee80211com *ic = vap->iv_ic; 2251 struct mtw_softc *sc = ic->ic_softc; 2252 2253 uint8_t rate; 2254 uint8_t ridx; 2255 uint8_t wcid; 2256 2257 2258 wcid = MTW_AID2WCID(ni->ni_associd); 2259 2260 if (wcid > MTW_WCID_MAX) { 2261 device_printf(sc->sc_dev, "wcid=%d out of range\n", wcid); 2262 return; 2263 } 2264 2265 /* only interested in true associations */ 2266 if (isnew && ni->ni_associd != 0) { 2267 /* 2268 * This function can be called from a timeout handler, so 2269 * the WCID programming must be deferred to the command queue task. 2270 */ 2271 2272 uint32_t cnt = MTW_CMDQ_GET(&sc->cmdq_store); 2273 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "cmdq_store=%d\n", cnt); 2274 sc->cmdq[cnt].func = mtw_newassoc_cb; 2275 sc->cmdq[cnt].arg0 = NULL; 2276 sc->cmdq[cnt].arg1 = ni; 2277 sc->cmdq[cnt].wcid = wcid; 2278 ieee80211_runtask(ic, &sc->cmdq_task); 2279 } 2280 2281 MTW_DPRINTF(sc, MTW_DEBUG_STATE, 2282 "new assoc isnew=%d associd=%x addr=%s\n", isnew, ni->ni_associd, 2283 ether_sprintf(ni->ni_macaddr)); 2284 rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate; 2285 /* XXX TODO: methodize with MCS rates */ 2286 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) 2287 if (rt2860_rates[ridx].rate == rate) 2288 break; 2289 mn->mgt_ridx = ridx; 2290 MTW_DPRINTF(sc, MTW_DEBUG_STATE | MTW_DEBUG_RATE, 2291 "rate=%d, ctl_ridx=%d\n", rate, ridx); 2292 MTW_LOCK(sc); 2293 if (sc->ratectl_run != MTW_RATECTL_OFF) { 2294 usb_callout_reset(&sc->ratectl_ch, hz, &mtw_ratectl_to, sc); 2295 } 2296 MTW_UNLOCK(sc); 2297 2298 } 2299 2300 /* 2301 * Return the Rx chain with the highest RSSI for a given frame.
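 * The chosen chain index is also exported as the radiotap antenna
 * field and used for the RSSI-to-dBm conversion in mtw_rx_frame().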
2302 */ 2303 static __inline uint8_t 2304 mtw_maxrssi_chain(struct mtw_softc *sc, const struct mtw_rxwi *rxwi) 2305 { 2306 uint8_t rxchain = 0; 2307 2308 if (sc->nrxchains > 1) { 2309 if (rxwi->rssi[1] > rxwi->rssi[rxchain]) 2310 rxchain = 1; 2311 if (sc->nrxchains > 2) 2312 if (rxwi->rssi[2] > rxwi->rssi[rxchain]) 2313 rxchain = 2; 2314 } 2315 return (rxchain); 2316 } 2317 static void 2318 mtw_get_tsf(struct mtw_softc *sc, uint64_t *buf) 2319 { 2320 mtw_read_region_1(sc, MTW_TSF_TIMER_DW0, (uint8_t *)buf, sizeof(*buf)); 2321 } 2322 2323 static void 2324 mtw_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, 2325 const struct ieee80211_rx_stats *rxs, int rssi, int nf) 2326 { 2327 struct ieee80211vap *vap = ni->ni_vap; 2328 struct mtw_softc *sc = vap->iv_ic->ic_softc; 2329 struct mtw_vap *rvp = MTW_VAP(vap); 2330 uint64_t ni_tstamp, rx_tstamp; 2331 2332 rvp->recv_mgmt(ni, m, subtype, rxs, rssi, nf); 2333 2334 if (vap->iv_state == IEEE80211_S_RUN && 2335 (subtype == IEEE80211_FC0_SUBTYPE_BEACON || 2336 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { 2337 ni_tstamp = le64toh(ni->ni_tstamp.tsf); 2338 MTW_LOCK(sc); 2339 mtw_get_tsf(sc, &rx_tstamp); 2340 MTW_UNLOCK(sc); 2341 rx_tstamp = le64toh(rx_tstamp); 2342 2343 if (ni_tstamp >= rx_tstamp) { 2344 MTW_DPRINTF(sc, MTW_DEBUG_RECV | MTW_DEBUG_BEACON, 2345 "ibss merge, tsf %ju tstamp %ju\n", 2346 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); 2347 (void)ieee80211_ibss_merge(ni); 2348 } 2349 } 2350 } 2351 static void 2352 mtw_rx_frame(struct mtw_softc *sc, struct mbuf *m, uint32_t dmalen) 2353 { 2354 struct ieee80211com *ic = &sc->sc_ic; 2355 struct ieee80211_frame *wh; 2356 struct ieee80211_node *ni; 2357 struct epoch_tracker et; 2358 2359 struct mtw_rxwi *rxwi; 2360 uint32_t flags; 2361 uint16_t len, rxwisize; 2362 uint8_t ant, rssi; 2363 int8_t nf; 2364 2365 rxwisize = sizeof(struct mtw_rxwi); 2366 2367 if (__predict_false( 2368 dmalen < rxwisize + sizeof(struct ieee80211_frame_ack))) { 2369 MTW_DPRINTF(sc, MTW_DEBUG_RECV, 2370 "payload is too short: dma length %u < %zu\n", dmalen, 2371 rxwisize + sizeof(struct ieee80211_frame_ack)); 2372 goto fail; 2373 } 2374 2375 rxwi = mtod(m, struct mtw_rxwi *); 2376 len = le16toh(rxwi->len) & 0xfff; 2377 flags = le32toh(rxwi->flags); 2378 if (__predict_false(len > dmalen - rxwisize)) { 2379 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "bad RXWI length %u > %u\n", 2380 len, dmalen); 2381 goto fail; 2382 } 2383 2384 if (__predict_false(flags & (MTW_RX_CRCERR | MTW_RX_ICVERR))) { 2385 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "%s error.\n", 2386 (flags & MTW_RX_CRCERR) ? "CRC" : "ICV"); 2387 goto fail; 2388 } 2389 2390 if (flags & MTW_RX_L2PAD) { 2391 MTW_DPRINTF(sc, MTW_DEBUG_RECV, 2392 "received RT2860_RX_L2PAD frame\n"); 2393 len += 2; 2394 } 2395 2396 m->m_data += rxwisize; 2397 m->m_pkthdr.len = m->m_len = len; 2398 2399 wh = mtod(m, struct ieee80211_frame *); 2400 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2401 wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; 2402 m->m_flags |= M_WEP; 2403 } 2404 2405 if (len >= sizeof(struct ieee80211_frame_min)) { 2406 ni = ieee80211_find_rxnode(ic, 2407 mtod(m, struct ieee80211_frame_min *)); 2408 } else 2409 ni = NULL; 2410 2411 if (ni && ni->ni_flags & IEEE80211_NODE_HT) { 2412 m->m_flags |= M_AMPDU; 2413 } 2414 2415 if (__predict_false(flags & MTW_RX_MICERR)) { 2416 /* report MIC failures to net80211 for TKIP */ 2417 if (ni != NULL) 2418 ieee80211_notify_michael_failure(ni->ni_vap, wh, 2419 rxwi->keyidx); 2420 MTW_DPRINTF(sc, MTW_DEBUG_RECV, 2421 "MIC error. 
Someone is lying.\n"); 2422 goto fail; 2423 } 2424 2425 ant = mtw_maxrssi_chain(sc, rxwi); 2426 rssi = rxwi->rssi[ant]; 2427 nf = mtw_rssi2dbm(sc, rssi, ant); 2428 2429 if (__predict_false(ieee80211_radiotap_active(ic))) { 2430 struct mtw_rx_radiotap_header *tap = &sc->sc_rxtap; 2431 uint16_t phy; 2432 2433 tap->wr_flags = 0; 2434 if (flags & MTW_RX_L2PAD) 2435 tap->wr_flags |= IEEE80211_RADIOTAP_F_DATAPAD; 2436 tap->wr_antsignal = rssi; 2437 tap->wr_antenna = ant; 2438 tap->wr_dbm_antsignal = mtw_rssi2dbm(sc, rssi, ant); 2439 tap->wr_rate = 2; /* in case it can't be found below */ 2440 //MTW_LOCK(sc); 2441 2442 // MTW_UNLOCK(sc); 2443 phy = le16toh(rxwi->phy); 2444 switch (phy >> MT7601_PHY_SHIFT) { 2445 case MTW_PHY_CCK: 2446 switch ((phy & MTW_PHY_MCS) & ~MTW_PHY_SHPRE) { 2447 case 0: 2448 tap->wr_rate = 2; 2449 break; 2450 case 1: 2451 tap->wr_rate = 4; 2452 break; 2453 case 2: 2454 tap->wr_rate = 11; 2455 break; 2456 case 3: 2457 tap->wr_rate = 22; 2458 break; 2459 } 2460 if (phy & MTW_PHY_SHPRE) 2461 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2462 break; 2463 case MTW_PHY_OFDM: 2464 switch (phy & MTW_PHY_MCS) { 2465 case 0: 2466 tap->wr_rate = 12; 2467 break; 2468 case 1: 2469 tap->wr_rate = 18; 2470 break; 2471 case 2: 2472 tap->wr_rate = 24; 2473 break; 2474 case 3: 2475 tap->wr_rate = 36; 2476 break; 2477 case 4: 2478 tap->wr_rate = 48; 2479 break; 2480 case 5: 2481 tap->wr_rate = 72; 2482 break; 2483 case 6: 2484 tap->wr_rate = 96; 2485 break; 2486 case 7: 2487 tap->wr_rate = 108; 2488 break; 2489 } 2490 break; 2491 } 2492 } 2493 2494 NET_EPOCH_ENTER(et); 2495 if (ni != NULL) { 2496 (void)ieee80211_input(ni, m, rssi, nf); 2497 ieee80211_free_node(ni); 2498 } else { 2499 (void)ieee80211_input_all(ic, m, rssi, nf); 2500 } 2501 NET_EPOCH_EXIT(et); 2502 2503 return; 2504 2505 fail: 2506 m_freem(m); 2507 counter_u64_add(ic->ic_ierrors, 1); 2508 } 2509 2510 static void 2511 mtw_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) 2512 { 2513 struct mtw_softc *sc = usbd_xfer_softc(xfer); 2514 struct ieee80211com *ic = &sc->sc_ic; 2515 struct mbuf *m = NULL; 2516 struct mbuf *m0; 2517 uint32_t dmalen, mbuf_len; 2518 uint16_t rxwisize; 2519 int xferlen; 2520 2521 rxwisize = sizeof(struct mtw_rxwi); 2522 2523 usbd_xfer_status(xfer, &xferlen, NULL, NULL, NULL); 2524 2525 switch (USB_GET_STATE(xfer)) { 2526 case USB_ST_TRANSFERRED: 2527 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "rx done, actlen=%d\n", 2528 xferlen); 2529 if (xferlen < (int)(sizeof(uint32_t) + rxwisize + 2530 sizeof(struct mtw_rxd))) { 2531 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB, 2532 "xfer too short %d %d\n", xferlen, 2533 (int)(sizeof(uint32_t) + rxwisize + 2534 sizeof(struct mtw_rxd))); 2535 goto tr_setup; 2536 } 2537 2538 m = sc->rx_m; 2539 sc->rx_m = NULL; 2540 2541 /* FALLTHROUGH */ 2542 case USB_ST_SETUP: 2543 tr_setup: 2544 2545 if (sc->rx_m == NULL) { 2546 sc->rx_m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, 2547 MTW_MAX_RXSZ); 2548 } 2549 if (sc->rx_m == NULL) { 2550 MTW_DPRINTF(sc, 2551 MTW_DEBUG_RECV | MTW_DEBUG_RECV_DESC | 2552 MTW_DEBUG_USB, 2553 "could not allocate mbuf - idle with stall\n"); 2554 counter_u64_add(ic->ic_ierrors, 1); 2555 usbd_xfer_set_stall(xfer); 2556 usbd_xfer_set_frames(xfer, 0); 2557 } else { 2558 /* 2559 * Directly loading a mbuf cluster into DMA to 2560 * save some data copying. This works because 2561 * there is only one cluster. 
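 * Each frame inside an aggregated transfer appears to be framed as
 * a 32-bit DMA length word, the RXWI plus 802.11 frame, and four
 * bytes of zero padding, which is why the parsing loop below
 * consumes dmalen + 8 bytes per frame and strips the 4-byte length
 * word before handing the frame to mtw_rx_frame().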
2562 */ 2563 usbd_xfer_set_frame_data(xfer, 0, 2564 mtod(sc->rx_m, caddr_t), MTW_MAX_RXSZ); 2565 usbd_xfer_set_frames(xfer, 1); 2566 } 2567 usbd_transfer_submit(xfer); 2568 break; 2569 2570 default: /* Error */ 2571 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB, 2572 "USB transfer error, %s\n", usbd_errstr(error)); 2573 2574 if (error != USB_ERR_CANCELLED) { 2575 /* try to clear stall first */ 2576 usbd_xfer_set_stall(xfer); 2577 if (error == USB_ERR_TIMEOUT) 2578 device_printf(sc->sc_dev, "device timeout %s\n", 2579 __func__); 2580 counter_u64_add(ic->ic_ierrors, 1); 2581 goto tr_setup; 2582 } 2583 if (sc->rx_m != NULL) { 2584 m_freem(sc->rx_m); 2585 sc->rx_m = NULL; 2586 } 2587 break; 2588 } 2589 2590 if (m == NULL) 2591 return; 2592 2593 /* inputting all the frames must be last */ 2594 2595 MTW_UNLOCK(sc); 2596 2597 m->m_pkthdr.len = m->m_len = xferlen; 2598 2599 /* HW can aggregate multiple 802.11 frames in a single USB xfer */ 2600 for (;;) { 2601 dmalen = le32toh(*mtod(m, uint32_t *)) & 0xffff; 2602 2603 if ((dmalen >= (uint32_t)-8) || (dmalen == 0) || 2604 ((dmalen & 3) != 0)) { 2605 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB, 2606 "bad DMA length %u\n", dmalen); 2607 break; 2608 } 2609 if ((dmalen + 8) > (uint32_t)xferlen) { 2610 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB, 2611 "bad DMA length %u > %d\n", dmalen + 8, xferlen); 2612 break; 2613 } 2614 2615 /* If it is the last one or a single frame, we won't copy. */ 2616 if ((xferlen -= dmalen + 8) <= 8) { 2617 /* trim 32-bit DMA-len header */ 2618 m->m_data += 4; 2619 m->m_pkthdr.len = m->m_len -= 4; 2620 mtw_rx_frame(sc, m, dmalen); 2621 m = NULL; /* don't free source buffer */ 2622 break; 2623 } 2624 2625 mbuf_len = dmalen + sizeof(struct mtw_rxd); 2626 if (__predict_false(mbuf_len > MCLBYTES)) { 2627 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB, 2628 "payload is too big: mbuf_len %u\n", mbuf_len); 2629 counter_u64_add(ic->ic_ierrors, 1); 2630 break; 2631 } 2632 2633 /* copy aggregated frames to another mbuf */ 2634 m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 2635 if (__predict_false(m0 == NULL)) { 2636 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC, 2637 "could not allocate mbuf\n"); 2638 counter_u64_add(ic->ic_ierrors, 1); 2639 break; 2640 } 2641 m_copydata(m, 4 /* skip 32-bit DMA-len header */, mbuf_len, 2642 mtod(m0, caddr_t)); 2643 m0->m_pkthdr.len = m0->m_len = mbuf_len; 2644 mtw_rx_frame(sc, m0, dmalen); 2645 2646 /* update data ptr */ 2647 m->m_data += mbuf_len + 4; 2648 m->m_pkthdr.len = m->m_len -= mbuf_len + 4; 2649 } 2650 2651 /* make sure we free the source buffer, if any */ 2652 m_freem(m); 2653 2654 #ifdef IEEE80211_SUPPORT_SUPERG 2655 ieee80211_ff_age_all(ic, 100); 2656 #endif 2657 MTW_LOCK(sc); 2658 } 2659 2660 static void 2661 mtw_tx_free(struct mtw_endpoint_queue *pq, struct mtw_tx_data *data, int txerr) 2662 { 2663 2664 ieee80211_tx_complete(data->ni, data->m, txerr); 2665 data->m = NULL; 2666 data->ni = NULL; 2667 2668 STAILQ_INSERT_TAIL(&pq->tx_fh, data, next); 2669 pq->tx_nfree++; 2670 } 2671 static void 2672 mtw_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error, u_int index) 2673 { 2674 struct mtw_softc *sc = usbd_xfer_softc(xfer); 2675 struct ieee80211com *ic = &sc->sc_ic; 2676 struct mtw_tx_data *data; 2677 struct ieee80211vap *vap = NULL; 2678 struct usb_page_cache *pc; 2679 struct mtw_endpoint_queue *pq = &sc->sc_epq[index]; 2680 struct mbuf *m; 2681 usb_frlength_t size; 2682 int actlen; 2683 int sumlen; 2684 usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); 2685 2686 switch 
(USB_GET_STATE(xfer)) { 2687 case USB_ST_TRANSFERRED: 2688 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB, 2689 "transfer complete: %d bytes @ index %d\n", actlen, index); 2690 2691 data = usbd_xfer_get_priv(xfer); 2692 mtw_tx_free(pq, data, 0); 2693 usbd_xfer_set_priv(xfer, NULL); 2694 2695 /* FALLTHROUGH */ 2696 case USB_ST_SETUP: 2697 tr_setup: 2698 data = STAILQ_FIRST(&pq->tx_qh); 2699 if (data == NULL) 2700 break; 2701 2702 STAILQ_REMOVE_HEAD(&pq->tx_qh, next); 2703 2704 m = data->m; 2705 2706 size = sizeof(data->desc); 2707 if ((m->m_pkthdr.len + size + 3 + 8) > MTW_MAX_TXSZ) { 2708 MTW_DPRINTF(sc, MTW_DEBUG_XMIT_DESC | MTW_DEBUG_USB, 2709 "data overflow, %u bytes\n", m->m_pkthdr.len); 2710 mtw_tx_free(pq, data, 1); 2711 goto tr_setup; 2712 } 2713 2714 pc = usbd_xfer_get_frame(xfer, 0); 2715 usbd_copy_in(pc, 0, &data->desc, size); 2716 usbd_m_copy_in(pc, size, m, 0, m->m_pkthdr.len); 2717 size += m->m_pkthdr.len; 2718 /* 2719 * Align end on a 4-byte boundary, pad 8 bytes (CRC + 2720 * 4-byte padding), and be sure to zero those trailing 2721 * bytes: 2722 */ 2723 usbd_frame_zero(pc, size, ((-size) & 3) + MTW_DMA_PAD); 2724 size += ((-size) & 3) + MTW_DMA_PAD; 2725 2726 vap = data->ni->ni_vap; 2727 if (ieee80211_radiotap_active_vap(vap)) { 2728 const struct ieee80211_frame *wh; 2729 struct mtw_tx_radiotap_header *tap = &sc->sc_txtap; 2730 struct mtw_txwi *txwi = 2731 (struct mtw_txwi *)(&data->desc + 2732 sizeof(struct mtw_txd)); 2733 int has_l2pad; 2734 2735 wh = mtod(m, struct ieee80211_frame *); 2736 has_l2pad = IEEE80211_HAS_ADDR4(wh) != 2737 IEEE80211_QOS_HAS_SEQ(wh); 2738 2739 tap->wt_flags = 0; 2740 tap->wt_rate = rt2860_rates[data->ridx].rate; 2741 tap->wt_hwqueue = index; 2742 if (le16toh(txwi->phy) & MTW_PHY_SHPRE) 2743 tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2744 if (has_l2pad) 2745 tap->wt_flags |= IEEE80211_RADIOTAP_F_DATAPAD; 2746 2747 ieee80211_radiotap_tx(vap, m); 2748 } 2749 2750 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB, 2751 "sending frame len=%u/%u @ index %d\n", m->m_pkthdr.len, 2752 size, index); 2753 2754 usbd_xfer_set_frame_len(xfer, 0, size); 2755 usbd_xfer_set_priv(xfer, data); 2756 usbd_transfer_submit(xfer); 2757 mtw_start(sc); 2758 2759 break; 2760 2761 default: 2762 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB, 2763 "USB transfer error, %s\n", usbd_errstr(error)); 2764 2765 data = usbd_xfer_get_priv(xfer); 2766 2767 if (data != NULL) { 2768 if (data->ni != NULL) 2769 vap = data->ni->ni_vap; 2770 mtw_tx_free(pq, data, error); 2771 usbd_xfer_set_priv(xfer, NULL); 2772 } 2773 2774 if (vap == NULL) 2775 vap = TAILQ_FIRST(&ic->ic_vaps); 2776 2777 if (error != USB_ERR_CANCELLED) { 2778 if (error == USB_ERR_TIMEOUT) { 2779 device_printf(sc->sc_dev, "device timeout %s\n", 2780 __func__); 2781 uint32_t i = MTW_CMDQ_GET(&sc->cmdq_store); 2782 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB, 2783 "cmdq_store=%d\n", i); 2784 sc->cmdq[i].func = mtw_usb_timeout_cb; 2785 sc->cmdq[i].arg0 = vap; 2786 ieee80211_runtask(ic, &sc->cmdq_task); 2787 } 2788 2789 /* 2790 * Try to clear stall first, also if other 2791 * errors occur, hence clearing stall 2792 * introduces a 50 ms delay: 2793 */ 2794 usbd_xfer_set_stall(xfer); 2795 goto tr_setup; 2796 } 2797 break; 2798 } 2799 #ifdef IEEE80211_SUPPORT_SUPERG 2800 /* XXX TODO: make this deferred rather than unlock/relock */ 2801 /* XXX TODO: should only do the QoS AC this belongs to */ 2802 if (pq->tx_nfree >= RUN_TX_RING_COUNT) { 2803 RUN_UNLOCK(sc); 2804 ieee80211_ff_flush_all(ic); 2805 RUN_LOCK(sc); 2806 } 2807 
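	/*
	 * NB: the flush above only happens once every slot of this
	 * endpoint's Tx ring has been returned to the free list, i.e. the
	 * ring has completely drained.
	 */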
#endif 2808 } 2809 2810 static void 2811 mtw_fw_callback(struct usb_xfer *xfer, usb_error_t error) 2812 { 2813 struct mtw_softc *sc = usbd_xfer_softc(xfer); 2814 2815 int actlen; 2816 int ntries, tmp; 2817 // struct mtw_txd *data; 2818 2819 usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); 2820 // data = usbd_xfer_get_priv(xfer); 2821 usbd_xfer_set_priv(xfer, NULL); 2822 switch (USB_GET_STATE(xfer)) { 2823 2824 case USB_ST_TRANSFERRED: 2825 sc->sc_sent += actlen; 2826 memset(sc->txd_fw[sc->sc_idx], 0, actlen); 2827 2828 if (actlen < 0x2c44 && sc->sc_idx == 0) { 2829 return; 2830 } 2831 if (sc->sc_idx == 3) { 2832 2833 if ((error = mtw_write_ivb(sc, sc->sc_ivb_1, 2834 MTW_MCU_IVB_LEN)) != 0) { 2835 device_printf(sc->sc_dev, 2836 "Could not write ivb error: %d\n", error); 2837 } 2838 2839 mtw_delay(sc, 10); 2840 for (ntries = 0; ntries < 100; ntries++) { 2841 if ((error = mtw_read_cfg(sc, MTW_MCU_DMA_ADDR, 2842 &tmp)) != 0) { 2843 device_printf(sc->sc_dev, 2844 "Could not read cfg error: %d\n", error); 2845 2846 } 2847 if (tmp == MTW_MCU_READY) { 2848 MTW_DPRINTF(sc, MTW_DEBUG_FIRMWARE, 2849 "mcu reaady %d\n", tmp); 2850 sc->fwloading = 1; 2851 break; 2852 } 2853 2854 mtw_delay(sc, 10); 2855 } 2856 if (ntries == 100) 2857 sc->fwloading = 0; 2858 wakeup(&sc->fwloading); 2859 return; 2860 } 2861 2862 if (actlen == 0x2c44) { 2863 sc->sc_idx++; 2864 DELAY(1000); 2865 } 2866 2867 case USB_ST_SETUP: { 2868 int dlen = 0; 2869 dlen = sc->txd_fw[sc->sc_idx]->len; 2870 2871 mtw_write_cfg(sc, MTW_MCU_DMA_ADDR, 0x40 + sc->sc_sent); 2872 mtw_write_cfg(sc, MTW_MCU_DMA_LEN, (dlen << 16)); 2873 2874 usbd_xfer_set_frame_len(xfer, 0, dlen); 2875 usbd_xfer_set_frame_data(xfer, 0, sc->txd_fw[sc->sc_idx], dlen); 2876 2877 // usbd_xfer_set_priv(xfer,sc->txd[sc->sc_idx]); 2878 usbd_transfer_submit(xfer); 2879 break; 2880 2881 default: /* Error */ 2882 device_printf(sc->sc_dev, "%s:%d %s\n", __FILE__, __LINE__, 2883 usbd_errstr(error)); 2884 sc->fwloading = 0; 2885 wakeup(&sc->fwloading); 2886 /* 2887 * Print error message and clear stall 2888 * for example. 2889 */ 2890 break; 2891 } 2892 /* 2893 * Here it is safe to do something without the private 2894 * USB mutex locked. 
2895 */ 2896 } 2897 return; 2898 } 2899 static void 2900 mtw_bulk_tx_callback0(struct usb_xfer *xfer, usb_error_t error) 2901 { 2902 mtw_bulk_tx_callbackN(xfer, error, 0); 2903 } 2904 2905 static void 2906 mtw_bulk_tx_callback1(struct usb_xfer *xfer, usb_error_t error) 2907 { 2908 2909 2910 mtw_bulk_tx_callbackN(xfer, error, 1); 2911 } 2912 2913 static void 2914 mtw_bulk_tx_callback2(struct usb_xfer *xfer, usb_error_t error) 2915 { 2916 mtw_bulk_tx_callbackN(xfer, error, 2); 2917 } 2918 2919 static void 2920 mtw_bulk_tx_callback3(struct usb_xfer *xfer, usb_error_t error) 2921 { 2922 mtw_bulk_tx_callbackN(xfer, error, 3); 2923 } 2924 2925 static void 2926 mtw_bulk_tx_callback4(struct usb_xfer *xfer, usb_error_t error) 2927 { 2928 mtw_bulk_tx_callbackN(xfer, error, 4); 2929 } 2930 2931 static void 2932 mtw_bulk_tx_callback5(struct usb_xfer *xfer, usb_error_t error) 2933 { 2934 mtw_bulk_tx_callbackN(xfer, error, 5); 2935 } 2936 2937 static void 2938 mtw_set_tx_desc(struct mtw_softc *sc, struct mtw_tx_data *data) 2939 { 2940 struct mbuf *m = data->m; 2941 struct ieee80211com *ic = &sc->sc_ic; 2942 struct ieee80211vap *vap = data->ni->ni_vap; 2943 struct ieee80211_frame *wh; 2944 struct mtw_txd *txd; 2945 struct mtw_txwi *txwi; 2946 uint16_t xferlen, txwisize; 2947 uint16_t mcs; 2948 uint8_t ridx = data->ridx; 2949 uint8_t pad; 2950 2951 /* get MCS code from rate index */ 2952 mcs = rt2860_rates[ridx].mcs; 2953 2954 txwisize = sizeof(*txwi); 2955 xferlen = txwisize + m->m_pkthdr.len; 2956 2957 /* roundup to 32-bit alignment */ 2958 xferlen = (xferlen + 3) & ~3; 2959 2960 txd = (struct mtw_txd *)&data->desc; 2961 txd->len = htole16(xferlen); 2962 2963 wh = mtod(m, struct ieee80211_frame *); 2964 2965 /* 2966 * Ether both are true or both are false, the header 2967 * are nicely aligned to 32-bit. So, no L2 padding. 2968 */ 2969 if (IEEE80211_HAS_ADDR4(wh) == IEEE80211_QOS_HAS_SEQ(wh)) 2970 pad = 0; 2971 else 2972 pad = 2; 2973 2974 /* setup TX Wireless Information */ 2975 txwi = (struct mtw_txwi *)(txd + 1); 2976 txwi->len = htole16(m->m_pkthdr.len - pad); 2977 if (rt2860_rates[ridx].phy == IEEE80211_T_DS) { 2978 mcs |= MTW_PHY_CCK; 2979 if (ridx != MTW_RIDX_CCK1 && 2980 (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) 2981 mcs |= MTW_PHY_SHPRE; 2982 } else if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM) { 2983 mcs |= MTW_PHY_OFDM; 2984 } else if (rt2860_rates[ridx].phy == IEEE80211_T_HT) { 2985 /* XXX TODO: [adrian] set short preamble for MCS? 
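 * For now only CCK rates above 1 Mb/s get MTW_PHY_SHPRE set (see
 * the IEEE80211_T_DS branch above); HT frames are always sent in
 * mixed, non-greenfield format.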
*/ 2986 mcs |= MTW_PHY_HT; /* Mixed, not greenfield */ 2987 } 2988 txwi->phy = htole16(mcs); 2989 2990 /* check if RTS/CTS or CTS-to-self protection is required */ 2991 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && 2992 ((m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) || 2993 ((ic->ic_flags & IEEE80211_F_USEPROT) && 2994 rt2860_rates[ridx].phy == IEEE80211_T_OFDM) || 2995 ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && 2996 rt2860_rates[ridx].phy == IEEE80211_T_HT))) 2997 txwi->txop |= MTW_TX_TXOP_HT; 2998 else 2999 txwi->txop |= MTW_TX_TXOP_BACKOFF; 3000 3001 } 3002 3003 /* This function must be called locked */ 3004 static int 3005 mtw_tx(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 3006 { 3007 struct ieee80211com *ic = &sc->sc_ic; 3008 struct ieee80211vap *vap = ni->ni_vap; 3009 struct ieee80211_frame *wh; 3010 3011 3012 //const struct ieee80211_txparam *tp = ni->ni_txparms; 3013 struct mtw_node *rn = MTW_NODE(ni); 3014 struct mtw_tx_data *data; 3015 struct mtw_txd *txd; 3016 struct mtw_txwi *txwi; 3017 uint16_t qos; 3018 uint16_t dur; 3019 uint16_t qid; 3020 uint8_t type; 3021 uint8_t tid; 3022 uint16_t ridx; 3023 uint8_t ctl_ridx; 3024 uint16_t qflags; 3025 uint8_t xflags = 0; 3026 3027 int hasqos; 3028 3029 MTW_LOCK_ASSERT(sc, MA_OWNED); 3030 3031 wh = mtod(m, struct ieee80211_frame *); 3032 const struct ieee80211_txparam *tp = ni->ni_txparms; 3033 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3034 3035 qflags = htole16(MTW_TXD_DATA | MTW_TXD_80211 | 3036 MTW_TXD_WLAN | MTW_TXD_QSEL_HCCA); 3037 3038 if ((hasqos = IEEE80211_QOS_HAS_SEQ(wh))) { 3039 uint8_t *frm; 3040 frm = ieee80211_getqos(wh); 3041 3042 3043 //device_printf(sc->sc_dev,"JSS:frm:%d",*frm); 3044 qos = le16toh(*(const uint16_t *)frm); 3045 tid = ieee80211_gettid(wh); 3046 qid = TID_TO_WME_AC(tid); 3047 qflags |= MTW_TXD_QSEL_EDCA; 3048 } else { 3049 qos = 0; 3050 tid = 0; 3051 qid = WME_AC_BE; 3052 } 3053 if (type & IEEE80211_FC0_TYPE_MGT) { 3054 qid = 0; 3055 } 3056 3057 if (type != IEEE80211_FC0_TYPE_DATA) 3058 qflags |= htole16(MTW_TXD_WIV); 3059 3060 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 3061 type != IEEE80211_FC0_TYPE_DATA || m->m_flags & M_EAPOL) { 3062 /* XXX TODO: methodize for 11n; use MCS0 for 11NA/11NG */ 3063 ridx = (ic->ic_curmode == IEEE80211_MODE_11A 3064 || ic->ic_curmode == IEEE80211_MODE_11NA) ? 
3065 MTW_RIDX_OFDM6 : MTW_RIDX_CCK1; 3066 if (type == IEEE80211_MODE_11NG) { 3067 ridx = 12; 3068 } 3069 ctl_ridx = rt2860_rates[ridx].ctl_ridx; 3070 } else { 3071 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { 3072 ridx = rn->fix_ridx; 3073 3074 } else { 3075 ridx = rn->amrr_ridx; 3076 ctl_ridx = rt2860_rates[ridx].ctl_ridx; 3077 } 3078 } 3079 3080 if (hasqos) 3081 xflags = 0; 3082 else 3083 xflags = MTW_TX_NSEQ; 3084 3085 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && 3086 (!hasqos || 3087 (qos & IEEE80211_QOS_ACKPOLICY) != 3088 IEEE80211_QOS_ACKPOLICY_NOACK)) { 3089 xflags |= MTW_TX_ACK; 3090 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 3091 dur = rt2860_rates[ctl_ridx].sp_ack_dur; 3092 else 3093 dur = rt2860_rates[ctl_ridx].lp_ack_dur; 3094 USETW(wh->i_dur, dur); 3095 } 3096 /* reserve slots for mgmt packets, just in case */ 3097 if (sc->sc_epq[qid].tx_nfree < 3) { 3098 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "tx ring %d is full\n", qid); 3099 return (-1); 3100 } 3101 3102 data = STAILQ_FIRST(&sc->sc_epq[qid].tx_fh); 3103 STAILQ_REMOVE_HEAD(&sc->sc_epq[qid].tx_fh, next); 3104 sc->sc_epq[qid].tx_nfree--; 3105 3106 txd = (struct mtw_txd *)&data->desc; 3107 txd->flags = qflags; 3108 3109 txwi = (struct mtw_txwi *)(txd + 1); 3110 txwi->xflags = xflags; 3111 txwi->wcid = (type == IEEE80211_FC0_TYPE_DATA) ? 3112 3113 MTW_AID2WCID(ni->ni_associd) : 3114 0xff; 3115 3116 /* clear leftover garbage bits */ 3117 txwi->flags = 0; 3118 txwi->txop = 0; 3119 3120 data->m = m; 3121 data->ni = ni; 3122 data->ridx = ridx; 3123 3124 mtw_set_tx_desc(sc, data); 3125 3126 /* 3127 * The chip keeps track of 2 kind of Tx stats, 3128 * * TX_STAT_FIFO, for per WCID stats, and 3129 * * TX_STA_CNT0 for all-TX-in-one stats. 3130 * 3131 * To use FIFO stats, we need to store MCS into the driver-private 3132 * PacketID field. So that, we can tell whose stats when we read them. 3133 * We add 1 to the MCS because setting the PacketID field to 0 means 3134 * that we don't want feedback in TX_STAT_FIFO. 3135 * And, that's what we want for STA mode, since TX_STA_CNT0 does the 3136 * job. 3137 * 3138 * FIFO stats doesn't count Tx with WCID 0xff, so we do this in 3139 * run_tx(). 3140 */ 3141 3142 if (sc->rvp_cnt > 1 || vap->iv_opmode == IEEE80211_M_HOSTAP || 3143 vap->iv_opmode == IEEE80211_M_MBSS) { 3144 3145 /* 3146 * Unlike PCI based devices, we don't get any interrupt from 3147 * USB devices, so we simulate FIFO-is-full interrupt here. 3148 * Ralink recommends to drain FIFO stats every 100 ms, but 16 3149 * slots quickly get fulled. To prevent overflow, increment a 3150 * counter on every FIFO stat request, so we know how many slots 3151 * are left. We do this only in HOSTAP or multiple vap mode 3152 * since FIFO stats are used only in those modes. We just drain 3153 * stats. AMRR gets updated every 1 sec by run_ratectl_cb() via 3154 * callout. Call it early. Otherwise overflow. 3155 */ 3156 if (sc->fifo_cnt++ == 10) { 3157 /* 3158 * With multiple vaps or if_bridge, if_start() is called 3159 * with a non-sleepable lock, tcpinp. So, need to defer. 
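 * The deferred mtw_drain_fifo() then runs from the command queue
 * task, where sleeping is allowed and the softc lock is held.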
3160 */ 3161 uint32_t i = MTW_CMDQ_GET(&sc->cmdq_store); 3162 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "cmdq_store=%d\n", i); 3163 sc->cmdq[i].func = mtw_drain_fifo; 3164 sc->cmdq[i].arg0 = sc; 3165 ieee80211_runtask(ic, &sc->cmdq_task); 3166 } 3167 } 3168 3169 STAILQ_INSERT_TAIL(&sc->sc_epq[qid].tx_qh, data, next); 3170 usbd_transfer_start(sc->sc_xfer[mtw_wme_ac_xfer_map[qid]]); 3171 3172 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, 3173 "sending data frame len=%d rate=%d qid=%d\n", 3174 m->m_pkthdr.len + 3175 (int)(sizeof(struct mtw_txd) + sizeof(struct mtw_txwi)), 3176 rt2860_rates[ridx].rate, qid); 3177 3178 return (0); 3179 } 3180 3181 static int 3182 mtw_tx_mgt(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 3183 { 3184 struct ieee80211com *ic = &sc->sc_ic; 3185 struct mtw_node *rn = MTW_NODE(ni); 3186 struct mtw_tx_data *data; 3187 struct ieee80211_frame *wh; 3188 struct mtw_txd *txd; 3189 struct mtw_txwi *txwi; 3190 uint8_t type; 3191 uint16_t dur; 3192 uint8_t ridx = rn->mgt_ridx; 3193 uint8_t xflags = 0; 3194 uint8_t wflags = 0; 3195 3196 MTW_LOCK_ASSERT(sc, MA_OWNED); 3197 3198 wh = mtod(m, struct ieee80211_frame *); 3199 3200 /* tell hardware to add timestamp for probe responses */ 3201 if ((wh->i_fc[0] & 3202 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 3203 (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) 3204 wflags |= MTW_TX_TS; 3205 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3206 xflags |= MTW_TX_ACK; 3207 3208 dur = ieee80211_ack_duration(ic->ic_rt, rt2860_rates[ridx].rate, 3209 ic->ic_flags & IEEE80211_F_SHPREAMBLE); 3210 USETW(wh->i_dur, dur); 3211 } 3212 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3213 if (sc->sc_epq[0].tx_nfree == 0) 3214 /* let caller free mbuf */ 3215 return (EIO); 3216 data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh); 3217 STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next); 3218 sc->sc_epq[0].tx_nfree--; 3219 3220 txd = (struct mtw_txd *)&data->desc; 3221 txd->flags = htole16( 3222 MTW_TXD_DATA | MTW_TXD_80211 | MTW_TXD_WLAN | MTW_TXD_QSEL_EDCA); 3223 if (type != IEEE80211_FC0_TYPE_DATA) 3224 txd->flags |= htole16(MTW_TXD_WIV); 3225 3226 txwi = (struct mtw_txwi *)(txd + 1); 3227 txwi->wcid = 0xff; 3228 txwi->xflags = xflags; 3229 txwi->flags = wflags; 3230 3231 txwi->txop = 0; /* clear leftover garbage bits */ 3232 3233 data->m = m; 3234 data->ni = ni; 3235 data->ridx = ridx; 3236 3237 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending mgt frame len=%d rate=%d\n", 3238 m->m_pkthdr.len + 3239 (int)(sizeof(struct mtw_txd) + sizeof(struct mtw_txwi)), 3240 rt2860_rates[ridx].rate); 3241 3242 STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next); 3243 3244 usbd_transfer_start(sc->sc_xfer[MTW_BULK_TX_BE]); 3245 3246 return (0); 3247 } 3248 3249 static int 3250 mtw_sendprot(struct mtw_softc *sc, const struct mbuf *m, 3251 struct ieee80211_node *ni, int prot, int rate) 3252 { 3253 struct ieee80211com *ic = ni->ni_ic; 3254 struct mtw_tx_data *data; 3255 struct mtw_txd *txd; 3256 struct mtw_txwi *txwi; 3257 struct mbuf *mprot; 3258 int ridx; 3259 int protrate; 3260 uint8_t wflags = 0; 3261 uint8_t xflags = 0; 3262 3263 MTW_LOCK_ASSERT(sc, MA_OWNED); 3264 3265 /* check that there are free slots before allocating the mbuf */ 3266 if (sc->sc_epq[0].tx_nfree == 0) 3267 /* let caller free mbuf */ 3268 return (ENOBUFS); 3269 3270 mprot = ieee80211_alloc_prot(ni, m, rate, prot); 3271 if (mprot == NULL) { 3272 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); 3273 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "could not allocate mbuf\n"); 3274 return (ENOBUFS); 3275 } 
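	/*
	 * Build and queue the protection frame: it goes out at the legacy
	 * control rate, carries the MTW_TX_FRAG flag and is placed on the
	 * EDCA endpoint ahead of the frame it protects.
	 */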
3276 3277 protrate = ieee80211_ctl_rate(ic->ic_rt, rate); 3278 wflags = MTW_TX_FRAG; 3279 xflags = 0; 3280 if (prot == IEEE80211_PROT_RTSCTS) 3281 xflags |= MTW_TX_ACK; 3282 3283 data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh); 3284 STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next); 3285 sc->sc_epq[0].tx_nfree--; 3286 3287 txd = (struct mtw_txd *)&data->desc; 3288 txd->flags = RT2860_TX_QSEL_EDCA; 3289 txwi = (struct mtw_txwi *)(txd + 1); 3290 txwi->wcid = 0xff; 3291 txwi->flags = wflags; 3292 txwi->xflags = xflags; 3293 txwi->txop = 0; /* clear leftover garbage bits */ 3294 3295 data->m = mprot; 3296 data->ni = ieee80211_ref_node(ni); 3297 3298 /* XXX TODO: methodize with MCS rates */ 3299 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) 3300 if (rt2860_rates[ridx].rate == protrate) 3301 break; 3302 data->ridx = ridx; 3303 3304 mtw_set_tx_desc(sc, data); 3305 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending prot len=%u rate=%u\n", 3306 m->m_pkthdr.len, rate); 3307 3308 STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next); 3309 3310 usbd_transfer_start(sc->sc_xfer[0]); 3311 3312 return (0); 3313 } 3314 3315 static int 3316 mtw_tx_param(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni, 3317 const struct ieee80211_bpf_params *params) 3318 { 3319 struct ieee80211com *ic = ni->ni_ic; 3320 struct mtw_tx_data *data; 3321 struct mtw_txd *txd; 3322 struct mtw_txwi *txwi; 3323 uint8_t ridx; 3324 uint8_t rate; 3325 uint8_t opflags = 0; 3326 uint8_t xflags = 0; 3327 int error; 3328 3329 MTW_LOCK_ASSERT(sc, MA_OWNED); 3330 3331 KASSERT(params != NULL, ("no raw xmit params")); 3332 3333 rate = params->ibp_rate0; 3334 if (!ieee80211_isratevalid(ic->ic_rt, rate)) { 3335 /* let caller free mbuf */ 3336 return (EINVAL); 3337 } 3338 3339 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 3340 xflags |= MTW_TX_ACK; 3341 if (params->ibp_flags & (IEEE80211_BPF_RTS | IEEE80211_BPF_CTS)) { 3342 error = mtw_sendprot(sc, m, ni, 3343 params->ibp_flags & IEEE80211_BPF_RTS ? 
3344 IEEE80211_PROT_RTSCTS : 3345 IEEE80211_PROT_CTSONLY, 3346 rate); 3347 if (error) { 3348 device_printf(sc->sc_dev, "%s:%d %d\n", __FILE__, 3349 __LINE__, error); 3350 return (error); 3351 } 3352 opflags |= MTW_TX_TXOP_SIFS; 3353 } 3354 3355 if (sc->sc_epq[0].tx_nfree == 0) { 3356 /* let caller free mbuf */ 3357 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, 3358 "sending raw frame, but tx ring is full\n"); 3359 return (EIO); 3360 } 3361 data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh); 3362 STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next); 3363 sc->sc_epq[0].tx_nfree--; 3364 3365 txd = (struct mtw_txd *)&data->desc; 3366 txd->flags = htole16( 3367 MTW_TXD_DATA | MTW_TXD_80211 | MTW_TXD_WLAN | MTW_TXD_QSEL_EDCA); 3368 // txd->flags = htole16(MTW_TXD_QSEL_EDCA); 3369 txwi = (struct mtw_txwi *)(txd + 1); 3370 txwi->wcid = 0xff; 3371 txwi->xflags = xflags; 3372 txwi->txop = opflags; 3373 txwi->flags = 0; /* clear leftover garbage bits */ 3374 3375 data->m = m; 3376 data->ni = ni; 3377 /* XXX TODO: methodize with MCS rates */ 3378 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) 3379 if (rt2860_rates[ridx].rate == rate) 3380 break; 3381 data->ridx = ridx; 3382 3383 mtw_set_tx_desc(sc, data); 3384 3385 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending raw frame len=%u rate=%u\n", 3386 m->m_pkthdr.len, rate); 3387 3388 STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next); 3389 3390 usbd_transfer_start(sc->sc_xfer[MTW_BULK_RAW_TX]); 3391 3392 return (0); 3393 } 3394 3395 static int 3396 mtw_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3397 const struct ieee80211_bpf_params *params) 3398 { 3399 struct mtw_softc *sc = ni->ni_ic->ic_softc; 3400 int error = 0; 3401 MTW_LOCK(sc); 3402 /* prevent management frames from being sent if we're not ready */ 3403 if (!(sc->sc_flags & MTW_RUNNING)) { 3404 error = ENETDOWN; 3405 goto done; 3406 } 3407 3408 if (params == NULL) { 3409 /* tx mgt packet */ 3410 if ((error = mtw_tx_mgt(sc, m, ni)) != 0) { 3411 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "mgt tx failed\n"); 3412 goto done; 3413 } 3414 } else { 3415 /* tx raw packet with param */ 3416 if ((error = mtw_tx_param(sc, m, ni, params)) != 0) { 3417 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, 3418 "tx with param failed\n"); 3419 goto done; 3420 } 3421 } 3422 3423 done: 3424 3425 MTW_UNLOCK(sc); 3426 3427 if (error != 0) { 3428 if (m != NULL) 3429 m_freem(m); 3430 } 3431 3432 return (error); 3433 } 3434 3435 static int 3436 mtw_transmit(struct ieee80211com *ic, struct mbuf *m) 3437 { 3438 struct mtw_softc *sc = ic->ic_softc; 3439 int error; 3440 MTW_LOCK(sc); 3441 if ((sc->sc_flags & MTW_RUNNING) == 0) { 3442 MTW_UNLOCK(sc); 3443 return (ENXIO); 3444 } 3445 error = mbufq_enqueue(&sc->sc_snd, m); 3446 if (error) { 3447 MTW_UNLOCK(sc); 3448 return (error); 3449 } 3450 mtw_start(sc); 3451 MTW_UNLOCK(sc); 3452 3453 return (0); 3454 } 3455 3456 static void 3457 mtw_start(struct mtw_softc *sc) 3458 { 3459 struct ieee80211_node *ni; 3460 struct mbuf *m; 3461 3462 MTW_LOCK_ASSERT(sc, MA_OWNED); 3463 3464 if ((sc->sc_flags & MTW_RUNNING) == 0) { 3465 3466 return; 3467 } 3468 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { 3469 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3470 if (mtw_tx(sc, m, ni) != 0) { 3471 mbufq_prepend(&sc->sc_snd, m); 3472 break; 3473 } 3474 } 3475 } 3476 3477 static void 3478 mtw_parent(struct ieee80211com *ic) 3479 { 3480 3481 struct mtw_softc *sc = ic->ic_softc; 3482 3483 MTW_LOCK(sc); 3484 if (sc->sc_detached) { 3485 MTW_UNLOCK(sc); 3486 return; 3487 } 3488 3489 if (!(sc->sc_flags & MTW_RUNNING) && ic->ic_nrunning > 0) { 3490 
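		/* first interface coming up: bring the hardware up, then
		 * let net80211 start its queues */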
mtw_init_locked(sc); 3491 MTW_UNLOCK(sc); 3492 ieee80211_start_all(ic); 3493 return; 3494 } 3495 if (!(sc->sc_flags & MTW_RUNNING) && ic->ic_nrunning > 0) { 3496 mtw_update_promisc_locked(sc); 3497 MTW_UNLOCK(sc); 3498 return; 3499 } 3500 if ((sc->sc_flags & MTW_RUNNING) && sc->rvp_cnt <= 1 && 3501 ic->ic_nrunning == 0) { 3502 mtw_stop(sc); 3503 MTW_UNLOCK(sc); 3504 return; 3505 } 3506 return; 3507 } 3508 3509 static void 3510 mt7601_set_agc(struct mtw_softc *sc, uint8_t agc) 3511 { 3512 uint8_t bbp; 3513 3514 mtw_bbp_write(sc, 66, agc); 3515 mtw_bbp_write(sc, 195, 0x87); 3516 bbp = (agc & 0xf0) | 0x08; 3517 mtw_bbp_write(sc, 196, bbp); 3518 } 3519 3520 static int 3521 mtw_mcu_calibrate(struct mtw_softc *sc, int func, uint32_t val) 3522 { 3523 struct mtw_mcu_cmd_8 cmd; 3524 3525 cmd.func = htole32(func); 3526 cmd.val = htole32(val); 3527 return (mtw_mcu_cmd(sc, 31, &cmd, sizeof(struct mtw_mcu_cmd_8))); 3528 } 3529 3530 static int 3531 mtw_rf_write(struct mtw_softc *sc, uint8_t bank, uint8_t reg, uint8_t val) 3532 { 3533 uint32_t tmp; 3534 int error, ntries, shift; 3535 3536 for (ntries = 0; ntries < 10; ntries++) { 3537 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0) 3538 return (error); 3539 if (!(tmp & MTW_RF_CSR_KICK)) 3540 break; 3541 } 3542 if (ntries == 10) 3543 return (ETIMEDOUT); 3544 3545 if (sc->asic_ver == 0x7601) 3546 shift = MT7601_BANK_SHIFT; 3547 else 3548 shift = MT7610_BANK_SHIFT; 3549 3550 tmp = MTW_RF_CSR_WRITE | MTW_RF_CSR_KICK | (bank & 0xf) << shift | 3551 reg << 8 | val; 3552 return (mtw_write(sc, MTW_RF_CSR, tmp)); 3553 } 3554 3555 void 3556 mtw_select_chan_group(struct mtw_softc *sc, int group) 3557 { 3558 uint32_t tmp; 3559 uint8_t bbp; 3560 3561 /* Tx band 20MHz 2G */ 3562 mtw_read(sc, MTW_TX_BAND_CFG, &tmp); 3563 tmp &= ~( 3564 MTW_TX_BAND_SEL_2G | MTW_TX_BAND_SEL_5G | MTW_TX_BAND_UPPER_40M); 3565 tmp |= (group == 0) ? 
MTW_TX_BAND_SEL_2G : MTW_TX_BAND_SEL_5G; 3566 mtw_write(sc, MTW_TX_BAND_CFG, tmp); 3567 3568 /* select 20 MHz bandwidth */ 3569 mtw_bbp_read(sc, 4, &bbp); 3570 bbp &= ~0x18; 3571 bbp |= 0x40; 3572 mtw_bbp_write(sc, 4, bbp); 3573 3574 /* calibrate BBP */ 3575 mtw_bbp_write(sc, 69, 0x12); 3576 mtw_bbp_write(sc, 91, 0x07); 3577 mtw_bbp_write(sc, 195, 0x23); 3578 mtw_bbp_write(sc, 196, 0x17); 3579 mtw_bbp_write(sc, 195, 0x24); 3580 mtw_bbp_write(sc, 196, 0x06); 3581 mtw_bbp_write(sc, 195, 0x81); 3582 mtw_bbp_write(sc, 196, 0x12); 3583 mtw_bbp_write(sc, 195, 0x83); 3584 mtw_bbp_write(sc, 196, 0x17); 3585 mtw_rf_write(sc, 5, 8, 0x00); 3586 // mtw_mcu_calibrate(sc, 0x6, 0x10001); 3587 3588 /* set initial AGC value */ 3589 mt7601_set_agc(sc, 0x14); 3590 } 3591 3592 static int 3593 mtw_rf_read(struct mtw_softc *sc, uint8_t bank, uint8_t reg, uint8_t *val) 3594 { 3595 uint32_t tmp; 3596 int error, ntries, shift; 3597 3598 for (ntries = 0; ntries < 100; ntries++) { 3599 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0) 3600 return (error); 3601 if (!(tmp & MTW_RF_CSR_KICK)) 3602 break; 3603 } 3604 if (ntries == 100) 3605 return (ETIMEDOUT); 3606 3607 if (sc->asic_ver == 0x7601) 3608 shift = MT7601_BANK_SHIFT; 3609 else 3610 shift = MT7610_BANK_SHIFT; 3611 3612 tmp = MTW_RF_CSR_KICK | (bank & 0xf) << shift | reg << 8; 3613 if ((error = mtw_write(sc, MTW_RF_CSR, tmp)) != 0) 3614 return (error); 3615 3616 for (ntries = 0; ntries < 100; ntries++) { 3617 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0) 3618 return (error); 3619 if (!(tmp & MTW_RF_CSR_KICK)) 3620 break; 3621 } 3622 if (ntries == 100) 3623 return (ETIMEDOUT); 3624 3625 *val = tmp & 0xff; 3626 return (0); 3627 } 3628 static void 3629 mt7601_set_chan(struct mtw_softc *sc, u_int chan) 3630 { 3631 uint32_t tmp; 3632 uint8_t bbp, rf, txpow1; 3633 int i; 3634 /* find the settings for this channel */ 3635 for (i = 0; mt7601_rf_chan[i].chan != chan; i++) 3636 ; 3637 3638 mtw_rf_write(sc, 0, 17, mt7601_rf_chan[i].r17); 3639 mtw_rf_write(sc, 0, 18, mt7601_rf_chan[i].r18); 3640 mtw_rf_write(sc, 0, 19, mt7601_rf_chan[i].r19); 3641 mtw_rf_write(sc, 0, 20, mt7601_rf_chan[i].r20); 3642 3643 /* use Tx power values from EEPROM */ 3644 txpow1 = sc->txpow1[i]; 3645 3646 /* Tx automatic level control */ 3647 mtw_read(sc, MTW_TX_ALC_CFG0, &tmp); 3648 tmp &= ~0x3f3f; 3649 tmp |= (txpow1 & 0x3f); 3650 mtw_write(sc, MTW_TX_ALC_CFG0, tmp); 3651 3652 /* LNA */ 3653 mtw_bbp_write(sc, 62, 0x37 - sc->lna[0]); 3654 mtw_bbp_write(sc, 63, 0x37 - sc->lna[0]); 3655 mtw_bbp_write(sc, 64, 0x37 - sc->lna[0]); 3656 3657 /* VCO calibration */ 3658 mtw_rf_write(sc, 0, 4, 0x0a); 3659 mtw_rf_write(sc, 0, 5, 0x20); 3660 mtw_rf_read(sc, 0, 4, &rf); 3661 mtw_rf_write(sc, 0, 4, rf | 0x80); 3662 3663 /* select 20 MHz bandwidth */ 3664 mtw_bbp_read(sc, 4, &bbp); 3665 bbp &= ~0x18; 3666 bbp |= 0x40; 3667 mtw_bbp_write(sc, 4, bbp); 3668 mtw_bbp_write(sc, 178, 0xff); 3669 } 3670 3671 static int 3672 mtw_set_chan(struct mtw_softc *sc, struct ieee80211_channel *c) 3673 { 3674 struct ieee80211com *ic = &sc->sc_ic; 3675 u_int chan, group; 3676 3677 chan = ieee80211_chan2ieee(ic, c); 3678 if (chan == 0 || chan == IEEE80211_CHAN_ANY) 3679 return (EINVAL); 3680 3681 /* determine channel group */ 3682 if (chan <= 14) 3683 group = 0; 3684 else if (chan <= 64) 3685 group = 1; 3686 else if (chan <= 128) 3687 group = 2; 3688 else 3689 group = 3; 3690 3691 if (group != sc->sc_chan_group || !sc->sc_bw_calibrated) 3692 mtw_select_chan_group(sc, group); 3693 3694 sc->sc_chan_group = group; 3695 3696 /* 
chipset specific */ 3697 if (sc->asic_ver == 0x7601) 3698 mt7601_set_chan(sc, chan); 3699 3700 DELAY(1000); 3701 return (0); 3702 } 3703 3704 static void 3705 mtw_set_channel(struct ieee80211com *ic) 3706 { 3707 struct mtw_softc *sc = ic->ic_softc; 3708 3709 MTW_LOCK(sc); 3710 mtw_set_chan(sc, ic->ic_curchan); 3711 MTW_UNLOCK(sc); 3712 3713 return; 3714 } 3715 3716 static void 3717 mtw_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, 3718 struct ieee80211_channel chans[]) 3719 { 3720 // struct mtw_softc *sc = ic->ic_softc; 3721 uint8_t bands[IEEE80211_MODE_BYTES]; 3722 3723 memset(bands, 0, sizeof(bands)); 3724 setbit(bands, IEEE80211_MODE_11B); 3725 setbit(bands, IEEE80211_MODE_11G); 3726 setbit(bands, IEEE80211_MODE_11NG); 3727 3728 /* Note: for now, only support HT20 channels */ 3729 ieee80211_add_channels_default_2ghz(chans, maxchans, nchans, bands, 0); 3730 } 3731 3732 static void 3733 mtw_scan_start(struct ieee80211com *ic) 3734 { 3735 struct mtw_softc *sc = ic->ic_softc; 3736 MTW_LOCK(sc); 3737 /* abort TSF synchronization */ 3738 mtw_abort_tsf_sync(sc); 3739 mtw_set_bssid(sc, ieee80211broadcastaddr); 3740 3741 MTW_UNLOCK(sc); 3742 3743 return; 3744 } 3745 3746 static void 3747 mtw_scan_end(struct ieee80211com *ic) 3748 { 3749 struct mtw_softc *sc = ic->ic_softc; 3750 3751 MTW_LOCK(sc); 3752 3753 mtw_enable_tsf_sync(sc); 3754 mtw_set_bssid(sc, sc->sc_bssid); 3755 3756 MTW_UNLOCK(sc); 3757 3758 return; 3759 } 3760 3761 /* 3762 * Could be called from ieee80211_node_timeout() 3763 * (non-sleepable thread) 3764 */ 3765 static void 3766 mtw_update_beacon(struct ieee80211vap *vap, int item) 3767 { 3768 struct ieee80211com *ic = vap->iv_ic; 3769 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 3770 struct ieee80211_node *ni = vap->iv_bss; 3771 struct mtw_softc *sc = ic->ic_softc; 3772 struct mtw_vap *rvp = MTW_VAP(vap); 3773 int mcast = 0; 3774 uint32_t i; 3775 3776 switch (item) { 3777 case IEEE80211_BEACON_ERP: 3778 mtw_updateslot(ic); 3779 break; 3780 case IEEE80211_BEACON_HTINFO: 3781 mtw_updateprot(ic); 3782 break; 3783 case IEEE80211_BEACON_TIM: 3784 mcast = 1; /*TODO*/ 3785 break; 3786 default: 3787 break; 3788 } 3789 3790 setbit(bo->bo_flags, item); 3791 if (rvp->beacon_mbuf == NULL) { 3792 rvp->beacon_mbuf = ieee80211_beacon_alloc(ni); 3793 if (rvp->beacon_mbuf == NULL) 3794 return; 3795 } 3796 ieee80211_beacon_update(ni, rvp->beacon_mbuf, mcast); 3797 3798 i = MTW_CMDQ_GET(&sc->cmdq_store); 3799 MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i); 3800 sc->cmdq[i].func = mtw_update_beacon_cb; 3801 sc->cmdq[i].arg0 = vap; 3802 ieee80211_runtask(ic, &sc->cmdq_task); 3803 3804 return; 3805 } 3806 3807 static void 3808 mtw_update_beacon_cb(void *arg) 3809 { 3810 3811 struct ieee80211vap *vap = arg; 3812 struct ieee80211_node *ni = vap->iv_bss; 3813 struct mtw_vap *rvp = MTW_VAP(vap); 3814 struct ieee80211com *ic = vap->iv_ic; 3815 struct mtw_softc *sc = ic->ic_softc; 3816 struct mtw_txwi txwi; 3817 struct mbuf *m; 3818 uint16_t txwisize; 3819 uint8_t ridx; 3820 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 3821 return; 3822 if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) 3823 return; 3824 3825 /* 3826 * No need to call ieee80211_beacon_update(), mtw_update_beacon() 3827 * is taking care of appropriate calls. 
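 * All that remains here is to build a beacon TXWI and copy the
 * frame into the on-chip beacon area at MTW_BCN_BASE.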
 */
	if (rvp->beacon_mbuf == NULL) {
		rvp->beacon_mbuf = ieee80211_beacon_alloc(ni);
		if (rvp->beacon_mbuf == NULL)
			return;
	}
	m = rvp->beacon_mbuf;

	memset(&txwi, 0, sizeof(txwi));
	txwi.wcid = 0xff;
	txwi.len = htole16(m->m_pkthdr.len);

	/* send beacons at the lowest available rate */
	ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ? MTW_RIDX_OFDM6 :
	    MTW_RIDX_CCK1;
	txwi.phy = htole16(rt2860_rates[ridx].mcs);
	if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM)
		txwi.phy |= htole16(MTW_PHY_OFDM);
	txwi.txop = MTW_TX_TXOP_HT;
	txwi.flags = MTW_TX_TS;
	txwi.xflags = MTW_TX_NSEQ;

	txwisize = sizeof(txwi);
	mtw_write_region_1(sc, MTW_BCN_BASE, (uint8_t *)&txwi, txwisize);
	mtw_write_region_1(sc, MTW_BCN_BASE + txwisize, mtod(m, uint8_t *),
	    (m->m_pkthdr.len + 1) & ~1);
}

static void
mtw_updateprot(struct ieee80211com *ic)
{
	struct mtw_softc *sc = ic->ic_softc;
	uint32_t i;

	i = MTW_CMDQ_GET(&sc->cmdq_store);
	MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i);
	sc->cmdq[i].func = mtw_updateprot_cb;
	sc->cmdq[i].arg0 = ic;
	ieee80211_runtask(ic, &sc->cmdq_task);
}

static void
mtw_updateprot_cb(void *arg)
{
	struct ieee80211com *ic = arg;
	struct mtw_softc *sc = ic->ic_softc;
	uint32_t tmp;

	tmp = RT2860_RTSTH_EN | RT2860_PROT_NAV_SHORT | RT2860_TXOP_ALLOW_ALL;
	/* setup protection frame rate (MCS code) */
	tmp |= (ic->ic_curmode == IEEE80211_MODE_11A) ?
	    rt2860_rates[MTW_RIDX_OFDM6].mcs | MTW_PHY_OFDM :
	    rt2860_rates[MTW_RIDX_CCK11].mcs;

	/* CCK frames don't require protection */
	mtw_write(sc, MTW_CCK_PROT_CFG, tmp);
	if (ic->ic_flags & IEEE80211_F_USEPROT) {
		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
			tmp |= RT2860_PROT_CTRL_RTS_CTS;
		else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
			tmp |= RT2860_PROT_CTRL_CTS;
	}
	mtw_write(sc, MTW_OFDM_PROT_CFG, tmp);
}

static void
mtw_usb_timeout_cb(void *arg)
{
	struct ieee80211vap *vap = arg;
	struct mtw_softc *sc = vap->iv_ic->ic_softc;

	MTW_LOCK_ASSERT(sc, MA_OWNED);

	if (vap->iv_state == IEEE80211_S_SCAN) {
		MTW_DPRINTF(sc, MTW_DEBUG_USB | MTW_DEBUG_STATE,
		    "timeout caused by scan\n");
		/* cancel bgscan */
		ieee80211_cancel_scan(vap);
	} else {
		MTW_DPRINTF(sc, MTW_DEBUG_USB | MTW_DEBUG_STATE,
		    "timeout by unknown cause\n");
	}
}

static int
mtw_reset(struct mtw_softc *sc)
{
	usb_device_request_t req;
	uint16_t tmp;
	uint16_t actlen;

	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = MTW_RESET;
	USETW(req.wValue, 1);
	USETW(req.wIndex, 0);
	USETW(req.wLength, 0);
	return (usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx,
	    &req, &tmp, 0, &actlen, 1000));
}

static void
mtw_update_promisc_locked(struct mtw_softc *sc)
{
	uint32_t tmp;

	mtw_read(sc, MTW_RX_FILTR_CFG, &tmp);

	tmp |= MTW_DROP_UC_NOME;
	if (sc->sc_ic.ic_promisc > 0)
		tmp &= ~MTW_DROP_UC_NOME;

	mtw_write(sc, MTW_RX_FILTR_CFG, tmp);

	MTW_DPRINTF(sc, MTW_DEBUG_RECV, "%s promiscuous mode\n",
	    (sc->sc_ic.ic_promisc > 0) ? "entering" : "leaving");
}

/*
 * Unlocked wrapper around mtw_update_promisc_locked(); promiscuous mode
 * simply clears MTW_DROP_UC_NOME so unicast frames not addressed to us
 * are no longer dropped by the MAC.
 */
static void
mtw_update_promisc(struct ieee80211com *ic)
{
	struct mtw_softc *sc = ic->ic_softc;

	if ((sc->sc_flags & MTW_RUNNING) == 0)
		return;

	MTW_LOCK(sc);
	mtw_update_promisc_locked(sc);
	MTW_UNLOCK(sc);
}

static void
mtw_enable_tsf_sync(struct mtw_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	uint32_t tmp;
	int error;

	mtw_read(sc, MTW_BCN_TIME_CFG, &tmp);
	tmp &= ~0x1fffff;
	tmp |= vap->iv_bss->ni_intval * 16;
	tmp |= MTW_TSF_TIMER_EN | MTW_TBTT_TIMER_EN;

	/* local TSF is always updated with remote TSF on beacon reception */
	tmp |= 1 << MTW_TSF_SYNC_MODE_SHIFT;
	error = mtw_write(sc, MTW_BCN_TIME_CFG, tmp);
	if (error != 0) {
		device_printf(sc->sc_dev, "enable_tsf_sync failed error:%d\n",
		    error);
	}
}

static void
mtw_enable_mrr(struct mtw_softc *sc)
{
#define CCK(mcs)	(mcs)
#define OFDM(mcs)	(1 << 3 | (mcs))
	mtw_write(sc, MTW_LG_FBK_CFG0,
	    OFDM(6) << 28 |	/* 54->48 */
	    OFDM(5) << 24 |	/* 48->36 */
	    OFDM(4) << 20 |	/* 36->24 */
	    OFDM(3) << 16 |	/* 24->18 */
	    OFDM(2) << 12 |	/* 18->12 */
	    OFDM(1) << 8 |	/* 12-> 9 */
	    OFDM(0) << 4 |	/*  9-> 6 */
	    OFDM(0));		/*  6-> 6 */

	mtw_write(sc, MTW_LG_FBK_CFG1,
	    CCK(2) << 12 |	/* 11->5.5 */
	    CCK(1) << 8 |	/* 5.5-> 2 */
	    CCK(0) << 4 |	/*   2-> 1 */
	    CCK(0));		/*   1-> 1 */
#undef OFDM
#undef CCK
}

static void
mtw_set_txpreamble(struct mtw_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t tmp;

	mtw_read(sc, MTW_AUTO_RSP_CFG, &tmp);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		tmp |= MTW_CCK_SHORT_EN;
	else
		tmp &= ~MTW_CCK_SHORT_EN;
	mtw_write(sc, MTW_AUTO_RSP_CFG, tmp);
}

static void
mtw_set_basicrates(struct mtw_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/* set basic rates mask */
	if (ic->ic_curmode == IEEE80211_MODE_11B)
		mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x003);
	else if (ic->ic_curmode == IEEE80211_MODE_11A)
		mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x150);
	else	/* 11g */
		mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x17f);
}

static void
mtw_set_bssid(struct mtw_softc *sc, const uint8_t *bssid)
{
	mtw_write(sc, MTW_MAC_BSSID_DW0,
	    bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24);
	mtw_write(sc, MTW_MAC_BSSID_DW1, bssid[4] | bssid[5] << 8);
}

static void
mtw_set_macaddr(struct mtw_softc *sc, const uint8_t *addr)
{
	mtw_write(sc, MTW_MAC_ADDR_DW0,
	    addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
	mtw_write(sc, MTW_MAC_ADDR_DW1, addr[4] | addr[5] << 8 | 0xff << 16);
}

static void
mtw_updateslot(struct ieee80211com *ic)
{
	struct mtw_softc *sc = ic->ic_softc;
	uint32_t i;

	i = MTW_CMDQ_GET(&sc->cmdq_store);
	MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i);
	sc->cmdq[i].func = mtw_updateslot_cb;
	sc->cmdq[i].arg0 = ic;
	ieee80211_runtask(ic, &sc->cmdq_task);
}

/*
 * Deferred half of mtw_updateslot(); runs from the cmdq task and writes
 * the current 802.11 slot time into the low byte of MTW_BKOFF_SLOT_CFG.
 */
/* ARGSUSED */
static void
mtw_updateslot_cb(void *arg)
{
	struct ieee80211com *ic = arg;
	struct mtw_softc *sc = ic->ic_softc;
	uint32_t tmp;

	mtw_read(sc, MTW_BKOFF_SLOT_CFG, &tmp);
	tmp &= ~0xff;
	tmp |= IEEE80211_GET_SLOTTIME(ic);
	mtw_write(sc, MTW_BKOFF_SLOT_CFG, tmp);
}

static void
mtw_update_mcast(struct ieee80211com *ic)
{
}

static int8_t
mtw_rssi2dbm(struct mtw_softc *sc, uint8_t rssi, uint8_t rxchain)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c = ic->ic_curchan;
	int delta;

	if (IEEE80211_IS_CHAN_5GHZ(c)) {
		u_int chan = ieee80211_chan2ieee(ic, c);
		delta = sc->rssi_5ghz[rxchain];

		/* determine channel group */
		if (chan <= 64)
			delta -= sc->lna[1];
		else if (chan <= 128)
			delta -= sc->lna[2];
		else
			delta -= sc->lna[3];
	} else
		delta = sc->rssi_2ghz[rxchain] - sc->lna[0];

	return (-12 - delta - rssi);
}

static int
mt7601_bbp_init(struct mtw_softc *sc)
{
	uint8_t bbp;
	int i, error, ntries;

	/* wait for BBP to wake up */
	for (ntries = 0; ntries < 20; ntries++) {
		if ((error = mtw_bbp_read(sc, 0, &bbp)) != 0)
			return (error);
		if (bbp != 0 && bbp != 0xff)
			break;
	}
	if (ntries == 20)
		return (ETIMEDOUT);

	mtw_bbp_read(sc, 3, &bbp);
	mtw_bbp_write(sc, 3, 0);
	mtw_bbp_read(sc, 105, &bbp);
	mtw_bbp_write(sc, 105, 0);

	/* initialize BBP registers to default values */
	for (i = 0; i < nitems(mt7601_def_bbp); i++) {
		if ((error = mtw_bbp_write(sc, mt7601_def_bbp[i].reg,
		    mt7601_def_bbp[i].val)) != 0)
			return (error);
	}

	sc->sc_bw_calibrated = 0;

	return (0);
}

static int
mt7601_rf_init(struct mtw_softc *sc)
{
	int i, error;

	/* RF bank 0 */
	for (i = 0; i < nitems(mt7601_rf_bank0); i++) {
		error = mtw_rf_write(sc, 0, mt7601_rf_bank0[i].reg,
		    mt7601_rf_bank0[i].val);
		if (error != 0)
			return (error);
	}
	/* RF bank 4 */
	for (i = 0; i < nitems(mt7601_rf_bank4); i++) {
		error = mtw_rf_write(sc, 4, mt7601_rf_bank4[i].reg,
		    mt7601_rf_bank4[i].val);
		if (error != 0)
			return (error);
	}
	/* RF bank 5 */
	for (i = 0; i < nitems(mt7601_rf_bank5); i++) {
		error = mtw_rf_write(sc, 5, mt7601_rf_bank5[i].reg,
		    mt7601_rf_bank5[i].val);
		if (error != 0)
			return (error);
	}
	return (0);
}
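
/*
 * Called at the end of mtw_init_locked(): enable MAC Tx, wait for the
 * WPDMA engine to go idle, then turn on Tx/Rx DMA, USB Rx bulk
 * aggregation and the Rx filter (monitor mode keeps only CRC/PHY error
 * dropping) before finally enabling MAC Rx.
 */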
static int
mtw_txrx_enable(struct mtw_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t tmp;
	int error, ntries;

	mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_TX_EN);
	for (ntries = 0; ntries < 200; ntries++) {
		if ((error = mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp)) != 0)
			return (error);
		if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
			break;
		mtw_delay(sc, 50);
	}
	if (ntries == 200)
		return (ETIMEDOUT);

	DELAY(50);

	tmp |= MTW_RX_DMA_EN | MTW_TX_DMA_EN | MTW_TX_WB_DDONE;
	mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);

	/* enable Rx bulk aggregation (set timeout and limit) */
	tmp = MTW_USB_TX_EN | MTW_USB_RX_EN | MTW_USB_RX_AGG_EN |
	    MTW_USB_RX_AGG_TO(128) | MTW_USB_RX_AGG_LMT(2);
	mtw_write(sc, MTW_USB_DMA_CFG, tmp);

	/* set Rx filter */
	tmp = MTW_DROP_CRC_ERR | MTW_DROP_PHY_ERR;
	if (ic->ic_opmode != IEEE80211_M_MONITOR) {
		tmp |= MTW_DROP_UC_NOME | MTW_DROP_DUPL | MTW_DROP_CTS |
		    MTW_DROP_BA | MTW_DROP_ACK | MTW_DROP_VER_ERR |
		    MTW_DROP_CTRL_RSV | MTW_DROP_CFACK | MTW_DROP_CFEND;
		if (ic->ic_opmode == IEEE80211_M_STA)
			tmp |= MTW_DROP_RTS | MTW_DROP_PSPOLL;
	}
	mtw_write(sc, MTW_RX_FILTR_CFG, tmp);

	mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_RX_EN | MTW_MAC_TX_EN);
	return (0);
}

static int
mt7601_rxdc_cal(struct mtw_softc *sc)
{
	uint32_t tmp;
	uint8_t bbp;
	int ntries;

	mtw_read(sc, MTW_MAC_SYS_CTRL, &tmp);
	mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_RX_EN);
	mtw_bbp_write(sc, 158, 0x8d);
	mtw_bbp_write(sc, 159, 0xfc);
	mtw_bbp_write(sc, 158, 0x8c);
	mtw_bbp_write(sc, 159, 0x4c);

	for (ntries = 0; ntries < 20; ntries++) {
		DELAY(300);
		mtw_bbp_write(sc, 158, 0x8c);
		mtw_bbp_read(sc, 159, &bbp);
		if (bbp == 0x0c)
			break;
	}
	if (ntries == 20)
		return (ETIMEDOUT);

	mtw_write(sc, MTW_MAC_SYS_CTRL, 0);
	mtw_bbp_write(sc, 158, 0x8d);
	mtw_bbp_write(sc, 159, 0xe0);
	mtw_write(sc, MTW_MAC_SYS_CTRL, tmp);
	return (0);
}

static int
mt7601_r49_read(struct mtw_softc *sc, uint8_t flag, int8_t *val)
{
	uint8_t bbp;

	mtw_bbp_read(sc, 47, &bbp);
	bbp = 0x90;
	mtw_bbp_write(sc, 47, bbp);
	bbp &= ~0x0f;
	bbp |= flag;
	mtw_bbp_write(sc, 47, bbp);
	return (mtw_bbp_read(sc, 49, (uint8_t *)val));
}

static int
mt7601_rf_temperature(struct mtw_softc *sc, int8_t *val)
{
	uint32_t rfb, rfs;
	uint8_t bbp;
	int ntries;

	mtw_read(sc, MTW_RF_BYPASS0, &rfb);
	mtw_read(sc, MTW_RF_SETTING0, &rfs);
	mtw_write(sc, MTW_RF_BYPASS0, 0);
	mtw_write(sc, MTW_RF_SETTING0, 0x10);
	mtw_write(sc, MTW_RF_BYPASS0, 0x10);

	mtw_bbp_read(sc, 47, &bbp);
	bbp &= ~0x7f;
	bbp |= 0x10;
	mtw_bbp_write(sc, 47, bbp);

	mtw_bbp_write(sc, 22, 0x40);

	for (ntries = 0; ntries < 10; ntries++) {
		mtw_bbp_read(sc, 47, &bbp);
		if ((bbp & 0x10) == 0)
			break;
	}
	if (ntries == 10)
		return (ETIMEDOUT);

	mt7601_r49_read(sc, MT7601_R47_TEMP, val);

	mtw_bbp_write(sc, 22, 0);

	mtw_bbp_read(sc, 21, &bbp);
	bbp |= 0x02;
	mtw_bbp_write(sc, 21, bbp);
	bbp &= ~0x02;
	mtw_bbp_write(sc, 21, bbp);

	mtw_write(sc, MTW_RF_BYPASS0, 0);
	mtw_write(sc, MTW_RF_SETTING0, rfs);
	mtw_write(sc, MTW_RF_BYPASS0, rfb);
	return (0);
}
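
/*
 * One-time RF bring-up and calibration, guarded by sc_rf_calibrated:
 * program the RF banks, set the frequency offset, read the baseband
 * temperature, calibrate the VCO via RF bank 0 register 4, then run a
 * series of MCU calibration commands interleaved with Rx DC offset
 * calibration.  The numeric mtw_mcu_calibrate() opcodes are
 * vendor-defined and not documented here.
 */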
static int
mt7601_rf_setup(struct mtw_softc *sc)
{
	uint32_t tmp;
	uint8_t rf;
	int error;

	if (sc->sc_rf_calibrated)
		return (0);

	/* init RF registers */
	if ((error = mt7601_rf_init(sc)) != 0)
		return (error);

	/* init frequency offset */
	mtw_rf_write(sc, 0, 12, sc->rf_freq_offset);
	mtw_rf_read(sc, 0, 12, &rf);

	/* read temperature */
	mt7601_rf_temperature(sc, &rf);
	sc->bbp_temp = rf;
	device_printf(sc->sc_dev, "BBP temp 0x%x\n", rf);

	mtw_rf_read(sc, 0, 7, &rf);
	if ((error = mtw_mcu_calibrate(sc, 0x1, 0)) != 0)
		return (error);
	mtw_delay(sc, 100);
	mtw_rf_read(sc, 0, 7, &rf);

	/* calibrate VCO RF 0/4 */
	mtw_rf_write(sc, 0, 4, 0x0a);
	mtw_rf_write(sc, 0, 4, 0x20);
	mtw_rf_read(sc, 0, 4, &rf);
	mtw_rf_write(sc, 0, 4, rf | 0x80);

	if ((error = mtw_mcu_calibrate(sc, 0x9, 0)) != 0)
		return (error);
	if ((error = mt7601_rxdc_cal(sc)) != 0)
		return (error);
	if ((error = mtw_mcu_calibrate(sc, 0x6, 1)) != 0)
		return (error);
	if ((error = mtw_mcu_calibrate(sc, 0x6, 0)) != 0)
		return (error);
	if ((error = mtw_mcu_calibrate(sc, 0x4, 0)) != 0)
		return (error);
	if ((error = mtw_mcu_calibrate(sc, 0x5, 0)) != 0)
		return (error);

	mtw_read(sc, MTW_LDO_CFG0, &tmp);
	tmp &= ~(1 << 4);
	tmp |= (1 << 2);
	mtw_write(sc, MTW_LDO_CFG0, tmp);

	if ((error = mtw_mcu_calibrate(sc, 0x8, 0)) != 0)
		return (error);
	if ((error = mt7601_rxdc_cal(sc)) != 0)
		return (error);

	sc->sc_rf_calibrated = 1;
	return (0);
}

static void
mtw_set_txrts(struct mtw_softc *sc)
{
	uint32_t tmp;

	/* set RTS threshold */
	mtw_read(sc, MTW_TX_RTS_CFG, &tmp);
	tmp &= ~0xffff00;
	tmp |= 0x1000 << MTW_RTS_THRES_SHIFT;
	mtw_write(sc, MTW_TX_RTS_CFG, tmp);
}

static int
mtw_mcu_radio(struct mtw_softc *sc, int func, uint32_t val)
{
	struct mtw_mcu_cmd_16 cmd;

	cmd.r1 = htole32(func);
	cmd.r2 = htole32(val);
	cmd.r3 = 0;
	cmd.r4 = 0;
	return (mtw_mcu_cmd(sc, 20, &cmd, sizeof(struct mtw_mcu_cmd_16)));
}
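
/*
 * Bring the hardware up.  mtw_stop() is called both up front (to reach a
 * known state) and on any failure, so an error leaves the device
 * quiesced rather than half-initialized.  The softc mutex must be held;
 * an illustrative call site looks like:
 *
 *	MTW_LOCK(sc);
 *	mtw_init_locked(sc);
 *	MTW_UNLOCK(sc);
 */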
static void
mtw_init_locked(struct mtw_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	uint32_t tmp;
	int i, error, ridx, ntries;

	if (ic->ic_nrunning > 1)
		return;
	mtw_stop(sc);

	for (i = 0; i != MTW_EP_QUEUES; i++)
		mtw_setup_tx_list(sc, &sc->sc_epq[i]);

	for (ntries = 0; ntries < 100; ntries++) {
		if ((error = mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp)) != 0)
			goto fail;
		if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
			break;
		DELAY(1000);
	}
	if (ntries == 100) {
		device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
		error = ETIMEDOUT;
		goto fail;
	}
	tmp &= 0xff0;
	tmp |= MTW_TX_WB_DDONE;
	mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);

	mtw_set_leds(sc, MTW_LED_MODE_ON);

	/* reset MAC and baseband */
	mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_BBP_HRST | MTW_MAC_SRST);
	mtw_write(sc, MTW_USB_DMA_CFG, 0);
	mtw_write(sc, MTW_MAC_SYS_CTRL, 0);

	/* init MAC values */
	if (sc->asic_ver == 0x7601) {
		for (i = 0; i < nitems(mt7601_def_mac); i++)
			mtw_write(sc, mt7601_def_mac[i].reg,
			    mt7601_def_mac[i].val);
	}

	/* wait while MAC is busy */
	for (ntries = 0; ntries < 100; ntries++) {
		if ((error = mtw_read(sc, MTW_MAC_STATUS_REG, &tmp)) != 0)
			goto fail;
		if (!(tmp & (MTW_RX_STATUS_BUSY | MTW_TX_STATUS_BUSY)))
			break;
		DELAY(1000);
	}
	if (ntries == 100) {
		error = ETIMEDOUT;
		goto fail;
	}

	/* set MAC address */
	mtw_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);

	/* clear WCID attribute table */
	mtw_set_region_4(sc, MTW_WCID_ATTR(0), 1, 8 * 32);

	mtw_write(sc, 0x1648, 0x00830083);
	mtw_read(sc, MTW_FCE_L2_STUFF, &tmp);
	tmp &= ~MTW_L2S_WR_MPDU_LEN_EN;
	mtw_write(sc, MTW_FCE_L2_STUFF, tmp);

	/* RTS config */
	mtw_set_txrts(sc);

	/* clear Host to MCU mailbox */
	mtw_write(sc, MTW_BBP_CSR, 0);
	mtw_write(sc, MTW_H2M_MAILBOX, 0);

	/* clear RX WCID search table */
	mtw_set_region_4(sc, MTW_WCID_ENTRY(0), 0xffffffff, 512);

	/* abort TSF synchronization */
	mtw_abort_tsf_sync(sc);

	mtw_read(sc, MTW_US_CYC_CNT, &tmp);
	tmp = (tmp & ~0xff);
	if (sc->asic_ver == 0x7601)
		tmp |= 0x1e;
	mtw_write(sc, MTW_US_CYC_CNT, tmp);

	/* clear shared key table */
	mtw_set_region_4(sc, MTW_SKEY(0, 0), 0, 8 * 32);

	/* clear IV/EIV table */
	mtw_set_region_4(sc, MTW_IVEIV(0), 0, 8 * 32);

	/* clear shared key mode */
	mtw_write(sc, MTW_SKEY_MODE_0_7, 0);
	mtw_write(sc, MTW_SKEY_MODE_8_15, 0);

	/* txop truncation */
	mtw_write(sc, MTW_TXOP_CTRL_CFG, 0x0000583f);

	/* init Tx power for all Tx rates */
	for (ridx = 0; ridx < 5; ridx++) {
		if (sc->txpow20mhz[ridx] == 0xffffffff)
			continue;
		mtw_write(sc, MTW_TX_PWR_CFG(ridx), sc->txpow20mhz[ridx]);
	}
	mtw_write(sc, MTW_TX_PWR_CFG7, 0);
	mtw_write(sc, MTW_TX_PWR_CFG9, 0);

	mtw_read(sc, MTW_CMB_CTRL, &tmp);
	tmp &= ~(1 << 18 | 1 << 14);
	mtw_write(sc, MTW_CMB_CTRL, tmp);

	/* clear USB DMA */
	mtw_write(sc, MTW_USB_DMA_CFG,
	    MTW_USB_TX_EN | MTW_USB_RX_EN | MTW_USB_RX_AGG_EN |
	    MTW_USB_TX_CLEAR | MTW_USB_TXOP_HALT | MTW_USB_RX_WL_DROP);
	mtw_delay(sc, 50);
	mtw_read(sc, MTW_USB_DMA_CFG, &tmp);
	tmp &= ~(MTW_USB_TX_CLEAR | MTW_USB_TXOP_HALT | MTW_USB_RX_WL_DROP);
	mtw_write(sc, MTW_USB_DMA_CFG, tmp);

	/* enable radio */
	mtw_mcu_radio(sc, 0x31, 0);

	/* init RF registers */
	if (sc->asic_ver == 0x7601)
		mt7601_rf_init(sc);

	/* init baseband registers */
	if (sc->asic_ver == 0x7601)
		error = mt7601_bbp_init(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not initialize BBP\n");
		goto fail;
	}

	/* setup and calibrate RF */
	error = mt7601_rf_setup(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not initialize RF\n");
		goto fail;
	}

	/* select default channel */
	mtw_set_chan(sc, ic->ic_curchan);

	/* setup initial protection mode */
	mtw_updateprot_cb(ic);

	sc->sc_flags |= MTW_RUNNING;
	sc->cmdq_run = MTW_CMDQ_GO;
	for (i = 0; i != MTW_N_XFER; i++)
		usbd_xfer_set_stall(sc->sc_xfer[i]);

	usbd_transfer_start(sc->sc_xfer[MTW_BULK_RX]);

	error = mtw_txrx_enable(sc);
	if (error != 0)
		goto fail;

	return;

fail:
	mtw_stop(sc);
}
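
/*
 * Quiesce the device.  Called with the softc mutex held; the lock is
 * dropped only around usbd_transfer_drain(), which blocks until the USB
 * transfer callbacks have completed.  Once the transfers are drained,
 * queued mbufs are released and Tx/Rx DMA, MAC Tx/Rx, RTS retries and
 * the PBF are shut down in turn.
 */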
static void
mtw_stop(void *arg)
{
	struct mtw_softc *sc = (struct mtw_softc *)arg;
	uint32_t tmp;
	int i, ntries, error;

	MTW_LOCK_ASSERT(sc, MA_OWNED);

	sc->sc_flags &= ~MTW_RUNNING;

	sc->ratectl_run = MTW_RATECTL_OFF;
	sc->cmdq_run = sc->cmdq_key_set;

	MTW_UNLOCK(sc);

	for (i = 0; i < MTW_N_XFER; i++)
		usbd_transfer_drain(sc->sc_xfer[i]);

	MTW_LOCK(sc);

	mtw_drain_mbufq(sc);

	if (sc->rx_m != NULL) {
		m_free(sc->rx_m);
		sc->rx_m = NULL;
	}

	/* Disable Tx/Rx DMA. */
	mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp);
	tmp &= ~(MTW_RX_DMA_EN | MTW_TX_DMA_EN);
	mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);
	// mtw_usb_dma_write(sc, 0);

	for (ntries = 0; ntries < 100; ntries++) {
		if (mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp) != 0)
			break;
		if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
			break;
		DELAY(10);
	}
	if (ntries == 100)
		device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");

	/* stop MAC Tx/Rx */
	mtw_read(sc, MTW_MAC_SYS_CTRL, &tmp);
	tmp &= ~(MTW_MAC_RX_EN | MTW_MAC_TX_EN);
	mtw_write(sc, MTW_MAC_SYS_CTRL, tmp);

	/* disable RTS retry */
	mtw_read(sc, MTW_TX_RTS_CFG, &tmp);
	tmp &= ~0xff;
	mtw_write(sc, MTW_TX_RTS_CFG, tmp);

	/* US_CYC_CFG */
	mtw_read(sc, MTW_US_CYC_CNT, &tmp);
	tmp = (tmp & ~0xff);
	mtw_write(sc, MTW_US_CYC_CNT, tmp);

	/* stop PBF */
	mtw_read(sc, MTW_PBF_CFG, &tmp);
	tmp &= ~0x3;
	mtw_write(sc, MTW_PBF_CFG, tmp);

	/* wait for pending Tx to complete */
	for (ntries = 0; ntries < 100; ntries++) {
		if ((error = mtw_read(sc, MTW_TXRXQ_PCNT, &tmp)) != 0)
			break;
		if ((tmp & MTW_TX2Q_PCNT_MASK) == 0)
			break;
	}
}

static void
mtw_delay(struct mtw_softc *sc, u_int ms)
{
	usb_pause_mtx(mtx_owned(&sc->sc_mtx) ? &sc->sc_mtx : NULL,
	    USB_MS_TO_TICKS(ms));
}

static void
mtw_update_chw(struct ieee80211com *ic)
{
	printf("%s: TODO\n", __func__);
}

static int
mtw_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	/* For now, no A-MPDU TX support in the driver */
	return (0);
}

static device_method_t mtw_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, mtw_match),
	DEVMETHOD(device_attach, mtw_attach),
	DEVMETHOD(device_detach, mtw_detach),

	DEVMETHOD_END
};

static driver_t mtw_driver = {
	.name = "mtw",
	.methods = mtw_methods,
	.size = sizeof(struct mtw_softc),
};

DRIVER_MODULE(mtw, uhub, mtw_driver, mtw_driver_loaded, NULL);
MODULE_DEPEND(mtw, wlan, 1, 1, 1);
MODULE_DEPEND(mtw, usb, 1, 1, 1);
MODULE_DEPEND(mtw, firmware, 1, 1, 1);
MODULE_VERSION(mtw, 1);
USB_PNP_HOST_INFO(mtw_devs);