/*-
 * Copyright (c) 2008-2010 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2013-2014 Kevin Lo
 * Copyright (c) 2021 James Hastings
 * Ported to FreeBSD by Jesper Schmitz Mouridsen <jsm@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * MediaTek MT7601U 802.11b/g/n WLAN.
 */

#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/firmware.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#include <netinet/if_ether.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>

#include "usbdevs.h"

#define USB_DEBUG_VAR mtw_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/usb_msctest.h>

#include "if_mtwreg.h"
#include "if_mtwvar.h"

#define MTW_DEBUG

#ifdef MTW_DEBUG
int mtw_debug;
static SYSCTL_NODE(_hw_usb, OID_AUTO, mtw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "USB mtw");
SYSCTL_INT(_hw_usb_mtw, OID_AUTO, debug, CTLFLAG_RWTUN, &mtw_debug, 0,
    "mtw debug level");

enum {
    MTW_DEBUG_XMIT = 0x00000001,        /* basic xmit operation */
    MTW_DEBUG_XMIT_DESC = 0x00000002,   /* xmit descriptors */
    MTW_DEBUG_RECV = 0x00000004,        /* basic recv operation */
    MTW_DEBUG_RECV_DESC = 0x00000008,   /* recv descriptors */
    MTW_DEBUG_STATE = 0x00000010,       /* 802.11 state transitions */
    MTW_DEBUG_RATE = 0x00000020,        /* rate adaptation */
    MTW_DEBUG_USB = 0x00000040,         /* usb requests */
    MTW_DEBUG_FIRMWARE = 0x00000080,    /* firmware(9) loading debug */
    MTW_DEBUG_BEACON = 0x00000100,      /* beacon handling */
    MTW_DEBUG_INTR = 0x00000200,        /* ISR */
    MTW_DEBUG_TEMP = 0x00000400,        /* temperature calibration */
    MTW_DEBUG_ROM = 0x00000800,         /* various ROM info */
    MTW_DEBUG_KEY = 0x00001000,         /* crypto keys management */
    MTW_DEBUG_TXPWR = 0x00002000,       /* dump Tx power values */
    MTW_DEBUG_RSSI = 0x00004000,        /* dump RSSI lookups */
    MTW_DEBUG_RESET = 0x00008000,       /* initialization progress */
    MTW_DEBUG_CALIB = 0x00010000,       /* calibration progress */
    MTW_DEBUG_CMD = 0x00020000,         /* command queue */
    MTW_DEBUG_ANY = 0xffffffff
};

#define MTW_DPRINTF(_sc, _m, ...)                               \
    do {                                                        \
        if (mtw_debug & (_m))                                   \
            device_printf((_sc)->sc_dev, __VA_ARGS__);          \
    } while (0)

#else
#define MTW_DPRINTF(_sc, _m, ...)       \
    do {                                \
        (void)_sc;                      \
    } while (0)
#endif

#define IEEE80211_HAS_ADDR4(wh) IEEE80211_IS_DSTODS(wh)

/* NB: "11" is the maximum number of padding bytes needed for Tx */
#define MTW_MAX_TXSZ \
    (sizeof(struct mtw_txd) + sizeof(struct mtw_txwi) + MCLBYTES + 11)

/*
 * Because of LOR in mtw_key_delete(), use atomic instead.
 * '& MTW_CMDQ_MASQ' is to loop cmdq[].
 */
#define MTW_CMDQ_GET(c) (atomic_fetchadd_32((c), 1) & MTW_CMDQ_MASQ)

static const STRUCT_USB_HOST_ID mtw_devs[] = {
#define MTW_DEV(v, p)                                           \
    {                                                           \
        USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p)           \
    }
    MTW_DEV(EDIMAX, MT7601U),
    MTW_DEV(RALINK, MT7601U),
    MTW_DEV(XIAOMI, MT7601U)
};
#undef MTW_DEV

static device_probe_t mtw_match;
static device_attach_t mtw_attach;
static device_detach_t mtw_detach;

static usb_callback_t mtw_bulk_rx_callback;
static usb_callback_t mtw_bulk_tx_callback0;
static usb_callback_t mtw_bulk_tx_callback1;
static usb_callback_t mtw_bulk_tx_callback2;
static usb_callback_t mtw_bulk_tx_callback3;
static usb_callback_t mtw_bulk_tx_callback4;
static usb_callback_t mtw_bulk_tx_callback5;
static usb_callback_t mtw_fw_callback;

static void mtw_autoinst(void *, struct usb_device *, struct usb_attach_arg *);
static int mtw_driver_loaded(struct module *, int, void *);
static void mtw_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error,
    u_int index);
static struct ieee80211vap *mtw_vap_create(struct ieee80211com *,
    const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
    const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
static void mtw_vap_delete(struct ieee80211vap *);
static void mtw_cmdq_cb(void *, int);
static void mtw_setup_tx_list(struct mtw_softc *, struct mtw_endpoint_queue *);
static void mtw_unsetup_tx_list(struct mtw_softc *,
    struct mtw_endpoint_queue *);
static void mtw_load_microcode(void *arg);

static usb_error_t mtw_do_request(struct mtw_softc *,
    struct usb_device_request *, void *);
static int mtw_read(struct mtw_softc *, uint16_t, uint32_t *);
static int mtw_read_region_1(struct mtw_softc *, uint16_t, uint8_t *, int);
static int mtw_write_2(struct mtw_softc *, uint16_t, uint16_t);
static int mtw_write(struct mtw_softc *, uint16_t, uint32_t);
static int mtw_write_region_1(struct mtw_softc *, uint16_t, uint8_t *, int);
static int mtw_set_region_4(struct mtw_softc *, uint16_t, uint32_t, int);
static int mtw_efuse_read_2(struct mtw_softc *, uint16_t, uint16_t *);
static int mtw_bbp_read(struct mtw_softc *, uint8_t, uint8_t *);
static int mtw_bbp_write(struct mtw_softc *, uint8_t, uint8_t);
static int mtw_mcu_cmd(struct mtw_softc *sc, uint8_t cmd, void *buf, int len);
static void mtw_get_txpower(struct mtw_softc *);
static int mtw_read_eeprom(struct mtw_softc *);
static struct ieee80211_node *mtw_node_alloc(struct ieee80211vap *,
    const uint8_t mac[IEEE80211_ADDR_LEN]);
static int mtw_media_change(if_t);
static int
mtw_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static int mtw_wme_update(struct ieee80211com *);
static void mtw_key_set_cb(void *);
static int mtw_key_set(struct ieee80211vap *, struct ieee80211_key *);
static void mtw_key_delete_cb(void *);
static int mtw_key_delete(struct ieee80211vap *, struct ieee80211_key *);
static void mtw_ratectl_to(void *);
static void mtw_ratectl_cb(void *, int);
static void mtw_drain_fifo(void *);
static void mtw_iter_func(void *, struct ieee80211_node *);
static void mtw_newassoc_cb(void *);
static void mtw_newassoc(struct ieee80211_node *, int);
static int mtw_mcu_radio(struct mtw_softc *sc, int func, uint32_t val);
static void mtw_recv_mgmt(struct ieee80211_node *, struct mbuf *, int,
    const struct ieee80211_rx_stats *, int, int);
static void mtw_rx_frame(struct mtw_softc *, struct mbuf *, uint32_t);
static void mtw_tx_free(struct mtw_endpoint_queue *pq, struct mtw_tx_data *,
    int);
static void mtw_set_tx_desc(struct mtw_softc *, struct mtw_tx_data *);
static int mtw_tx(struct mtw_softc *, struct mbuf *, struct ieee80211_node *);
static int mtw_tx_mgt(struct mtw_softc *, struct mbuf *,
    struct ieee80211_node *);
static int mtw_sendprot(struct mtw_softc *, const struct mbuf *,
    struct ieee80211_node *, int, int);
static int mtw_tx_param(struct mtw_softc *, struct mbuf *,
    struct ieee80211_node *, const struct ieee80211_bpf_params *);
static int mtw_raw_xmit(struct ieee80211_node *, struct mbuf *,
    const struct ieee80211_bpf_params *);
static int mtw_transmit(struct ieee80211com *, struct mbuf *);
static void mtw_start(struct mtw_softc *);
static void mtw_parent(struct ieee80211com *);
static void mtw_select_chan_group(struct mtw_softc *, int);

static int mtw_set_chan(struct mtw_softc *, struct ieee80211_channel *);
static void mtw_set_channel(struct ieee80211com *);
static void mtw_getradiocaps(struct ieee80211com *, int, int *,
    struct ieee80211_channel[]);
static void mtw_scan_start(struct ieee80211com *);
static void mtw_scan_end(struct ieee80211com *);
static void mtw_update_beacon(struct ieee80211vap *, int);
static void mtw_update_beacon_cb(void *);
static void mtw_updateprot(struct ieee80211com *);
static void mtw_updateprot_cb(void *);
static void mtw_usb_timeout_cb(void *);
static int mtw_reset(struct mtw_softc *sc);
static void mtw_enable_tsf_sync(struct mtw_softc *);

static void mtw_enable_mrr(struct mtw_softc *);
static void mtw_set_txpreamble(struct mtw_softc *);
static void mtw_set_basicrates(struct mtw_softc *);
static void mtw_set_leds(struct mtw_softc *, uint16_t);
static void mtw_set_bssid(struct mtw_softc *, const uint8_t *);
static void mtw_set_macaddr(struct mtw_softc *, const uint8_t *);
static void mtw_updateslot(struct ieee80211com *);
static void mtw_updateslot_cb(void *);
static void mtw_update_mcast(struct ieee80211com *);
static int8_t mtw_rssi2dbm(struct mtw_softc *, uint8_t, uint8_t);
static void mtw_update_promisc_locked(struct mtw_softc *);
static void mtw_update_promisc(struct ieee80211com *);
static int mtw_txrx_enable(struct mtw_softc *);
static void mtw_init_locked(struct mtw_softc *);
static void mtw_stop(void *);
static void mtw_delay(struct mtw_softc *, u_int);
static void mtw_update_chw(struct ieee80211com *ic);
static int mtw_ampdu_enable(struct ieee80211_node *ni,
    struct ieee80211_tx_ampdu *tap);
static eventhandler_tag mtw_etag;

static const struct {
    uint8_t reg;
    uint8_t val;
} mt7601_rf_bank0[] = { MT7601_BANK0_RF },
  mt7601_rf_bank4[] = { MT7601_BANK4_RF },
  mt7601_rf_bank5[] = { MT7601_BANK5_RF };

static const struct {
    uint32_t reg;
    uint32_t val;
} mt7601_def_mac[] = { MT7601_DEF_MAC };

static const struct {
    uint8_t reg;
    uint8_t val;
} mt7601_def_bbp[] = { MT7601_DEF_BBP };

static const struct {
    u_int chan;
    uint8_t r17, r18, r19, r20;
} mt7601_rf_chan[] = { MT7601_RF_CHAN };

static const struct usb_config mtw_config[MTW_N_XFER] = {
    [MTW_BULK_RX] = {
        .type = UE_BULK,
        .endpoint = UE_ADDR_ANY,
        .direction = UE_DIR_IN,
        .bufsize = MTW_MAX_RXSZ,
        .flags = {.pipe_bof = 1, .short_xfer_ok = 1,},
        .callback = mtw_bulk_rx_callback,
    },
    [MTW_BULK_TX_BE] = {
        .type = UE_BULK,
        .endpoint = UE_ADDR_ANY,
        .direction = UE_DIR_OUT,
        .bufsize = MTW_MAX_TXSZ,
        .flags = {.pipe_bof = 1, .force_short_xfer = 0,},
        .callback = mtw_bulk_tx_callback0,
        .timeout = 5000,        /* ms */
    },
    [MTW_BULK_TX_BK] = {
        .type = UE_BULK,
        .endpoint = UE_ADDR_ANY,
        .direction = UE_DIR_OUT,
        .bufsize = MTW_MAX_TXSZ,
        .flags = {.pipe_bof = 1, .force_short_xfer = 1,},
        .callback = mtw_bulk_tx_callback1,
        .timeout = 5000,        /* ms */
    },
    [MTW_BULK_TX_VI] = {
        .type = UE_BULK,
        .endpoint = UE_ADDR_ANY,
        .direction = UE_DIR_OUT,
        .bufsize = MTW_MAX_TXSZ,
        .flags = {.pipe_bof = 1, .force_short_xfer = 1,},
        .callback = mtw_bulk_tx_callback2,
        .timeout = 5000,        /* ms */
    },
    [MTW_BULK_TX_VO] = {
        .type = UE_BULK,
        .endpoint = UE_ADDR_ANY,
        .direction = UE_DIR_OUT,
        .bufsize = MTW_MAX_TXSZ,
        .flags = {.pipe_bof = 1, .force_short_xfer = 1,},
        .callback = mtw_bulk_tx_callback3,
        .timeout = 5000,        /* ms */
    },
    [MTW_BULK_TX_HCCA] = {
        .type = UE_BULK,
        .endpoint = UE_ADDR_ANY,
        .direction = UE_DIR_OUT,
        .bufsize = MTW_MAX_TXSZ,
        .flags = {.pipe_bof = 1, .force_short_xfer = 1, .no_pipe_ok = 1,},
        .callback = mtw_bulk_tx_callback4,
        .timeout = 5000,        /* ms */
    },
    [MTW_BULK_TX_PRIO] = {
        .type = UE_BULK,
        .endpoint = UE_ADDR_ANY,
        .direction = UE_DIR_OUT,
        .bufsize = MTW_MAX_TXSZ,
        .flags = {.pipe_bof = 1, .force_short_xfer = 1, .no_pipe_ok = 1,},
        .callback = mtw_bulk_tx_callback5,
        .timeout = 5000,        /* ms */
    },
    [MTW_BULK_FW_CMD] = {
        .type = UE_BULK,
        .endpoint = UE_ADDR_ANY,
        .direction = UE_DIR_OUT,
        .bufsize = 0x2c44,
        .flags = {.pipe_bof = 1, .force_short_xfer = 1, .no_pipe_ok = 1,},
        .callback = mtw_fw_callback,
    },
    [MTW_BULK_RAW_TX] = {
        .type = UE_BULK,
        .ep_index = 0,
        .endpoint = UE_ADDR_ANY,
        .direction = UE_DIR_OUT,
        .bufsize = MTW_MAX_TXSZ,
        .flags = {.pipe_bof = 1, .force_short_xfer = 1, .no_pipe_ok = 1,},
        .callback = mtw_bulk_tx_callback0,
        .timeout = 5000,        /* ms */
    },
};

static uint8_t mtw_wme_ac_xfer_map[4] = {
    [WME_AC_BE] = MTW_BULK_TX_BE,
    [WME_AC_BK] = MTW_BULK_TX_BK,
    [WME_AC_VI] = MTW_BULK_TX_VI,
    [WME_AC_VO] = MTW_BULK_TX_VO,
};
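
/*
 * Some MT7601U dongles first enumerate as a USB mass-storage device (a
 * virtual "driver CD").  mtw_autoinst() runs from the usb_dev_configured
 * event handler and ejects that disc so the device re-enumerates as the
 * WLAN interface matched by mtw_devs[] above.
 */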
static void
mtw_autoinst(void *arg, struct usb_device *udev, struct usb_attach_arg *uaa)
{
    struct usb_interface *iface;
    struct usb_interface_descriptor *id;

    if (uaa->dev_state != UAA_DEV_READY)
        return;

    iface = usbd_get_iface(udev, 0);
    if (iface == NULL)
        return;
    id = iface->idesc;
    if (id == NULL || id->bInterfaceClass != UICLASS_MASS)
        return;
    if (usbd_lookup_id_by_uaa(mtw_devs, sizeof(mtw_devs), uaa))
        return;

    if (usb_msc_eject(udev, 0, MSC_EJECT_STOPUNIT) == 0)
        uaa->dev_state = UAA_DEV_EJECTING;
}

static int
mtw_driver_loaded(struct module *mod, int what, void *arg)
{
    switch (what) {
    case MOD_LOAD:
        mtw_etag = EVENTHANDLER_REGISTER(usb_dev_configured,
            mtw_autoinst, NULL, EVENTHANDLER_PRI_ANY);
        break;
    case MOD_UNLOAD:
        EVENTHANDLER_DEREGISTER(usb_dev_configured, mtw_etag);
        break;
    default:
        return (EOPNOTSUPP);
    }
    return (0);
}

static const char *
mtw_get_rf(int rev)
{
    switch (rev) {
    case MT7601_RF_7601:
        return ("MT7601");
    case MT7610_RF_7610:
        return ("MT7610");
    case MT7612_RF_7612:
        return ("MT7612");
    }
    return ("unknown");
}

static int
mtw_wlan_enable(struct mtw_softc *sc, int enable)
{
    uint32_t tmp;
    int error = 0;

    if (enable) {
        mtw_read(sc, MTW_WLAN_CTRL, &tmp);
        if (sc->asic_ver == 0x7612)
            tmp &= ~0xfffff000;

        tmp &= ~MTW_WLAN_CLK_EN;
        tmp |= MTW_WLAN_EN;
        mtw_write(sc, MTW_WLAN_CTRL, tmp);
        mtw_delay(sc, 2);

        tmp |= MTW_WLAN_CLK_EN;
        if (sc->asic_ver == 0x7612) {
            tmp |= (MTW_WLAN_RESET | MTW_WLAN_RESET_RF);
        }
        mtw_write(sc, MTW_WLAN_CTRL, tmp);
        mtw_delay(sc, 2);

        mtw_read(sc, MTW_OSC_CTRL, &tmp);
        tmp |= MTW_OSC_EN;
        mtw_write(sc, MTW_OSC_CTRL, tmp);
        tmp |= MTW_OSC_CAL_REQ;
        mtw_write(sc, MTW_OSC_CTRL, tmp);
    } else {
        mtw_read(sc, MTW_WLAN_CTRL, &tmp);
        tmp &= ~(MTW_WLAN_CLK_EN | MTW_WLAN_EN);
        mtw_write(sc, MTW_WLAN_CTRL, tmp);

        mtw_read(sc, MTW_OSC_CTRL, &tmp);
        tmp &= ~MTW_OSC_EN;
        mtw_write(sc, MTW_OSC_CTRL, tmp);
    }
    return (error);
}

static int
mtw_read_cfg(struct mtw_softc *sc, uint16_t reg, uint32_t *val)
{
    usb_device_request_t req;
    uint32_t tmp;
    uint16_t actlen;
    int error;

    req.bmRequestType = UT_READ_VENDOR_DEVICE;
    req.bRequest = MTW_READ_CFG;
    USETW(req.wValue, 0);
    USETW(req.wIndex, reg);
    USETW(req.wLength, 4);
    error = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, &tmp, 0,
        &actlen, 1000);

    if (error == 0)
        *val = le32toh(tmp);
    else
        *val = 0xffffffff;
    return (error);
}

static int
mtw_match(device_t self)
{
    struct usb_attach_arg *uaa = device_get_ivars(self);

    if (uaa->usb_mode != USB_MODE_HOST)
        return (ENXIO);
    if (uaa->info.bConfigIndex != 0)
        return (ENXIO);
    if (uaa->info.bIfaceIndex != 0)
        return (ENXIO);

    return (usbd_lookup_id_by_uaa(mtw_devs, sizeof(mtw_devs), uaa));
}

static int
mtw_attach(device_t self)
{
    struct mtw_softc *sc = device_get_softc(self);
    struct usb_attach_arg *uaa = device_get_ivars(self);
    struct ieee80211com *ic = &sc->sc_ic;
    uint32_t ver;
    int i, ret;
    // uint32_t tmp;
    uint8_t iface_index;
    int ntries, error;

    device_set_usb_desc(self);
    sc->sc_udev = uaa->device;
    sc->sc_dev = self;
    sc->sc_sent = 0;

    mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev),
        MTX_NETWORK_LOCK, MTX_DEF);

    iface_index = 0;
    error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
        mtw_config, MTW_N_XFER, sc, &sc->sc_mtx);
    if (error) {
        device_printf(sc->sc_dev,
            "could not allocate USB transfers, err=%s\n",
            usbd_errstr(error));
        goto detach;
    }
    for (i = 0; i < 4; i++) {
        sc->txd_fw[i] = (struct mtw_txd_fw *)
            malloc(sizeof(struct mtw_txd_fw),
            M_USBDEV, M_NOWAIT | M_ZERO);
    }
    MTW_LOCK(sc);
    sc->sc_idx = 0;
    mbufq_init(&sc->sc_snd, ifqmaxlen);

    /* enable WLAN core */
    if ((error = mtw_wlan_enable(sc, 1)) != 0) {
        device_printf(sc->sc_dev, "could not enable WLAN core\n");
        goto detach;
    }

    /* wait for the chip to settle */
    DELAY(100);
    for (ntries = 0; ntries < 100; ntries++) {
        if (mtw_read(sc, MTW_ASIC_VER, &ver) != 0) {
            goto detach;
        }
        if (ver != 0 && ver != 0xffffffff)
            break;
        DELAY(10);
    }
    if (ntries == 100) {
        device_printf(sc->sc_dev,
            "timeout waiting for NIC to initialize\n");
        goto detach;
    }
    sc->asic_ver = ver >> 16;
    sc->asic_rev = ver & 0xffff;
    DELAY(100);
    if (sc->asic_ver != 0x7601) {
        device_printf(sc->sc_dev,
            "Your revision 0x%04x is not supported yet\n",
            sc->asic_rev);
        goto detach;
    }

    mtw_load_microcode(sc);
    ret = msleep(&sc->fwloading, &sc->sc_mtx, 0, "fwload", 3 * hz);
    if (ret == EWOULDBLOCK || sc->fwloading != 1) {
        device_printf(sc->sc_dev,
            "timeout waiting for MCU to initialize\n");
        goto detach;
    }

    sc->sc_srom_read = mtw_efuse_read_2;
    /* retrieve RF rev. no and various other things from EEPROM */
    mtw_read_eeprom(sc);

    device_printf(sc->sc_dev,
        "MAC/BBP RT%04X (rev 0x%04X), RF %s (MIMO %dT%dR), address %s\n",
        sc->asic_ver, sc->mac_rev, mtw_get_rf(sc->rf_rev), sc->ntxchains,
        sc->nrxchains, ether_sprintf(ic->ic_macaddr));
    DELAY(100);

    // mtw_set_leds(sc, 5);
    // mtw_mcu_radio(sc, 0x31, 0);
    MTW_UNLOCK(sc);

    ic->ic_softc = sc;
    ic->ic_name = device_get_nameunit(self);
    ic->ic_phytype = IEEE80211_T_OFDM;  /* not only, but not used */
    ic->ic_opmode = IEEE80211_M_STA;    /* default to BSS mode */

    ic->ic_caps = IEEE80211_C_STA |     /* station mode supported */
        IEEE80211_C_MONITOR |           /* monitor mode supported */
        IEEE80211_C_IBSS |
        IEEE80211_C_HOSTAP |
        IEEE80211_C_WDS |               /* 4-address traffic works */
        IEEE80211_C_MBSS |
        IEEE80211_C_SHPREAMBLE |        /* short preamble supported */
        IEEE80211_C_SHSLOT |            /* short slot time supported */
        IEEE80211_C_WME |               /* WME */
        IEEE80211_C_WPA;                /* WPA1|WPA2(RSN) */
    device_printf(sc->sc_dev, "[HT] Enabling 802.11n\n");
    ic->ic_htcaps = IEEE80211_HTC_HT
        | IEEE80211_HTC_AMPDU
        | IEEE80211_HTC_AMSDU
        | IEEE80211_HTCAP_MAXAMSDU_3839
        | IEEE80211_HTCAP_SMPS_OFF;

    ic->ic_rxstream = sc->nrxchains;
    ic->ic_txstream = sc->ntxchains;

    ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_AES_CCM |
        IEEE80211_CRYPTO_AES_OCB | IEEE80211_CRYPTO_TKIP |
        IEEE80211_CRYPTO_TKIPMIC;

    ic->ic_flags |= IEEE80211_F_DATAPAD;
    ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS;

    mtw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
        ic->ic_channels);

    ieee80211_ifattach(ic);

    ic->ic_scan_start = mtw_scan_start;
    ic->ic_scan_end = mtw_scan_end;
    ic->ic_set_channel = mtw_set_channel;
    ic->ic_getradiocaps = mtw_getradiocaps;
    ic->ic_node_alloc = mtw_node_alloc;
    ic->ic_newassoc = mtw_newassoc;
    ic->ic_update_mcast = mtw_update_mcast;
    ic->ic_updateslot = mtw_updateslot;
    ic->ic_wme.wme_update = mtw_wme_update;
    ic->ic_raw_xmit = mtw_raw_xmit;
    ic->ic_update_promisc = mtw_update_promisc;
    ic->ic_vap_create = mtw_vap_create;
    ic->ic_vap_delete = mtw_vap_delete;
    ic->ic_transmit = mtw_transmit;
    ic->ic_parent = mtw_parent;

    ic->ic_update_chw = mtw_update_chw;
    ic->ic_ampdu_enable = mtw_ampdu_enable;

    ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr,
        sizeof(sc->sc_txtap), MTW_TX_RADIOTAP_PRESENT,
        &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
        MTW_RX_RADIOTAP_PRESENT);
    TASK_INIT(&sc->cmdq_task, 0, mtw_cmdq_cb, sc);
    TASK_INIT(&sc->ratectl_task, 0, mtw_ratectl_cb, sc);
    usb_callout_init_mtx(&sc->ratectl_ch, &sc->sc_mtx, 0);

    if (bootverbose)
        ieee80211_announce(ic);

    return (0);

detach:
    MTW_UNLOCK(sc);
    mtw_detach(self);
    return (ENXIO);
}

static void
mtw_drain_mbufq(struct mtw_softc *sc)
{
    struct mbuf *m;
    struct ieee80211_node *ni;

    MTW_LOCK_ASSERT(sc, MA_OWNED);
    while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
        ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
        m->m_pkthdr.rcvif = NULL;
        ieee80211_free_node(ni);
        m_freem(m);
    }
}

static int
mtw_detach(device_t self)
{
    struct mtw_softc *sc = device_get_softc(self);
    struct ieee80211com *ic = &sc->sc_ic;
    int i;

    MTW_LOCK(sc);
    mtw_reset(sc);
    DELAY(10000);
    sc->sc_detached = 1;
    MTW_UNLOCK(sc);

    /* stop all USB transfers */
    for (i = 0; i < MTW_N_XFER; i++)
        usbd_transfer_drain(sc->sc_xfer[i]);

    MTW_LOCK(sc);
    sc->ratectl_run = MTW_RATECTL_OFF;
    sc->cmdq_run = sc->cmdq_key_set = MTW_CMDQ_ABORT;

    /* free TX list, if any */
    if (ic->ic_nrunning > 0)
        for (i = 0; i < MTW_EP_QUEUES; i++)
            mtw_unsetup_tx_list(sc, &sc->sc_epq[i]);

    /* Free TX queue */
    mtw_drain_mbufq(sc);
    MTW_UNLOCK(sc);

    if (sc->sc_ic.ic_softc == sc) {
        /* drain tasks */
        usb_callout_drain(&sc->ratectl_ch);
        ieee80211_draintask(ic, &sc->cmdq_task);
        ieee80211_draintask(ic, &sc->ratectl_task);
        ieee80211_ifdetach(ic);
    }
    for (i = 0; i < 4; i++) {
        free(sc->txd_fw[i], M_USBDEV);
    }
    firmware_unregister("/mediatek/mt7601u");
    mtx_destroy(&sc->sc_mtx);

    return (0);
}

static struct ieee80211vap *
mtw_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
    struct mtw_softc *sc = ic->ic_softc;
    struct mtw_vap *rvp;
    struct ieee80211vap *vap;
    int i;

    if (sc->rvp_cnt >= MTW_VAP_MAX) {
        device_printf(sc->sc_dev, "number of VAPs maxed out\n");
        return (NULL);
    }

    switch (opmode) {
    case IEEE80211_M_STA:
        /* enable s/w bmiss handling for sta mode */
        flags |= IEEE80211_CLONE_NOBEACONS;
        /* fall through */
    case IEEE80211_M_IBSS:
    case IEEE80211_M_MONITOR:
    case IEEE80211_M_HOSTAP:
    case IEEE80211_M_MBSS:
        /* other than WDS vaps, only one at a time */
        if (!TAILQ_EMPTY(&ic->ic_vaps))
            return (NULL);
        break;
    case IEEE80211_M_WDS:
        TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
            if (vap->iv_opmode != IEEE80211_M_HOSTAP)
                continue;
            /* WDS vaps always share the local mac address. */
            flags &= ~IEEE80211_CLONE_BSSID;
            break;
        }
        if (vap == NULL) {
            device_printf(sc->sc_dev,
                "wds only supported in ap mode\n");
            return (NULL);
        }
        break;
    default:
        device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
        return (NULL);
    }

    rvp = malloc(sizeof(struct mtw_vap), M_80211_VAP, M_WAITOK | M_ZERO);
    vap = &rvp->vap;

    if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid) !=
        0) {
        /* out of memory */
        free(rvp, M_80211_VAP);
        return (NULL);
    }

    vap->iv_update_beacon = mtw_update_beacon;
    vap->iv_max_aid = MTW_WCID_MAX;

    /*
     * The linux rt2800 driver limits 1 stream devices to a 32KB
     * RX AMPDU.
     */
    if (ic->ic_rxstream > 1)
        vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
    else
        vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_32K;
    vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_2;  /* 2uS */

    /*
     * To delete the right key from h/w, we need the wcid.
     * Luckily, there is unused space in ieee80211_key{}, wk_pad,
     * and the matching wcid is written into it.  So, cast some
     * spells to remove 'const' from ieee80211_key{}.
     */
    vap->iv_key_delete = (void *)mtw_key_delete;
    vap->iv_key_set = (void *)mtw_key_set;

    /* override the state transition machine */
    rvp->newstate = vap->iv_newstate;
    vap->iv_newstate = mtw_newstate;
    if (opmode == IEEE80211_M_IBSS) {
        rvp->recv_mgmt = vap->iv_recv_mgmt;
        vap->iv_recv_mgmt = mtw_recv_mgmt;
    }

    ieee80211_ratectl_init(vap);
    ieee80211_ratectl_setinterval(vap, 1000);  /* 1 second */

    /* complete setup */
    ieee80211_vap_attach(vap, mtw_media_change, ieee80211_media_status,
        mac);

    /* make sure id is always unique */
    for (i = 0; i < MTW_VAP_MAX; i++) {
        if ((sc->rvp_bmap & 1 << i) == 0) {
            sc->rvp_bmap |= 1 << i;
            rvp->rvp_id = i;
            break;
        }
    }
    if (sc->rvp_cnt++ == 0)
        ic->ic_opmode = opmode;

    if (opmode == IEEE80211_M_HOSTAP)
        sc->cmdq_run = MTW_CMDQ_GO;

    MTW_DPRINTF(sc, MTW_DEBUG_STATE, "rvp_id=%d bmap=%x rvp_cnt=%d\n",
        rvp->rvp_id, sc->rvp_bmap, sc->rvp_cnt);

    return (vap);
}

static void
mtw_vap_delete(struct ieee80211vap *vap)
{
    struct mtw_vap *rvp = MTW_VAP(vap);
    struct ieee80211com *ic;
    struct mtw_softc *sc;
    uint8_t rvp_id;

    if (vap == NULL)
        return;

    ic = vap->iv_ic;
    sc = ic->ic_softc;

    MTW_LOCK(sc);
    m_freem(rvp->beacon_mbuf);
    rvp->beacon_mbuf = NULL;

    rvp_id = rvp->rvp_id;
    sc->ratectl_run &= ~(1 << rvp_id);
    sc->rvp_bmap &= ~(1 << rvp_id);
    mtw_set_region_4(sc, MTW_SKEY(rvp_id, 0), 0, 256);
    mtw_set_region_4(sc, (0x7800 + (rvp_id) * 512), 0, 512);
    --sc->rvp_cnt;

    MTW_DPRINTF(sc, MTW_DEBUG_STATE,
        "vap=%p rvp_id=%d bmap=%x rvp_cnt=%d\n", vap, rvp_id, sc->rvp_bmap,
        sc->rvp_cnt);

    MTW_UNLOCK(sc);

    ieee80211_ratectl_deinit(vap);
    ieee80211_vap_detach(vap);
    free(rvp, M_80211_VAP);
}

/*
 * A number of functions need to be called from a process context.
 * Rather than creating a taskqueue event for each of them, this is the
 * all-for-one taskqueue callback function.  It guarantees that deferred
 * functions are executed in the same order they were enqueued.
 * '& MTW_CMDQ_MASQ' is to loop cmdq[].
 */
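/*
 * Producer side, as used elsewhere in this file: reserve a slot with
 * MTW_CMDQ_GET(&sc->cmdq_store), fill in cmdq[i].func and its arguments,
 * then ieee80211_runtask(ic, &sc->cmdq_task) so that mtw_cmdq_cb() below
 * runs the deferred function with the softc lock held.
 */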
static void
mtw_cmdq_cb(void *arg, int pending)
{
    struct mtw_softc *sc = arg;
    uint8_t i;

    /* call cmdq[].func locked */
    MTW_LOCK(sc);
    for (i = sc->cmdq_exec; sc->cmdq[i].func && pending;
         i = sc->cmdq_exec, pending--) {
        MTW_DPRINTF(sc, MTW_DEBUG_CMD, "cmdq_exec=%d pending=%d\n", i,
            pending);
        if (sc->cmdq_run == MTW_CMDQ_GO) {
            /*
             * If arg0 is NULL, callback func needs more
             * than one arg. So, pass ptr to cmdq struct.
             */
            if (sc->cmdq[i].arg0)
                sc->cmdq[i].func(sc->cmdq[i].arg0);
            else
                sc->cmdq[i].func(&sc->cmdq[i]);
        }
        sc->cmdq[i].arg0 = NULL;
        sc->cmdq[i].func = NULL;
        sc->cmdq_exec++;
        sc->cmdq_exec &= MTW_CMDQ_MASQ;
    }
    MTW_UNLOCK(sc);
}

static void
mtw_setup_tx_list(struct mtw_softc *sc, struct mtw_endpoint_queue *pq)
{
    struct mtw_tx_data *data;

    memset(pq, 0, sizeof(*pq));

    STAILQ_INIT(&pq->tx_qh);
    STAILQ_INIT(&pq->tx_fh);

    for (data = &pq->tx_data[0]; data < &pq->tx_data[MTW_TX_RING_COUNT];
         data++) {
        data->sc = sc;
        STAILQ_INSERT_TAIL(&pq->tx_fh, data, next);
    }
    pq->tx_nfree = MTW_TX_RING_COUNT;
}

static void
mtw_unsetup_tx_list(struct mtw_softc *sc, struct mtw_endpoint_queue *pq)
{
    struct mtw_tx_data *data;

    /* make sure any subsequent use of the queues will fail */
    pq->tx_nfree = 0;

    STAILQ_INIT(&pq->tx_fh);
    STAILQ_INIT(&pq->tx_qh);

    /* free up all node references and mbufs */
    for (data = &pq->tx_data[0]; data < &pq->tx_data[MTW_TX_RING_COUNT];
         data++) {
        if (data->m != NULL) {
            m_freem(data->m);
            data->m = NULL;
        }
        if (data->ni != NULL) {
            ieee80211_free_node(data->ni);
            data->ni = NULL;
        }
    }
}

static int
mtw_write_ivb(struct mtw_softc *sc, void *buf, uint16_t len)
{
    usb_device_request_t req;
    uint16_t actlen;
    int error;

    req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
    req.bRequest = MTW_RESET;
    USETW(req.wValue, 0x12);
    USETW(req.wIndex, 0);
    USETW(req.wLength, len);

    error = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, buf,
        0, &actlen, 1000);

    return (error);
}

static int
mtw_write_cfg(struct mtw_softc *sc, uint16_t reg, uint32_t val)
{
    usb_device_request_t req;
    int error;

    req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
    req.bRequest = MTW_WRITE_CFG;
    USETW(req.wValue, 0);
    USETW(req.wIndex, reg);
    USETW(req.wLength, 4);
    val = htole32(val);
    error = usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, &val);
    return (error);
}

static int
mtw_usb_dma_write(struct mtw_softc *sc, uint32_t val)
{
    // if (sc->asic_ver == 0x7612)
    //     return mtw_write_cfg(sc, MTW_USB_U3DMA_CFG, val);
    // else
    return (mtw_write(sc, MTW_USB_DMA_CFG, val));
}

static void
mtw_ucode_setup(struct mtw_softc *sc)
{
    mtw_usb_dma_write(sc, (MTW_USB_TX_EN | MTW_USB_RX_EN));
    mtw_write(sc, MTW_FCE_PSE_CTRL, 1);
    mtw_write(sc, MTW_TX_CPU_FCE_BASE, 0x400230);
    mtw_write(sc, MTW_TX_CPU_FCE_MAX_COUNT, 1);
    mtw_write(sc, MTW_MCU_FW_IDX, 1);
    mtw_write(sc, MTW_FCE_PDMA, 0x44);
    mtw_write(sc, MTW_FCE_SKIP_FS, 3);
}
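
/*
 * Firmware upload sketch: mtw_ucode_setup() above programs the FCE/DMA
 * registers, and mtw_ucode_write() below splits the image into 0x2c44-byte
 * chunks (the MTW_BULK_FW_CMD buffer size), staging them in txd_fw[] and
 * then kicking sc_xfer[7] so mtw_fw_callback() can push them to the MCU.
 */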
static int
mtw_ucode_write(struct mtw_softc *sc, const uint8_t *fw, const uint8_t *ivb,
    int32_t len, uint32_t offset)
{
    // struct usb_attach_arg *uaa = device_get_ivars(sc->sc_dev);
#if 0   /* firmware not tested */
    if (sc->asic_ver == 0x7612 && offset >= 0x90000)
        blksz = 0x800;  /* MT7612 ROM Patch */

    xfer = usbd_alloc_xfer(sc->sc_udev);
    if (xfer == NULL) {
        error = ENOMEM;
        goto fail;
    }
    buf = usbd_alloc_buffer(xfer, blksz + 12);
    if (buf == NULL) {
        error = ENOMEM;
        goto fail;
    }
#endif
    int mlen;
    int idx = 0;

    mlen = 0x2c44;

    while (len > 0) {
        if (len < 0x2c44 && len > 0) {
            mlen = len;
        }

        sc->txd_fw[idx]->len = htole16(mlen);
        sc->txd_fw[idx]->flags = htole16(MTW_TXD_DATA | MTW_TXD_MCU);

        memcpy(&sc->txd_fw[idx]->fw, fw, mlen);
        // memcpy(&txd[1], fw, mlen);
        // memset(&txd[1] + mlen, 0, MTW_DMA_PAD);
        // mtw_write_cfg(sc, MTW_MCU_DMA_ADDR, offset + sent);
        // mtw_write_cfg(sc, MTW_MCU_DMA_LEN, (mlen << 16));

        // sc->sc_fw_data[idx]->len = htole16(mlen);

        // memcpy(tmpbuf, fw, mlen);
        // memset(tmpbuf + mlen, 0, MTW_DMA_PAD);
        // memcpy(sc->sc_fw_data[idx].buf, fw, mlen);

        fw += mlen;
        len -= mlen;
        // sent += mlen;
        idx++;
    }
    sc->sc_sent = 0;
    memcpy(sc->sc_ivb_1, ivb, MTW_MCU_IVB_LEN);

    usbd_transfer_start(sc->sc_xfer[7]);

    return (0);
}

static void
mtw_load_microcode(void *arg)
{
    struct mtw_softc *sc = (struct mtw_softc *)arg;
    const struct mtw_ucode_hdr *hdr;
    // const struct mtw_ucode *fw = NULL;
    const char *fwname;
    size_t size;
    int error = 0;
    uint32_t tmp, iofs = 0x40;
    // int ntries;
    int dlen, ilen;

    device_printf(sc->sc_dev, "version:0x%hx\n", sc->asic_ver);

    /* is the firmware already running? */
    mtw_read_cfg(sc, MTW_MCU_DMA_ADDR, &tmp);
    if (tmp == MTW_MCU_READY) {
        return;
    }

    if (sc->asic_ver == 0x7612) {
        fwname = "mtw-mt7662u_rom_patch";

        const struct firmware *firmware = firmware_get_flags(fwname,
            FIRMWARE_GET_NOWARN);
        if (firmware == NULL) {
            device_printf(sc->sc_dev,
                "failed to load firmware file %s (error %d)\n",
                fwname, error);
            return;
        }
        size = firmware->datasize;

        const struct mtw_ucode *fw = (const struct mtw_ucode *)
            firmware->data;
        hdr = (const struct mtw_ucode_hdr *)&fw->hdr;
        // memcpy(fw, (const unsigned char *)firmware->data + 0x1e,
        //     size - 0x1e);
        ilen = size - 0x1e;

        mtw_ucode_setup(sc);

        if ((error = mtw_ucode_write(sc, firmware->data, fw->ivb, ilen,
            0x90000)) != 0) {
            goto fail;
        }
        mtw_usb_dma_write(sc, 0x00e41814);
    }

    fwname = "/mediatek/mt7601u.bin";
    iofs = 0x40;
    // dofs = 0;
    if (sc->asic_ver == 0x7612) {
        fwname = "mtw-mt7662u";
        iofs = 0x80040;
        // dofs = 0x110800;
    } else if (sc->asic_ver == 0x7610) {
        fwname = "mt7610u";
        // dofs = 0x80000;
    }
    MTW_UNLOCK(sc);
    const struct firmware *firmware = firmware_get_flags(fwname,
        FIRMWARE_GET_NOWARN);

    if (firmware == NULL) {
        device_printf(sc->sc_dev,
            "failed to load firmware file %s (error %d)\n", fwname,
            error);
        MTW_LOCK(sc);
        return;
    }
    MTW_LOCK(sc);
    size = firmware->datasize;
    MTW_DPRINTF(sc, MTW_DEBUG_FIRMWARE, "firmware size:%zu\n", size);
    const struct mtw_ucode *fw = (const struct mtw_ucode *)firmware->data;

    if (size < sizeof(struct mtw_ucode_hdr)) {
        device_printf(sc->sc_dev, "firmware header too short\n");
        goto fail;
    }

    hdr = (const struct mtw_ucode_hdr *)&fw->hdr;
    if (size < sizeof(struct mtw_ucode_hdr) + le32toh(hdr->ilm_len) +
        le32toh(hdr->dlm_len)) {
        device_printf(sc->sc_dev, "firmware payload too short\n");
        goto fail;
    }

    ilen = le32toh(hdr->ilm_len) - MTW_MCU_IVB_LEN;
    dlen = le32toh(hdr->dlm_len);

    if (ilen > size || dlen > size) {
        device_printf(sc->sc_dev, "firmware payload too large\n");
        goto fail;
    }

    mtw_write(sc, MTW_FCE_PDMA, 0);
    mtw_write(sc, MTW_FCE_PSE_CTRL, 0);
    mtw_ucode_setup(sc);

    if ((error = mtw_ucode_write(sc, fw->data, fw->ivb, ilen, iofs)) != 0)
        device_printf(sc->sc_dev, "could not write ucode, error=%d\n",
            error);

    device_printf(sc->sc_dev, "loaded firmware ver %.8x %.8x %s\n",
        le32toh(hdr->fw_ver), le32toh(hdr->build_ver), hdr->build_time);

    return;
fail:
    return;
}

static usb_error_t
mtw_do_request(struct mtw_softc *sc, struct usb_device_request *req,
    void *data)
{
    usb_error_t err;
    int ntries = 5;

    MTW_LOCK_ASSERT(sc, MA_OWNED);

    while (ntries--) {
        err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data,
            0, NULL, 2000);  /* ms */
        if (err == 0)
            break;
        MTW_DPRINTF(sc, MTW_DEBUG_USB,
            "Control request failed, %s (retrying)\n",
            usbd_errstr(err));
        mtw_delay(sc, 10);
    }
    return (err);
}

static int
mtw_read(struct mtw_softc *sc, uint16_t reg, uint32_t *val)
{
    uint32_t tmp;
    int error;

    error = mtw_read_region_1(sc, reg, (uint8_t *)&tmp, sizeof tmp);
    if (error == 0)
        *val = le32toh(tmp);
    else
        *val = 0xffffffff;
    return (error);
}

static int
mtw_read_region_1(struct mtw_softc *sc, uint16_t reg, uint8_t *buf, int len)
{
    usb_device_request_t req;

    req.bmRequestType = UT_READ_VENDOR_DEVICE;
    req.bRequest = MTW_READ_REGION_1;
    USETW(req.wValue, 0);
    USETW(req.wIndex, reg);
    USETW(req.wLength, len);

    return (mtw_do_request(sc, &req, buf));
}

static int
mtw_write_2(struct mtw_softc *sc, uint16_t reg, uint16_t val)
{
    usb_device_request_t req;

    req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
    req.bRequest = MTW_WRITE_2;
    USETW(req.wValue, val);
    USETW(req.wIndex, reg);
    USETW(req.wLength, 0);
    return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, NULL));
}

static int
mtw_write(struct mtw_softc *sc, uint16_t reg, uint32_t val)
{
    int error;

    /* a 32-bit write is issued as two 16-bit vendor requests */
    if ((error = mtw_write_2(sc, reg, val & 0xffff)) == 0)
        error = mtw_write_2(sc, reg + 2, val >> 16);

    return (error);
}

static int
mtw_write_region_1(struct mtw_softc *sc, uint16_t reg, uint8_t *buf, int len)
{
    usb_device_request_t req;

    req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
    req.bRequest = MTW_WRITE_REGION_1;
    USETW(req.wValue, 0);
    USETW(req.wIndex, reg);
    USETW(req.wLength, len);
    return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, buf));
}

static int
mtw_set_region_4(struct mtw_softc *sc, uint16_t reg, uint32_t val, int count)
{
    int i, error = 0;

    KASSERT((count & 3) == 0, ("mtw_set_region_4: Invalid data length.\n"));
    for (i = 0; i < count && error == 0; i += 4)
        error = mtw_write(sc, reg + i, val);
    return (error);
}
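
/*
 * The MT7601U keeps its "EEPROM" contents in on-chip eFUSE; sc_srom_read
 * points at mtw_efuse_read_2() so that mtw_srom_read() transparently
 * fetches 16-bit words from it during mtw_read_eeprom().
 */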
static int
mtw_efuse_read_2(struct mtw_softc *sc, uint16_t addr, uint16_t *val)
{
    uint32_t tmp;
    uint16_t reg;
    int error, ntries;

    if ((error = mtw_read(sc, MTW_EFUSE_CTRL, &tmp)) != 0)
        return (error);

    addr *= 2;
    /*
     * Read one 16-byte block into registers EFUSE_DATA[0-3]:
     * DATA0: 3 2 1 0
     * DATA1: 7 6 5 4
     * DATA2: B A 9 8
     * DATA3: F E D C
     */
    tmp &= ~(MTW_EFSROM_MODE_MASK | MTW_EFSROM_AIN_MASK);
    tmp |= (addr & ~0xf) << MTW_EFSROM_AIN_SHIFT | MTW_EFSROM_KICK;
    mtw_write(sc, MTW_EFUSE_CTRL, tmp);
    for (ntries = 0; ntries < 100; ntries++) {
        if ((error = mtw_read(sc, MTW_EFUSE_CTRL, &tmp)) != 0)
            return (error);
        if (!(tmp & MTW_EFSROM_KICK))
            break;
        DELAY(2);
    }
    if (ntries == 100)
        return (ETIMEDOUT);

    if ((tmp & MTW_EFUSE_AOUT_MASK) == MTW_EFUSE_AOUT_MASK) {
        *val = 0xffff;  /* address not found */
        return (0);
    }
    /* determine to which 32-bit register our 16-bit word belongs */
    reg = MTW_EFUSE_DATA0 + (addr & 0xc);
    if ((error = mtw_read(sc, reg, &tmp)) != 0)
        return (error);

    *val = (addr & 2) ? tmp >> 16 : tmp & 0xffff;
    return (0);
}

static __inline int
mtw_srom_read(struct mtw_softc *sc, uint16_t addr, uint16_t *val)
{
    /* either eFUSE ROM or EEPROM */
    return (sc->sc_srom_read(sc, addr, val));
}

static int
mtw_bbp_read(struct mtw_softc *sc, uint8_t reg, uint8_t *val)
{
    uint32_t tmp;
    int ntries, error;

    for (ntries = 0; ntries < 10; ntries++) {
        if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
            return (error);
        if (!(tmp & MTW_BBP_CSR_KICK))
            break;
    }
    if (ntries == 10)
        return (ETIMEDOUT);

    tmp = MTW_BBP_CSR_READ | MTW_BBP_CSR_KICK | reg << 8;
    if ((error = mtw_write(sc, MTW_BBP_CSR, tmp)) != 0)
        return (error);

    for (ntries = 0; ntries < 10; ntries++) {
        if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
            return (error);
        if (!(tmp & MTW_BBP_CSR_KICK))
            break;
    }
    if (ntries == 10)
        return (ETIMEDOUT);

    *val = tmp & 0xff;
    return (0);
}

static int
mtw_bbp_write(struct mtw_softc *sc, uint8_t reg, uint8_t val)
{
    uint32_t tmp;
    int ntries, error;

    for (ntries = 0; ntries < 10; ntries++) {
        if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
            return (error);
        if (!(tmp & MTW_BBP_CSR_KICK))
            break;
    }
    if (ntries == 10)
        return (ETIMEDOUT);

    tmp = MTW_BBP_CSR_KICK | reg << 8 | val;
    return (mtw_write(sc, MTW_BBP_CSR, tmp));
}

static int
mtw_mcu_cmd(struct mtw_softc *sc, uint8_t cmd, void *buf, int len)
{
    sc->sc_idx = 0;
    sc->txd_fw[sc->sc_idx]->len = htole16(len + 8);
    sc->txd_fw[sc->sc_idx]->flags = htole16(MTW_TXD_CMD | MTW_TXD_MCU |
        (cmd & 0x1f) << MTW_TXD_CMD_SHIFT | (0 & 0xf));

    memset(&sc->txd_fw[sc->sc_idx]->fw, 0, 2004);
    memcpy(&sc->txd_fw[sc->sc_idx]->fw, buf, len);
    usbd_transfer_start(sc->sc_xfer[7]);
    return (0);
}

/*
 * Add `delta' (signed) to each 4-bit sub-word of a 32-bit word.
 * Used to adjust per-rate Tx power registers.
 */
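/*
 * For example, b4inc(0x00f84321, 2) yields 0x22fa6543: each nibble is
 * incremented by two and clamped to the 0x0-0xf range (the 0xf nibble
 * stays at 0xf).
 */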
static __inline uint32_t
b4inc(uint32_t b32, int8_t delta)
{
    int8_t i, b4;

    for (i = 0; i < 8; i++) {
        b4 = b32 & 0xf;
        b4 += delta;
        if (b4 < 0)
            b4 = 0;
        else if (b4 > 0xf)
            b4 = 0xf;
        b32 = b32 >> 4 | b4 << 28;
    }
    return (b32);
}

static void
mtw_get_txpower(struct mtw_softc *sc)
{
    uint16_t val;
    int i;

    /* Read power settings for 2GHz channels. */
    for (i = 0; i < 14; i += 2) {
        mtw_srom_read(sc, MTW_EEPROM_PWR2GHZ_BASE1 + i / 2, &val);
        sc->txpow1[i + 0] = (int8_t)(val & 0xff);
        sc->txpow1[i + 1] = (int8_t)(val >> 8);
        mtw_srom_read(sc, MTW_EEPROM_PWR2GHZ_BASE2 + i / 2, &val);
        sc->txpow2[i + 0] = (int8_t)(val & 0xff);
        sc->txpow2[i + 1] = (int8_t)(val >> 8);
    }
    /* Fix broken Tx power entries. */
    for (i = 0; i < 14; i++) {
        if (sc->txpow1[i] < 0 || sc->txpow1[i] > 27)
            sc->txpow1[i] = 5;
        if (sc->txpow2[i] < 0 || sc->txpow2[i] > 27)
            sc->txpow2[i] = 5;
        MTW_DPRINTF(sc, MTW_DEBUG_TXPWR,
            "chan %d: power1=%d, power2=%d\n", mt7601_rf_chan[i].chan,
            sc->txpow1[i], sc->txpow2[i]);
    }
}

struct ieee80211_node *
mtw_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
    return (malloc(sizeof(struct mtw_node), M_80211_NODE,
        M_NOWAIT | M_ZERO));
}

static int
mtw_read_eeprom(struct mtw_softc *sc)
{
    struct ieee80211com *ic = &sc->sc_ic;
    int8_t delta_2ghz, delta_5ghz;
    uint16_t val;
    int ridx, ant;

    sc->sc_srom_read = mtw_efuse_read_2;

    /* read RF information */
    mtw_srom_read(sc, MTW_EEPROM_CHIPID, &val);
    sc->rf_rev = val;
    mtw_srom_read(sc, MTW_EEPROM_ANTENNA, &val);
    sc->ntxchains = (val >> 4) & 0xf;
    sc->nrxchains = val & 0xf;
    MTW_DPRINTF(sc, MTW_DEBUG_ROM, "EEPROM RF rev=0x%02x chains=%dT%dR\n",
        sc->rf_rev, sc->ntxchains, sc->nrxchains);

    /* read ROM version */
    mtw_srom_read(sc, MTW_EEPROM_VERSION, &val);
    MTW_DPRINTF(sc, MTW_DEBUG_ROM, "EEPROM rev=%d, FAE=%d\n", val & 0xff,
        val >> 8);

    /* read MAC address */
    mtw_srom_read(sc, MTW_EEPROM_MAC01, &val);
    ic->ic_macaddr[0] = val & 0xff;
    ic->ic_macaddr[1] = val >> 8;
    mtw_srom_read(sc, MTW_EEPROM_MAC23, &val);
    ic->ic_macaddr[2] = val & 0xff;
    ic->ic_macaddr[3] = val >> 8;
    mtw_srom_read(sc, MTW_EEPROM_MAC45, &val);
    ic->ic_macaddr[4] = val & 0xff;
    ic->ic_macaddr[5] = val >> 8;
#if 0
    printf("eFUSE ROM\n00: ");
    for (int i = 0; i < 256; i++) {
        if (((i % 8) == 0) && i > 0)
            printf("\n%02x: ", i);
        mtw_srom_read(sc, i, &val);
        printf(" %04x", val);
    }
    printf("\n");
#endif
    /* check if RF supports automatic Tx access gain control */
    mtw_srom_read(sc, MTW_EEPROM_CONFIG, &val);
    device_printf(sc->sc_dev, "EEPROM CFG 0x%04x\n", val);
    if ((val & 0xff) != 0xff) {
        sc->ext_5ghz_lna = (val >> 3) & 1;
        sc->ext_2ghz_lna = (val >> 2) & 1;
        /* check if RF supports automatic Tx access gain control */
        sc->calib_2ghz = sc->calib_5ghz = (val >> 1) & 1;
        /* check if we have a hardware radio switch */
        sc->rfswitch = val & 1;
    }

    /* read RF frequency offset from EEPROM */
    mtw_srom_read(sc, MTW_EEPROM_FREQ_OFFSET, &val);
    if ((val & 0xff) != 0xff)
        sc->rf_freq_offset = val;
    else
        sc->rf_freq_offset = 0;
    MTW_DPRINTF(sc, MTW_DEBUG_ROM, "frequency offset 0x%x\n",
        sc->rf_freq_offset);

    /* Read Tx power settings. */
    mtw_get_txpower(sc);

    /* read Tx power compensation for each Tx rate */
    mtw_srom_read(sc, MTW_EEPROM_DELTAPWR, &val);
    delta_2ghz = delta_5ghz = 0;
    if ((val & 0xff) != 0xff && (val & 0x80)) {
        delta_2ghz = val & 0xf;
        if (!(val & 0x40))  /* negative number */
            delta_2ghz = -delta_2ghz;
    }
    val >>= 8;
    if ((val & 0xff) != 0xff && (val & 0x80)) {
        delta_5ghz = val & 0xf;
        if (!(val & 0x40))  /* negative number */
            delta_5ghz = -delta_5ghz;
    }
    MTW_DPRINTF(sc, MTW_DEBUG_ROM | MTW_DEBUG_TXPWR,
        "power compensation=%d (2GHz), %d (5GHz)\n", delta_2ghz,
        delta_5ghz);

    for (ridx = 0; ridx < 5; ridx++) {
        uint32_t reg;

        mtw_srom_read(sc, MTW_EEPROM_RPWR + ridx * 2, &val);
        reg = val;
        mtw_srom_read(sc, MTW_EEPROM_RPWR + ridx * 2 + 1, &val);
        reg |= (uint32_t)val << 16;

        sc->txpow20mhz[ridx] = reg;
        sc->txpow40mhz_2ghz[ridx] = b4inc(reg, delta_2ghz);
        sc->txpow40mhz_5ghz[ridx] = b4inc(reg, delta_5ghz);

        MTW_DPRINTF(sc, MTW_DEBUG_ROM | MTW_DEBUG_TXPWR,
            "ridx %d: power 20MHz=0x%08x, 40MHz/2GHz=0x%08x, "
            "40MHz/5GHz=0x%08x\n",
            ridx, sc->txpow20mhz[ridx], sc->txpow40mhz_2ghz[ridx],
            sc->txpow40mhz_5ghz[ridx]);
    }

    /* read RSSI offsets and LNA gains from EEPROM */
    val = 0;
    mtw_srom_read(sc, MTW_EEPROM_RSSI1_2GHZ, &val);
    sc->rssi_2ghz[0] = val & 0xff;  /* Ant A */
    sc->rssi_2ghz[1] = val >> 8;    /* Ant B */
    mtw_srom_read(sc, MTW_EEPROM_RSSI2_2GHZ, &val);
    /*
     * On RT3070 chips (limited to 2 Rx chains), this ROM
     * field contains the Tx mixer gain for the 2GHz band.
     */
    if ((val & 0xff) != 0xff)
        sc->txmixgain_2ghz = val & 0x7;
    MTW_DPRINTF(sc, MTW_DEBUG_ROM, "tx mixer gain=%u (2GHz)\n",
        sc->txmixgain_2ghz);
    sc->lna[2] = val >> 8;  /* channel group 2 */
    mtw_srom_read(sc, MTW_EEPROM_RSSI1_5GHZ, &val);
    sc->rssi_5ghz[0] = val & 0xff;  /* Ant A */
    sc->rssi_5ghz[1] = val >> 8;    /* Ant B */
    mtw_srom_read(sc, MTW_EEPROM_RSSI2_5GHZ, &val);
    sc->rssi_5ghz[2] = val & 0xff;  /* Ant C */

    sc->lna[3] = val >> 8;  /* channel group 3 */

    mtw_srom_read(sc, MTW_EEPROM_LNA, &val);
    sc->lna[0] = val & 0xff;  /* channel group 0 */
    sc->lna[1] = val >> 8;    /* channel group 1 */
    MTW_DPRINTF(sc, MTW_DEBUG_ROM, "LNA0 0x%x\n", sc->lna[0]);

    /* fix broken 5GHz LNA entries */
    if (sc->lna[2] == 0 || sc->lna[2] == 0xff) {
        MTW_DPRINTF(sc, MTW_DEBUG_ROM,
            "invalid LNA for channel group %d\n", 2);
        sc->lna[2] = sc->lna[1];
    }
    if (sc->lna[3] == 0 || sc->lna[3] == 0xff) {
        MTW_DPRINTF(sc, MTW_DEBUG_ROM,
            "invalid LNA for channel group %d\n", 3);
        sc->lna[3] = sc->lna[1];
    }

    /* fix broken RSSI offset entries */
    for (ant = 0; ant < 3; ant++) {
        if (sc->rssi_2ghz[ant] < -10 || sc->rssi_2ghz[ant] > 10) {
            MTW_DPRINTF(sc, MTW_DEBUG_ROM,
                "invalid RSSI%d offset: %d (2GHz)\n", ant + 1,
                sc->rssi_2ghz[ant]);
            sc->rssi_2ghz[ant] = 0;
        }
        if (sc->rssi_5ghz[ant] < -10 || sc->rssi_5ghz[ant] > 10) {
            MTW_DPRINTF(sc, MTW_DEBUG_ROM,
                "invalid RSSI%d offset: %d (5GHz)\n", ant + 1,
                sc->rssi_5ghz[ant]);
            sc->rssi_5ghz[ant] = 0;
        }
    }
    return (0);
}
static int
mtw_media_change(if_t ifp)
{
    struct ieee80211vap *vap = if_getsoftc(ifp);
    struct ieee80211com *ic = vap->iv_ic;
    const struct ieee80211_txparam *tp;
    struct mtw_softc *sc = ic->ic_softc;
    uint8_t rate, ridx;

    MTW_LOCK(sc);
    ieee80211_media_change(ifp);
    // tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
    tp = &vap->iv_txparms[ic->ic_curmode];
    if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
        struct ieee80211_node *ni;
        struct mtw_node *rn;

        /* XXX TODO: methodize with MCS rates */
        rate =
            ic->ic_sup_rates[ic->ic_curmode].rs_rates[tp->ucastrate] &
            IEEE80211_RATE_VAL;
        for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) {
            if (rt2860_rates[ridx].rate == rate)
                break;
        }
        ni = ieee80211_ref_node(vap->iv_bss);
        rn = MTW_NODE(ni);
        rn->fix_ridx = ridx;

        MTW_DPRINTF(sc, MTW_DEBUG_RATE, "rate=%d, fix_ridx=%d\n", rate,
            rn->fix_ridx);
        ieee80211_free_node(ni);
    }
    MTW_UNLOCK(sc);

    return (0);
}

void
mtw_set_leds(struct mtw_softc *sc, uint16_t which)
{
    struct mtw_mcu_cmd_8 cmd;

    cmd.func = htole32(0x1);
    cmd.val = htole32(which);
    mtw_mcu_cmd(sc, CMD_LED_MODE, &cmd, sizeof(struct mtw_mcu_cmd_8));
}

static void
mtw_abort_tsf_sync(struct mtw_softc *sc)
{
    uint32_t tmp;

    mtw_read(sc, MTW_BCN_TIME_CFG, &tmp);
    tmp &= ~(MTW_BCN_TX_EN | MTW_TSF_TIMER_EN | MTW_TBTT_TIMER_EN);
    mtw_write(sc, MTW_BCN_TIME_CFG, tmp);
}

static int
mtw_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
    const struct ieee80211_txparam *tp;
    struct ieee80211com *ic = vap->iv_ic;
    struct mtw_softc *sc = ic->ic_softc;
    struct mtw_vap *rvp = MTW_VAP(vap);
    enum ieee80211_state ostate;
    uint32_t sta[3];
    uint8_t ratectl = 0;
    uint8_t restart_ratectl = 0;
    uint8_t bid = 1 << rvp->rvp_id;

    ostate = vap->iv_state;
    MTW_DPRINTF(sc, MTW_DEBUG_STATE, "%s -> %s\n",
        ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

    IEEE80211_UNLOCK(ic);
    MTW_LOCK(sc);
    ratectl = sc->ratectl_run;  /* remember current state */
    usb_callout_stop(&sc->ratectl_ch);
    sc->ratectl_run = MTW_RATECTL_OFF;
    if (ostate == IEEE80211_S_RUN) {
        /* turn link LED off */
    }

    switch (nstate) {
    case IEEE80211_S_INIT:
        restart_ratectl = 1;
        if (ostate != IEEE80211_S_RUN)
            break;

        ratectl &= ~bid;
        sc->runbmap &= ~bid;

        /* abort TSF synchronization if there is no vap running */
        if (--sc->running == 0)
            mtw_abort_tsf_sync(sc);
        break;

    case IEEE80211_S_RUN:
        if (!(sc->runbmap & bid)) {
            if (sc->running++)
                restart_ratectl = 1;
            sc->runbmap |= bid;
        }

        m_freem(rvp->beacon_mbuf);
        rvp->beacon_mbuf = NULL;

        switch (vap->iv_opmode) {
        case IEEE80211_M_HOSTAP:
        case IEEE80211_M_MBSS:
            sc->ap_running |= bid;
            ic->ic_opmode = vap->iv_opmode;
            mtw_update_beacon_cb(vap);
            break;
        case IEEE80211_M_IBSS:
            sc->adhoc_running |= bid;
            if (!sc->ap_running)
                ic->ic_opmode = vap->iv_opmode;
            mtw_update_beacon_cb(vap);
            break;
        case IEEE80211_M_STA:
            sc->sta_running |= bid;
            if (!sc->ap_running && !sc->adhoc_running)
                ic->ic_opmode = vap->iv_opmode;

            /* read statistic counters (clear on read) */
            mtw_read_region_1(sc, MTW_TX_STA_CNT0, (uint8_t *)sta,
                sizeof sta);

            break;
        default:
            ic->ic_opmode = vap->iv_opmode;
            break;
        }

        if (vap->iv_opmode != IEEE80211_M_MONITOR) {
            struct ieee80211_node *ni;

            if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) {
                MTW_UNLOCK(sc);
                IEEE80211_LOCK(ic);
                return (-1);
            }
            mtw_updateslot(ic);
            mtw_enable_mrr(sc);
            mtw_set_txpreamble(sc);
            mtw_set_basicrates(sc);
            ni = ieee80211_ref_node(vap->iv_bss);
            IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid);
            mtw_set_bssid(sc, sc->sc_bssid);
            ieee80211_free_node(ni);
            mtw_enable_tsf_sync(sc);

            /* enable automatic rate adaptation */
            tp = &vap->iv_txparms[ieee80211_chan2mode(
                ic->ic_curchan)];
            if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE)
                ratectl |= bid;
        } else {
            mtw_enable_tsf_sync(sc);
        }

        break;
    default:
        MTW_DPRINTF(sc, MTW_DEBUG_STATE, "undefined state\n");
        break;
    }

    /* restart amrr for running VAPs */
    if ((sc->ratectl_run = ratectl) && restart_ratectl) {
        usb_callout_reset(&sc->ratectl_ch, hz, mtw_ratectl_to, sc);
    }
    MTW_UNLOCK(sc);
    IEEE80211_LOCK(ic);
    return (rvp->newstate(vap, nstate, arg));
}

static int
mtw_wme_update(struct ieee80211com *ic)
{
    struct chanAccParams chp;
    struct mtw_softc *sc = ic->ic_softc;
    const struct wmeParams *ac;
    int aci, error = 0;

    ieee80211_wme_ic_getparams(ic, &chp);
    ac = chp.cap_wmeParams;

    MTW_LOCK(sc);
    /* update MAC TX configuration registers */
    for (aci = 0; aci < WME_NUM_AC; aci++) {
        error = mtw_write(sc, MTW_EDCA_AC_CFG(aci),
            ac[aci].wmep_logcwmax << 16 | ac[aci].wmep_logcwmin << 12 |
            ac[aci].wmep_aifsn << 8 | ac[aci].wmep_txopLimit);
        if (error)
            goto err;
    }

    /* update SCH/DMA registers too */
    error = mtw_write(sc, MTW_WMM_AIFSN_CFG,
        ac[WME_AC_VO].wmep_aifsn << 12 | ac[WME_AC_VI].wmep_aifsn << 8 |
        ac[WME_AC_BK].wmep_aifsn << 4 | ac[WME_AC_BE].wmep_aifsn);
    if (error)
        goto err;
    error = mtw_write(sc, MTW_WMM_CWMIN_CFG,
        ac[WME_AC_VO].wmep_logcwmin << 12 |
        ac[WME_AC_VI].wmep_logcwmin << 8 |
        ac[WME_AC_BK].wmep_logcwmin << 4 | ac[WME_AC_BE].wmep_logcwmin);
    if (error)
        goto err;
    error = mtw_write(sc, MTW_WMM_CWMAX_CFG,
        ac[WME_AC_VO].wmep_logcwmax << 12 |
        ac[WME_AC_VI].wmep_logcwmax << 8 |
        ac[WME_AC_BK].wmep_logcwmax << 4 | ac[WME_AC_BE].wmep_logcwmax);
    if (error)
        goto err;
    error = mtw_write(sc, MTW_WMM_TXOP0_CFG,
        ac[WME_AC_BK].wmep_txopLimit << 16 | ac[WME_AC_BE].wmep_txopLimit);
    if (error)
        goto err;
    error = mtw_write(sc, MTW_WMM_TXOP1_CFG,
        ac[WME_AC_VO].wmep_txopLimit << 16 | ac[WME_AC_VI].wmep_txopLimit);

err:
    MTW_UNLOCK(sc);
    if (error)
        MTW_DPRINTF(sc, MTW_DEBUG_USB, "WME update failed\n");

    return (error);
}

static int
mtw_key_set(struct ieee80211vap *vap, struct ieee80211_key *k)
{
    struct ieee80211com *ic = vap->iv_ic;
    struct mtw_softc *sc = ic->ic_softc;
    uint32_t i;

    i = MTW_CMDQ_GET(&sc->cmdq_store);
    MTW_DPRINTF(sc, MTW_DEBUG_KEY, "cmdq_store=%d\n", i);
    sc->cmdq[i].func = mtw_key_set_cb;
    sc->cmdq[i].arg0 = NULL;
    sc->cmdq[i].arg1 = vap;
    sc->cmdq[i].k = k;
    IEEE80211_ADDR_COPY(sc->cmdq[i].mac, k->wk_macaddr);
    ieee80211_runtask(ic, &sc->cmdq_task);

    /*
     * To make sure the key will be set when hostapd
     * calls iv_key_set() before if_init().
     */
    if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
        MTW_LOCK(sc);
        sc->cmdq_key_set = MTW_CMDQ_GO;
        MTW_UNLOCK(sc);
    }

    return (1);
}
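
/*
 * mtw_key_set() above only queues the work; the actual key programming is
 * done by mtw_key_set_cb() below, run from the cmdq task with the softc
 * lock held.
 */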
static void
mtw_key_set_cb(void *arg)
{
    struct mtw_cmdq *cmdq = arg;
    struct ieee80211vap *vap = cmdq->arg1;
    struct ieee80211_key *k = cmdq->k;
    struct ieee80211com *ic = vap->iv_ic;
    struct mtw_softc *sc = ic->ic_softc;
    struct ieee80211_node *ni;
    u_int cipher = k->wk_cipher->ic_cipher;
    uint32_t attr;
    uint16_t base;
    uint8_t mode, wcid, iv[8];

    MTW_LOCK_ASSERT(sc, MA_OWNED);

    if (vap->iv_opmode == IEEE80211_M_HOSTAP)
        ni = ieee80211_find_vap_node(&ic->ic_sta, vap, cmdq->mac);
    else
        ni = vap->iv_bss;

    /* map net80211 cipher to RT2860 security mode */
    switch (cipher) {
    case IEEE80211_CIPHER_WEP:
        if (k->wk_keylen < 8)
            mode = MTW_MODE_WEP40;
        else
            mode = MTW_MODE_WEP104;
        break;
    case IEEE80211_CIPHER_TKIP:
        mode = MTW_MODE_TKIP;
        break;
    case IEEE80211_CIPHER_AES_CCM:
        mode = MTW_MODE_AES_CCMP;
        break;
    default:
        MTW_DPRINTF(sc, MTW_DEBUG_KEY, "undefined case\n");
        return;
    }

    if (k->wk_flags & IEEE80211_KEY_GROUP) {
        wcid = 0;  /* NB: update WCID0 for group keys */
        base = MTW_SKEY(0, k->wk_keyix);
    } else {
        wcid = (ni != NULL) ? MTW_AID2WCID(ni->ni_associd) : 0;
        base = MTW_PKEY(wcid);
    }

    if (cipher == IEEE80211_CIPHER_TKIP) {
        mtw_write_region_1(sc, base, k->wk_key, 16);
        mtw_write_region_1(sc, base + 16, &k->wk_key[24], 8);
        mtw_write_region_1(sc, base + 24, &k->wk_key[16], 8);
    } else {
        /* roundup len to 16-bit: XXX fix write_region_1() instead */
        mtw_write_region_1(sc, base, k->wk_key,
            (k->wk_keylen + 1) & ~1);
    }

    if (!(k->wk_flags & IEEE80211_KEY_GROUP) ||
        (k->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))) {
        /* set initial packet number in IV+EIV */
        if (cipher == IEEE80211_CIPHER_WEP) {
            memset(iv, 0, sizeof iv);
            iv[3] = vap->iv_def_txkey << 6;
        } else {
            if (cipher == IEEE80211_CIPHER_TKIP) {
                iv[0] = k->wk_keytsc >> 8;
                iv[1] = (iv[0] | 0x20) & 0x7f;
                iv[2] = k->wk_keytsc;
            } else {  /* CCMP */
                iv[0] = k->wk_keytsc;
                iv[1] = k->wk_keytsc >> 8;
                iv[2] = 0;
            }
            iv[3] = k->wk_keyix << 6 | IEEE80211_WEP_EXTIV;
            iv[4] = k->wk_keytsc >> 16;
            iv[5] = k->wk_keytsc >> 24;
            iv[6] = k->wk_keytsc >> 32;
            iv[7] = k->wk_keytsc >> 40;
        }
        mtw_write_region_1(sc, MTW_IVEIV(wcid), iv, 8);
    }

    if (k->wk_flags & IEEE80211_KEY_GROUP) {
        /* install group key */
        mtw_read(sc, MTW_SKEY_MODE_0_7, &attr);
        attr &= ~(0xf << (k->wk_keyix * 4));
        attr |= mode << (k->wk_keyix * 4);
        mtw_write(sc, MTW_SKEY_MODE_0_7, attr);

        if (cipher & (IEEE80211_CIPHER_WEP)) {
            mtw_read(sc, MTW_WCID_ATTR(wcid + 1), &attr);
            attr = (attr & ~0xf) | (mode << 1);
            mtw_write(sc, MTW_WCID_ATTR(wcid + 1), attr);

            mtw_set_region_4(sc, MTW_IVEIV(0), 0, 4);

            mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
            attr = (attr & ~0xf) | (mode << 1);
            mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
        }
    } else {
        /* install pairwise key */
        mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
        attr = (attr & ~0xf) | (mode << 1) | MTW_RX_PKEY_EN;
        mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
    }
    k->wk_pad = wcid;
}

/*
destroyed without being brought down i.e. without 1996 * wlan down or wpa_cli terminate, this function is called after 1997 * vap is gone. Don't refer it. 1998 */ 1999 static void 2000 mtw_key_delete_cb(void *arg) 2001 { 2002 struct mtw_cmdq *cmdq = arg; 2003 struct mtw_softc *sc = cmdq->arg1; 2004 struct ieee80211_key *k = &cmdq->key; 2005 uint32_t attr; 2006 uint8_t wcid; 2007 2008 MTW_LOCK_ASSERT(sc, MA_OWNED); 2009 2010 if (k->wk_flags & IEEE80211_KEY_GROUP) { 2011 /* remove group key */ 2012 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "removing group key\n"); 2013 mtw_read(sc, MTW_SKEY_MODE_0_7, &attr); 2014 attr &= ~(0xf << (k->wk_keyix * 4)); 2015 mtw_write(sc, MTW_SKEY_MODE_0_7, attr); 2016 } else { 2017 /* remove pairwise key */ 2018 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "removing key for wcid %x\n", 2019 k->wk_pad); 2020 /* matching wcid was written to wk_pad in mtw_key_set() */ 2021 wcid = k->wk_pad; 2022 mtw_read(sc, MTW_WCID_ATTR(wcid), &attr); 2023 attr &= ~0xf; 2024 mtw_write(sc, MTW_WCID_ATTR(wcid), attr); 2025 } 2026 2027 k->wk_pad = 0; 2028 } 2029 2030 /* 2031 * return 0 on error 2032 */ 2033 static int 2034 mtw_key_delete(struct ieee80211vap *vap, struct ieee80211_key *k) 2035 { 2036 struct ieee80211com *ic = vap->iv_ic; 2037 struct mtw_softc *sc = ic->ic_softc; 2038 struct ieee80211_key *k0; 2039 uint32_t i; 2040 if (sc->sc_flags & MTW_RUNNING) 2041 return (1); 2042 2043 /* 2044 * When called back, key might be gone. So, make a copy 2045 * of some values need to delete keys before deferring. 2046 * But, because of LOR with node lock, cannot use lock here. 2047 * So, use atomic instead. 2048 */ 2049 i = MTW_CMDQ_GET(&sc->cmdq_store); 2050 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "cmdq_store=%d\n", i); 2051 sc->cmdq[i].func = mtw_key_delete_cb; 2052 sc->cmdq[i].arg0 = NULL; 2053 sc->cmdq[i].arg1 = sc; 2054 k0 = &sc->cmdq[i].key; 2055 k0->wk_flags = k->wk_flags; 2056 k0->wk_keyix = k->wk_keyix; 2057 /* matching wcid was written to wk_pad in mtw_key_set() */ 2058 k0->wk_pad = k->wk_pad; 2059 ieee80211_runtask(ic, &sc->cmdq_task); 2060 return (1); /* return fake success */ 2061 } 2062 2063 static void 2064 mtw_ratectl_to(void *arg) 2065 { 2066 struct mtw_softc *sc = arg; 2067 /* do it in a process context, so it can go sleep */ 2068 ieee80211_runtask(&sc->sc_ic, &sc->ratectl_task); 2069 /* next timeout will be rescheduled in the callback task */ 2070 } 2071 2072 /* ARGSUSED */ 2073 static void 2074 mtw_ratectl_cb(void *arg, int pending) 2075 { 2076 2077 struct mtw_softc *sc = arg; 2078 struct ieee80211com *ic = &sc->sc_ic; 2079 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2080 2081 if (vap == NULL) 2082 return; 2083 2084 ieee80211_iterate_nodes(&ic->ic_sta, mtw_iter_func, sc); 2085 2086 usb_callout_reset(&sc->ratectl_ch, hz, mtw_ratectl_to, sc); 2087 2088 2089 } 2090 2091 static void 2092 mtw_drain_fifo(void *arg) 2093 { 2094 struct mtw_softc *sc = arg; 2095 uint32_t stat; 2096 uint16_t(*wstat)[3]; 2097 uint8_t wcid, mcs, pid; 2098 int8_t retry; 2099 2100 MTW_LOCK_ASSERT(sc, MA_OWNED); 2101 2102 for (;;) { 2103 /* drain Tx status FIFO (maxsize = 16) */ 2104 mtw_read(sc, MTW_TX_STAT_FIFO, &stat); 2105 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "tx stat 0x%08x\n", stat); 2106 if (!(stat & MTW_TXQ_VLD)) 2107 break; 2108 2109 wcid = (stat >> MTW_TXQ_WCID_SHIFT) & 0xff; 2110 2111 /* if no ACK was requested, no feedback is available */ 2112 if (!(stat & MTW_TXQ_ACKREQ) || wcid > MTW_WCID_MAX || 2113 wcid == 0) 2114 continue; 2115 2116 /* 2117 * Even though each stat is Tx-complete-status like format, 2118 * the 
device can poll stats. Because there is no guarantee 2119 * that the referring node is still around when read the stats. 2120 * So that, if we use ieee80211_ratectl_tx_update(), we will 2121 * have hard time not to refer already freed node. 2122 * 2123 * To eliminate such page faults, we poll stats in softc. 2124 * Then, update the rates later with 2125 * ieee80211_ratectl_tx_update(). 2126 */ 2127 wstat = &(sc->wcid_stats[wcid]); 2128 (*wstat)[MTW_TXCNT]++; 2129 if (stat & MTW_TXQ_OK) 2130 (*wstat)[MTW_SUCCESS]++; 2131 else 2132 counter_u64_add(sc->sc_ic.ic_oerrors, 1); 2133 /* 2134 * Check if there were retries, ie if the Tx success rate is 2135 * different from the requested rate. Note that it works only 2136 * because we do not allow rate fallback from OFDM to CCK. 2137 */ 2138 mcs = (stat >> MTW_TXQ_MCS_SHIFT) & 0x7f; 2139 pid = (stat >> MTW_TXQ_PID_SHIFT) & 0xf; 2140 if ((retry = pid - 1 - mcs) > 0) { 2141 (*wstat)[MTW_TXCNT] += retry; 2142 (*wstat)[MTW_RETRY] += retry; 2143 } 2144 } 2145 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "count=%d\n", sc->fifo_cnt); 2146 2147 sc->fifo_cnt = 0; 2148 } 2149 2150 static void 2151 mtw_iter_func(void *arg, struct ieee80211_node *ni) 2152 { 2153 struct mtw_softc *sc = arg; 2154 MTW_LOCK(sc); 2155 struct ieee80211_ratectl_tx_stats *txs = &sc->sc_txs; 2156 struct ieee80211vap *vap = ni->ni_vap; 2157 struct mtw_node *rn = MTW_NODE(ni); 2158 uint32_t sta[3]; 2159 uint16_t(*wstat)[3]; 2160 int error, ridx; 2161 uint8_t txrate = 0; 2162 2163 /* Check for special case */ 2164 if (sc->rvp_cnt <= 1 && vap->iv_opmode == IEEE80211_M_STA && 2165 ni != vap->iv_bss) 2166 goto fail; 2167 2168 txs->flags = IEEE80211_RATECTL_TX_STATS_NODE | 2169 IEEE80211_RATECTL_TX_STATS_RETRIES; 2170 txs->ni = ni; 2171 if (sc->rvp_cnt <= 1 && 2172 (vap->iv_opmode == IEEE80211_M_IBSS || 2173 vap->iv_opmode == IEEE80211_M_STA)) { 2174 /* 2175 * read statistic counters (clear on read) and update AMRR state 2176 */ 2177 error = mtw_read_region_1(sc, MTW_TX_STA_CNT0, (uint8_t *)sta, 2178 sizeof sta); 2179 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "error:%d\n", error); 2180 if (error != 0) 2181 goto fail; 2182 2183 /* count failed TX as errors */ 2184 if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS, 2185 le32toh(sta[0]) & 0xffff); 2186 2187 txs->nretries = (le32toh(sta[1]) >> 16); 2188 txs->nsuccess = (le32toh(sta[1]) & 0xffff); 2189 /* nretries??? 
*/
2190 txs->nframes = txs->nsuccess + (le32toh(sta[0]) & 0xffff);
2191
2192 MTW_DPRINTF(sc, MTW_DEBUG_RATE,
2193 "retrycnt=%d success=%d failcnt=%d\n", txs->nretries,
2194 txs->nsuccess, le32toh(sta[0]) & 0xffff);
2195 } else {
2196 wstat = &(sc->wcid_stats[MTW_AID2WCID(ni->ni_associd)]);
2197
2198 if (wstat == &(sc->wcid_stats[0]) ||
2199 wstat > &(sc->wcid_stats[MTW_WCID_MAX]))
2200 goto fail;
2201
2202 txs->nretries = (*wstat)[MTW_RETRY];
2203 txs->nsuccess = (*wstat)[MTW_SUCCESS];
2204 txs->nframes = (*wstat)[MTW_TXCNT];
2205 MTW_DPRINTF(sc, MTW_DEBUG_RATE,
2206 "wstat retrycnt=%d txcnt=%d success=%d\n", txs->nretries,
2207 txs->nframes, txs->nsuccess);
2208
2209 memset(wstat, 0, sizeof(*wstat));
2210 }
2211
2212 ieee80211_ratectl_tx_update(vap, txs);
2213 ieee80211_ratectl_rate(ni, NULL, 0);
2214 txrate = ieee80211_node_get_txrate_dot11rate(ni);
2215
2216 /* XXX TODO: methodize with MCS rates */
2217 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) {
2218 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "ni_txrate=0x%x\n",
2219 txrate);
2220 if (rt2860_rates[ridx].rate == txrate) {
2221 break;
2222 }
2223 }
2224 rn->amrr_ridx = ridx;
2225 fail:
2226 MTW_UNLOCK(sc);
2227
2228 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "rate=%d, ridx=%d\n",
2229 txrate, rn->amrr_ridx);
2230 }
2231
2232 static void
2233 mtw_newassoc_cb(void *arg)
2234 {
2235 struct mtw_cmdq *cmdq = arg;
2236 struct ieee80211_node *ni = cmdq->arg1;
2237 struct mtw_softc *sc = ni->ni_vap->iv_ic->ic_softc;
2238
2239 uint8_t wcid = cmdq->wcid;
2240
2241 MTW_LOCK_ASSERT(sc, MA_OWNED);
2242
2243 mtw_write_region_1(sc, MTW_WCID_ENTRY(wcid), ni->ni_macaddr,
2244 IEEE80211_ADDR_LEN);
2245
2246 memset(&(sc->wcid_stats[wcid]), 0, sizeof(sc->wcid_stats[wcid]));
2247 }
2248
2249 static void
2250 mtw_newassoc(struct ieee80211_node *ni, int isnew)
2251 {
2252
2253 struct mtw_node *mn = MTW_NODE(ni);
2254 struct ieee80211vap *vap = ni->ni_vap;
2255 struct ieee80211com *ic = vap->iv_ic;
2256 struct mtw_softc *sc = ic->ic_softc;
2257
2258 uint8_t rate;
2259 uint8_t ridx;
2260 uint8_t wcid;
2261 //int i;
2262 // int i,j;
2263 wcid = MTW_AID2WCID(ni->ni_associd);
2264
2265 if (wcid > MTW_WCID_MAX) {
2266 device_printf(sc->sc_dev, "wcid=%d out of range\n", wcid);
2267 return;
2268 }
2269
2270 /* only interested in true associations */
2271 if (isnew && ni->ni_associd != 0) {
2272 /*
2273 * This function can be called from a timeout handler, which is a
2274 * non-sleepable context, so the hardware update must be deferred.
2275 */
2276
2277 uint32_t cnt = MTW_CMDQ_GET(&sc->cmdq_store);
2278 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "cmdq_store=%d\n", cnt);
2279 sc->cmdq[cnt].func = mtw_newassoc_cb;
2280 sc->cmdq[cnt].arg0 = NULL;
2281 sc->cmdq[cnt].arg1 = ni;
2282 sc->cmdq[cnt].wcid = wcid;
2283 ieee80211_runtask(ic, &sc->cmdq_task);
2284 }
2285
2286 MTW_DPRINTF(sc, MTW_DEBUG_STATE,
2287 "new assoc isnew=%d associd=%x addr=%s\n", isnew, ni->ni_associd,
2288 ether_sprintf(ni->ni_macaddr));
2289 rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate;
2290 /* XXX TODO: methodize with MCS rates */
2291 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
2292 if (rt2860_rates[ridx].rate == rate)
2293 break;
2294 mn->mgt_ridx = ridx;
2295 MTW_DPRINTF(sc, MTW_DEBUG_STATE | MTW_DEBUG_RATE,
2296 "rate=%d, ctl_ridx=%d\n", rate, ridx);
2297 MTW_LOCK(sc);
2298 if (sc->ratectl_run != MTW_RATECTL_OFF) {
2299 usb_callout_reset(&sc->ratectl_ch, hz, &mtw_ratectl_to, sc);
2300 }
2301 MTW_UNLOCK(sc);
2302
2303 }
2304
2305 /*
2306 * Return the Rx chain with the highest RSSI for a given frame.
2307 */ 2308 static __inline uint8_t 2309 mtw_maxrssi_chain(struct mtw_softc *sc, const struct mtw_rxwi *rxwi) 2310 { 2311 uint8_t rxchain = 0; 2312 2313 if (sc->nrxchains > 1) { 2314 if (rxwi->rssi[1] > rxwi->rssi[rxchain]) 2315 rxchain = 1; 2316 if (sc->nrxchains > 2) 2317 if (rxwi->rssi[2] > rxwi->rssi[rxchain]) 2318 rxchain = 2; 2319 } 2320 return (rxchain); 2321 } 2322 static void 2323 mtw_get_tsf(struct mtw_softc *sc, uint64_t *buf) 2324 { 2325 mtw_read_region_1(sc, MTW_TSF_TIMER_DW0, (uint8_t *)buf, sizeof(*buf)); 2326 } 2327 2328 static void 2329 mtw_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, 2330 const struct ieee80211_rx_stats *rxs, int rssi, int nf) 2331 { 2332 struct ieee80211vap *vap = ni->ni_vap; 2333 struct mtw_softc *sc = vap->iv_ic->ic_softc; 2334 struct mtw_vap *rvp = MTW_VAP(vap); 2335 uint64_t ni_tstamp, rx_tstamp; 2336 2337 rvp->recv_mgmt(ni, m, subtype, rxs, rssi, nf); 2338 2339 if (vap->iv_state == IEEE80211_S_RUN && 2340 (subtype == IEEE80211_FC0_SUBTYPE_BEACON || 2341 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { 2342 ni_tstamp = le64toh(ni->ni_tstamp.tsf); 2343 MTW_LOCK(sc); 2344 mtw_get_tsf(sc, &rx_tstamp); 2345 MTW_UNLOCK(sc); 2346 rx_tstamp = le64toh(rx_tstamp); 2347 2348 if (ni_tstamp >= rx_tstamp) { 2349 MTW_DPRINTF(sc, MTW_DEBUG_RECV | MTW_DEBUG_BEACON, 2350 "ibss merge, tsf %ju tstamp %ju\n", 2351 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); 2352 (void)ieee80211_ibss_merge(ni); 2353 } 2354 } 2355 } 2356 static void 2357 mtw_rx_frame(struct mtw_softc *sc, struct mbuf *m, uint32_t dmalen) 2358 { 2359 struct ieee80211com *ic = &sc->sc_ic; 2360 struct ieee80211_frame *wh; 2361 struct ieee80211_node *ni; 2362 struct epoch_tracker et; 2363 2364 struct mtw_rxwi *rxwi; 2365 uint32_t flags; 2366 uint16_t len, rxwisize; 2367 uint8_t ant, rssi; 2368 int8_t nf; 2369 2370 rxwisize = sizeof(struct mtw_rxwi); 2371 2372 if (__predict_false( 2373 dmalen < rxwisize + sizeof(struct ieee80211_frame_ack))) { 2374 MTW_DPRINTF(sc, MTW_DEBUG_RECV, 2375 "payload is too short: dma length %u < %zu\n", dmalen, 2376 rxwisize + sizeof(struct ieee80211_frame_ack)); 2377 goto fail; 2378 } 2379 2380 rxwi = mtod(m, struct mtw_rxwi *); 2381 len = le16toh(rxwi->len) & 0xfff; 2382 flags = le32toh(rxwi->flags); 2383 if (__predict_false(len > dmalen - rxwisize)) { 2384 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "bad RXWI length %u > %u\n", 2385 len, dmalen); 2386 goto fail; 2387 } 2388 2389 if (__predict_false(flags & (MTW_RX_CRCERR | MTW_RX_ICVERR))) { 2390 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "%s error.\n", 2391 (flags & MTW_RX_CRCERR) ? "CRC" : "ICV"); 2392 goto fail; 2393 } 2394 2395 if (flags & MTW_RX_L2PAD) { 2396 MTW_DPRINTF(sc, MTW_DEBUG_RECV, 2397 "received RT2860_RX_L2PAD frame\n"); 2398 len += 2; 2399 } 2400 2401 m->m_data += rxwisize; 2402 m->m_pkthdr.len = m->m_len = len; 2403 2404 wh = mtod(m, struct ieee80211_frame *); 2405 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2406 wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; 2407 m->m_flags |= M_WEP; 2408 } 2409 2410 if (len >= sizeof(struct ieee80211_frame_min)) { 2411 ni = ieee80211_find_rxnode(ic, 2412 mtod(m, struct ieee80211_frame_min *)); 2413 } else 2414 ni = NULL; 2415 2416 if (ni && ni->ni_flags & IEEE80211_NODE_HT) { 2417 m->m_flags |= M_AMPDU; 2418 } 2419 2420 if (__predict_false(flags & MTW_RX_MICERR)) { 2421 /* report MIC failures to net80211 for TKIP */ 2422 if (ni != NULL) 2423 ieee80211_notify_michael_failure(ni->ni_vap, wh, 2424 rxwi->keyidx); 2425 MTW_DPRINTF(sc, MTW_DEBUG_RECV, 2426 "MIC error. 
Someone is lying.\n"); 2427 goto fail; 2428 } 2429 2430 ant = mtw_maxrssi_chain(sc, rxwi); 2431 rssi = rxwi->rssi[ant]; 2432 nf = mtw_rssi2dbm(sc, rssi, ant); 2433 2434 if (__predict_false(ieee80211_radiotap_active(ic))) { 2435 struct mtw_rx_radiotap_header *tap = &sc->sc_rxtap; 2436 uint16_t phy; 2437 2438 tap->wr_flags = 0; 2439 if (flags & MTW_RX_L2PAD) 2440 tap->wr_flags |= IEEE80211_RADIOTAP_F_DATAPAD; 2441 tap->wr_antsignal = rssi; 2442 tap->wr_antenna = ant; 2443 tap->wr_dbm_antsignal = mtw_rssi2dbm(sc, rssi, ant); 2444 tap->wr_rate = 2; /* in case it can't be found below */ 2445 //MTW_LOCK(sc); 2446 2447 // MTW_UNLOCK(sc); 2448 phy = le16toh(rxwi->phy); 2449 switch (phy >> MT7601_PHY_SHIFT) { 2450 case MTW_PHY_CCK: 2451 switch ((phy & MTW_PHY_MCS) & ~MTW_PHY_SHPRE) { 2452 case 0: 2453 tap->wr_rate = 2; 2454 break; 2455 case 1: 2456 tap->wr_rate = 4; 2457 break; 2458 case 2: 2459 tap->wr_rate = 11; 2460 break; 2461 case 3: 2462 tap->wr_rate = 22; 2463 break; 2464 } 2465 if (phy & MTW_PHY_SHPRE) 2466 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2467 break; 2468 case MTW_PHY_OFDM: 2469 switch (phy & MTW_PHY_MCS) { 2470 case 0: 2471 tap->wr_rate = 12; 2472 break; 2473 case 1: 2474 tap->wr_rate = 18; 2475 break; 2476 case 2: 2477 tap->wr_rate = 24; 2478 break; 2479 case 3: 2480 tap->wr_rate = 36; 2481 break; 2482 case 4: 2483 tap->wr_rate = 48; 2484 break; 2485 case 5: 2486 tap->wr_rate = 72; 2487 break; 2488 case 6: 2489 tap->wr_rate = 96; 2490 break; 2491 case 7: 2492 tap->wr_rate = 108; 2493 break; 2494 } 2495 break; 2496 } 2497 } 2498 2499 NET_EPOCH_ENTER(et); 2500 if (ni != NULL) { 2501 (void)ieee80211_input(ni, m, rssi, nf); 2502 ieee80211_free_node(ni); 2503 } else { 2504 (void)ieee80211_input_all(ic, m, rssi, nf); 2505 } 2506 NET_EPOCH_EXIT(et); 2507 2508 return; 2509 2510 fail: 2511 m_freem(m); 2512 counter_u64_add(ic->ic_ierrors, 1); 2513 } 2514 2515 static void 2516 mtw_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) 2517 { 2518 struct mtw_softc *sc = usbd_xfer_softc(xfer); 2519 struct ieee80211com *ic = &sc->sc_ic; 2520 struct mbuf *m = NULL; 2521 struct mbuf *m0; 2522 uint32_t dmalen, mbuf_len; 2523 uint16_t rxwisize; 2524 int xferlen; 2525 2526 rxwisize = sizeof(struct mtw_rxwi); 2527 2528 usbd_xfer_status(xfer, &xferlen, NULL, NULL, NULL); 2529 2530 switch (USB_GET_STATE(xfer)) { 2531 case USB_ST_TRANSFERRED: 2532 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "rx done, actlen=%d\n", 2533 xferlen); 2534 if (xferlen < (int)(sizeof(uint32_t) + rxwisize + 2535 sizeof(struct mtw_rxd))) { 2536 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB, 2537 "xfer too short %d %d\n", xferlen, 2538 (int)(sizeof(uint32_t) + rxwisize + 2539 sizeof(struct mtw_rxd))); 2540 goto tr_setup; 2541 } 2542 2543 m = sc->rx_m; 2544 sc->rx_m = NULL; 2545 2546 /* FALLTHROUGH */ 2547 case USB_ST_SETUP: 2548 tr_setup: 2549 2550 if (sc->rx_m == NULL) { 2551 sc->rx_m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, 2552 MTW_MAX_RXSZ); 2553 } 2554 if (sc->rx_m == NULL) { 2555 MTW_DPRINTF(sc, 2556 MTW_DEBUG_RECV | MTW_DEBUG_RECV_DESC | 2557 MTW_DEBUG_USB, 2558 "could not allocate mbuf - idle with stall\n"); 2559 counter_u64_add(ic->ic_ierrors, 1); 2560 usbd_xfer_set_stall(xfer); 2561 usbd_xfer_set_frames(xfer, 0); 2562 } else { 2563 /* 2564 * Directly loading a mbuf cluster into DMA to 2565 * save some data copying. This works because 2566 * there is only one cluster. 
2567 */ 2568 usbd_xfer_set_frame_data(xfer, 0, 2569 mtod(sc->rx_m, caddr_t), MTW_MAX_RXSZ); 2570 usbd_xfer_set_frames(xfer, 1); 2571 } 2572 usbd_transfer_submit(xfer); 2573 break; 2574 2575 default: /* Error */ 2576 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB, 2577 "USB transfer error, %s\n", usbd_errstr(error)); 2578 2579 if (error != USB_ERR_CANCELLED) { 2580 /* try to clear stall first */ 2581 usbd_xfer_set_stall(xfer); 2582 if (error == USB_ERR_TIMEOUT) 2583 device_printf(sc->sc_dev, "device timeout %s\n", 2584 __func__); 2585 counter_u64_add(ic->ic_ierrors, 1); 2586 goto tr_setup; 2587 } 2588 if (sc->rx_m != NULL) { 2589 m_freem(sc->rx_m); 2590 sc->rx_m = NULL; 2591 } 2592 break; 2593 } 2594 2595 if (m == NULL) 2596 return; 2597 2598 /* inputting all the frames must be last */ 2599 2600 MTW_UNLOCK(sc); 2601 2602 m->m_pkthdr.len = m->m_len = xferlen; 2603 2604 /* HW can aggregate multiple 802.11 frames in a single USB xfer */ 2605 for (;;) { 2606 dmalen = le32toh(*mtod(m, uint32_t *)) & 0xffff; 2607 2608 if ((dmalen >= (uint32_t)-8) || (dmalen == 0) || 2609 ((dmalen & 3) != 0)) { 2610 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB, 2611 "bad DMA length %u\n", dmalen); 2612 break; 2613 } 2614 if ((dmalen + 8) > (uint32_t)xferlen) { 2615 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB, 2616 "bad DMA length %u > %d\n", dmalen + 8, xferlen); 2617 break; 2618 } 2619 2620 /* If it is the last one or a single frame, we won't copy. */ 2621 if ((xferlen -= dmalen + 8) <= 8) { 2622 /* trim 32-bit DMA-len header */ 2623 m->m_data += 4; 2624 m->m_pkthdr.len = m->m_len -= 4; 2625 mtw_rx_frame(sc, m, dmalen); 2626 m = NULL; /* don't free source buffer */ 2627 break; 2628 } 2629 2630 mbuf_len = dmalen + sizeof(struct mtw_rxd); 2631 if (__predict_false(mbuf_len > MCLBYTES)) { 2632 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB, 2633 "payload is too big: mbuf_len %u\n", mbuf_len); 2634 counter_u64_add(ic->ic_ierrors, 1); 2635 break; 2636 } 2637 2638 /* copy aggregated frames to another mbuf */ 2639 m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 2640 if (__predict_false(m0 == NULL)) { 2641 MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC, 2642 "could not allocate mbuf\n"); 2643 counter_u64_add(ic->ic_ierrors, 1); 2644 break; 2645 } 2646 m_copydata(m, 4 /* skip 32-bit DMA-len header */, mbuf_len, 2647 mtod(m0, caddr_t)); 2648 m0->m_pkthdr.len = m0->m_len = mbuf_len; 2649 mtw_rx_frame(sc, m0, dmalen); 2650 2651 /* update data ptr */ 2652 m->m_data += mbuf_len + 4; 2653 m->m_pkthdr.len = m->m_len -= mbuf_len + 4; 2654 } 2655 2656 /* make sure we free the source buffer, if any */ 2657 m_freem(m); 2658 2659 #ifdef IEEE80211_SUPPORT_SUPERG 2660 ieee80211_ff_age_all(ic, 100); 2661 #endif 2662 MTW_LOCK(sc); 2663 } 2664 2665 static void 2666 mtw_tx_free(struct mtw_endpoint_queue *pq, struct mtw_tx_data *data, int txerr) 2667 { 2668 2669 ieee80211_tx_complete(data->ni, data->m, txerr); 2670 data->m = NULL; 2671 data->ni = NULL; 2672 2673 STAILQ_INSERT_TAIL(&pq->tx_fh, data, next); 2674 pq->tx_nfree++; 2675 } 2676 static void 2677 mtw_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error, u_int index) 2678 { 2679 struct mtw_softc *sc = usbd_xfer_softc(xfer); 2680 struct ieee80211com *ic = &sc->sc_ic; 2681 struct mtw_tx_data *data; 2682 struct ieee80211vap *vap = NULL; 2683 struct usb_page_cache *pc; 2684 struct mtw_endpoint_queue *pq = &sc->sc_epq[index]; 2685 struct mbuf *m; 2686 usb_frlength_t size; 2687 int actlen; 2688 int sumlen; 2689 usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); 2690 2691 switch 
(USB_GET_STATE(xfer)) { 2692 case USB_ST_TRANSFERRED: 2693 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB, 2694 "transfer complete: %d bytes @ index %d\n", actlen, index); 2695 2696 data = usbd_xfer_get_priv(xfer); 2697 mtw_tx_free(pq, data, 0); 2698 usbd_xfer_set_priv(xfer, NULL); 2699 2700 /* FALLTHROUGH */ 2701 case USB_ST_SETUP: 2702 tr_setup: 2703 data = STAILQ_FIRST(&pq->tx_qh); 2704 if (data == NULL) 2705 break; 2706 2707 STAILQ_REMOVE_HEAD(&pq->tx_qh, next); 2708 2709 m = data->m; 2710 2711 size = sizeof(data->desc); 2712 if ((m->m_pkthdr.len + size + 3 + 8) > MTW_MAX_TXSZ) { 2713 MTW_DPRINTF(sc, MTW_DEBUG_XMIT_DESC | MTW_DEBUG_USB, 2714 "data overflow, %u bytes\n", m->m_pkthdr.len); 2715 mtw_tx_free(pq, data, 1); 2716 goto tr_setup; 2717 } 2718 2719 pc = usbd_xfer_get_frame(xfer, 0); 2720 usbd_copy_in(pc, 0, &data->desc, size); 2721 usbd_m_copy_in(pc, size, m, 0, m->m_pkthdr.len); 2722 size += m->m_pkthdr.len; 2723 /* 2724 * Align end on a 4-byte boundary, pad 8 bytes (CRC + 2725 * 4-byte padding), and be sure to zero those trailing 2726 * bytes: 2727 */ 2728 usbd_frame_zero(pc, size, ((-size) & 3) + MTW_DMA_PAD); 2729 size += ((-size) & 3) + MTW_DMA_PAD; 2730 2731 vap = data->ni->ni_vap; 2732 if (ieee80211_radiotap_active_vap(vap)) { 2733 const struct ieee80211_frame *wh; 2734 struct mtw_tx_radiotap_header *tap = &sc->sc_txtap; 2735 struct mtw_txwi *txwi = 2736 (struct mtw_txwi *)(&data->desc + 2737 sizeof(struct mtw_txd)); 2738 int has_l2pad; 2739 2740 wh = mtod(m, struct ieee80211_frame *); 2741 has_l2pad = IEEE80211_HAS_ADDR4(wh) != 2742 IEEE80211_QOS_HAS_SEQ(wh); 2743 2744 tap->wt_flags = 0; 2745 tap->wt_rate = rt2860_rates[data->ridx].rate; 2746 tap->wt_hwqueue = index; 2747 if (le16toh(txwi->phy) & MTW_PHY_SHPRE) 2748 tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2749 if (has_l2pad) 2750 tap->wt_flags |= IEEE80211_RADIOTAP_F_DATAPAD; 2751 2752 ieee80211_radiotap_tx(vap, m); 2753 } 2754 2755 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB, 2756 "sending frame len=%u/%u @ index %d\n", m->m_pkthdr.len, 2757 size, index); 2758 2759 usbd_xfer_set_frame_len(xfer, 0, size); 2760 usbd_xfer_set_priv(xfer, data); 2761 usbd_transfer_submit(xfer); 2762 mtw_start(sc); 2763 2764 break; 2765 2766 default: 2767 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB, 2768 "USB transfer error, %s\n", usbd_errstr(error)); 2769 2770 data = usbd_xfer_get_priv(xfer); 2771 2772 if (data != NULL) { 2773 if (data->ni != NULL) 2774 vap = data->ni->ni_vap; 2775 mtw_tx_free(pq, data, error); 2776 usbd_xfer_set_priv(xfer, NULL); 2777 } 2778 2779 if (vap == NULL) 2780 vap = TAILQ_FIRST(&ic->ic_vaps); 2781 2782 if (error != USB_ERR_CANCELLED) { 2783 if (error == USB_ERR_TIMEOUT) { 2784 device_printf(sc->sc_dev, "device timeout %s\n", 2785 __func__); 2786 uint32_t i = MTW_CMDQ_GET(&sc->cmdq_store); 2787 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB, 2788 "cmdq_store=%d\n", i); 2789 sc->cmdq[i].func = mtw_usb_timeout_cb; 2790 sc->cmdq[i].arg0 = vap; 2791 ieee80211_runtask(ic, &sc->cmdq_task); 2792 } 2793 2794 /* 2795 * Try to clear stall first, also if other 2796 * errors occur, hence clearing stall 2797 * introduces a 50 ms delay: 2798 */ 2799 usbd_xfer_set_stall(xfer); 2800 goto tr_setup; 2801 } 2802 break; 2803 } 2804 #ifdef IEEE80211_SUPPORT_SUPERG 2805 /* XXX TODO: make this deferred rather than unlock/relock */ 2806 /* XXX TODO: should only do the QoS AC this belongs to */ 2807 if (pq->tx_nfree >= MTW_TX_RING_COUNT) { 2808 MTW_UNLOCK(sc); 2809 ieee80211_ff_flush_all(ic); 2810 MTW_LOCK(sc); 2811 } 2812 
#endif 2813 } 2814 2815 static void 2816 mtw_fw_callback(struct usb_xfer *xfer, usb_error_t error) 2817 { 2818 struct mtw_softc *sc = usbd_xfer_softc(xfer); 2819 2820 int actlen; 2821 int ntries, tmp; 2822 // struct mtw_txd *data; 2823 2824 usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); 2825 // data = usbd_xfer_get_priv(xfer); 2826 usbd_xfer_set_priv(xfer, NULL); 2827 switch (USB_GET_STATE(xfer)) { 2828 2829 case USB_ST_TRANSFERRED: 2830 sc->sc_sent += actlen; 2831 memset(sc->txd_fw[sc->sc_idx], 0, actlen); 2832 2833 if (actlen < 0x2c44 && sc->sc_idx == 0) { 2834 return; 2835 } 2836 if (sc->sc_idx == 3) { 2837 2838 if ((error = mtw_write_ivb(sc, sc->sc_ivb_1, 2839 MTW_MCU_IVB_LEN)) != 0) { 2840 device_printf(sc->sc_dev, 2841 "Could not write ivb error: %d\n", error); 2842 } 2843 2844 mtw_delay(sc, 10); 2845 for (ntries = 0; ntries < 100; ntries++) { 2846 if ((error = mtw_read_cfg(sc, MTW_MCU_DMA_ADDR, 2847 &tmp)) != 0) { 2848 device_printf(sc->sc_dev, 2849 "Could not read cfg error: %d\n", error); 2850 2851 } 2852 if (tmp == MTW_MCU_READY) { 2853 MTW_DPRINTF(sc, MTW_DEBUG_FIRMWARE, 2854 "mcu reaady %d\n", tmp); 2855 sc->fwloading = 1; 2856 break; 2857 } 2858 2859 mtw_delay(sc, 10); 2860 } 2861 if (ntries == 100) 2862 sc->fwloading = 0; 2863 wakeup(&sc->fwloading); 2864 return; 2865 } 2866 2867 if (actlen == 0x2c44) { 2868 sc->sc_idx++; 2869 DELAY(1000); 2870 } 2871 2872 case USB_ST_SETUP: { 2873 int dlen = 0; 2874 dlen = sc->txd_fw[sc->sc_idx]->len; 2875 2876 mtw_write_cfg(sc, MTW_MCU_DMA_ADDR, 0x40 + sc->sc_sent); 2877 mtw_write_cfg(sc, MTW_MCU_DMA_LEN, (dlen << 16)); 2878 2879 usbd_xfer_set_frame_len(xfer, 0, dlen); 2880 usbd_xfer_set_frame_data(xfer, 0, sc->txd_fw[sc->sc_idx], dlen); 2881 2882 // usbd_xfer_set_priv(xfer,sc->txd[sc->sc_idx]); 2883 usbd_transfer_submit(xfer); 2884 break; 2885 2886 default: /* Error */ 2887 device_printf(sc->sc_dev, "%s:%d %s\n", __FILE__, __LINE__, 2888 usbd_errstr(error)); 2889 sc->fwloading = 0; 2890 wakeup(&sc->fwloading); 2891 /* 2892 * Print error message and clear stall 2893 * for example. 2894 */ 2895 break; 2896 } 2897 /* 2898 * Here it is safe to do something without the private 2899 * USB mutex locked. 
2900 */
2901 }
2902 return;
2903 }
2904 static void
2905 mtw_bulk_tx_callback0(struct usb_xfer *xfer, usb_error_t error)
2906 {
2907 mtw_bulk_tx_callbackN(xfer, error, 0);
2908 }
2909
2910 static void
2911 mtw_bulk_tx_callback1(struct usb_xfer *xfer, usb_error_t error)
2912 {
2913
2914
2915 mtw_bulk_tx_callbackN(xfer, error, 1);
2916 }
2917
2918 static void
2919 mtw_bulk_tx_callback2(struct usb_xfer *xfer, usb_error_t error)
2920 {
2921 mtw_bulk_tx_callbackN(xfer, error, 2);
2922 }
2923
2924 static void
2925 mtw_bulk_tx_callback3(struct usb_xfer *xfer, usb_error_t error)
2926 {
2927 mtw_bulk_tx_callbackN(xfer, error, 3);
2928 }
2929
2930 static void
2931 mtw_bulk_tx_callback4(struct usb_xfer *xfer, usb_error_t error)
2932 {
2933 mtw_bulk_tx_callbackN(xfer, error, 4);
2934 }
2935
2936 static void
2937 mtw_bulk_tx_callback5(struct usb_xfer *xfer, usb_error_t error)
2938 {
2939 mtw_bulk_tx_callbackN(xfer, error, 5);
2940 }
2941
2942 static void
2943 mtw_set_tx_desc(struct mtw_softc *sc, struct mtw_tx_data *data)
2944 {
2945 struct mbuf *m = data->m;
2946 struct ieee80211com *ic = &sc->sc_ic;
2947 struct ieee80211vap *vap = data->ni->ni_vap;
2948 struct ieee80211_frame *wh;
2949 struct mtw_txd *txd;
2950 struct mtw_txwi *txwi;
2951 uint16_t xferlen, txwisize;
2952 uint16_t mcs;
2953 uint8_t ridx = data->ridx;
2954 uint8_t pad;
2955
2956 /* get MCS code from rate index */
2957 mcs = rt2860_rates[ridx].mcs;
2958
2959 txwisize = sizeof(*txwi);
2960 xferlen = txwisize + m->m_pkthdr.len;
2961
2962 /* roundup to 32-bit alignment */
2963 xferlen = (xferlen + 3) & ~3;
2964
2965 txd = (struct mtw_txd *)&data->desc;
2966 txd->len = htole16(xferlen);
2967
2968 wh = mtod(m, struct ieee80211_frame *);
2969
2970 /*
2971 * Either both are true or both are false; either way the header
2972 * is nicely aligned to a 32-bit boundary, so no L2 padding.
2973 */
2974 if (IEEE80211_HAS_ADDR4(wh) == IEEE80211_QOS_HAS_SEQ(wh))
2975 pad = 0;
2976 else
2977 pad = 2;
2978
2979 /* setup TX Wireless Information */
2980 txwi = (struct mtw_txwi *)(txd + 1);
2981 txwi->len = htole16(m->m_pkthdr.len - pad);
2982 if (rt2860_rates[ridx].phy == IEEE80211_T_DS) {
2983 mcs |= MTW_PHY_CCK;
2984 if (ridx != MTW_RIDX_CCK1 &&
2985 (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
2986 mcs |= MTW_PHY_SHPRE;
2987 } else if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM) {
2988 mcs |= MTW_PHY_OFDM;
2989 } else if (rt2860_rates[ridx].phy == IEEE80211_T_HT) {
2990 /* XXX TODO: [adrian] set short preamble for MCS?
*/ 2991 mcs |= MTW_PHY_HT; /* Mixed, not greenfield */ 2992 } 2993 txwi->phy = htole16(mcs); 2994 2995 /* check if RTS/CTS or CTS-to-self protection is required */ 2996 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && 2997 ((m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) || 2998 ((ic->ic_flags & IEEE80211_F_USEPROT) && 2999 rt2860_rates[ridx].phy == IEEE80211_T_OFDM) || 3000 ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && 3001 rt2860_rates[ridx].phy == IEEE80211_T_HT))) 3002 txwi->txop |= MTW_TX_TXOP_HT; 3003 else 3004 txwi->txop |= MTW_TX_TXOP_BACKOFF; 3005 3006 } 3007 3008 /* This function must be called locked */ 3009 static int 3010 mtw_tx(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 3011 { 3012 struct ieee80211com *ic = &sc->sc_ic; 3013 struct ieee80211vap *vap = ni->ni_vap; 3014 struct ieee80211_frame *wh; 3015 3016 3017 //const struct ieee80211_txparam *tp = ni->ni_txparms; 3018 struct mtw_node *rn = MTW_NODE(ni); 3019 struct mtw_tx_data *data; 3020 struct mtw_txd *txd; 3021 struct mtw_txwi *txwi; 3022 uint16_t qos; 3023 uint16_t dur; 3024 uint16_t qid; 3025 uint8_t type; 3026 uint8_t tid; 3027 uint16_t ridx; 3028 uint8_t ctl_ridx; 3029 uint16_t qflags; 3030 uint8_t xflags = 0; 3031 3032 int hasqos; 3033 3034 MTW_LOCK_ASSERT(sc, MA_OWNED); 3035 3036 wh = mtod(m, struct ieee80211_frame *); 3037 const struct ieee80211_txparam *tp = ni->ni_txparms; 3038 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3039 3040 qflags = htole16(MTW_TXD_DATA | MTW_TXD_80211 | 3041 MTW_TXD_WLAN | MTW_TXD_QSEL_HCCA); 3042 3043 if ((hasqos = IEEE80211_QOS_HAS_SEQ(wh))) { 3044 uint8_t *frm; 3045 frm = ieee80211_getqos(wh); 3046 3047 3048 //device_printf(sc->sc_dev,"JSS:frm:%d",*frm); 3049 qos = le16toh(*(const uint16_t *)frm); 3050 tid = ieee80211_gettid(wh); 3051 qid = TID_TO_WME_AC(tid); 3052 qflags |= MTW_TXD_QSEL_EDCA; 3053 } else { 3054 qos = 0; 3055 tid = 0; 3056 qid = WME_AC_BE; 3057 } 3058 if (type & IEEE80211_FC0_TYPE_MGT) { 3059 qid = 0; 3060 } 3061 3062 if (type != IEEE80211_FC0_TYPE_DATA) 3063 qflags |= htole16(MTW_TXD_WIV); 3064 3065 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 3066 type != IEEE80211_FC0_TYPE_DATA || m->m_flags & M_EAPOL) { 3067 /* XXX TODO: methodize for 11n; use MCS0 for 11NA/11NG */ 3068 ridx = (ic->ic_curmode == IEEE80211_MODE_11A 3069 || ic->ic_curmode == IEEE80211_MODE_11NA) ? 
3070 MTW_RIDX_OFDM6 : MTW_RIDX_CCK1;
3071 if (type == IEEE80211_MODE_11NG) {
3072 ridx = 12;
3073 }
3074 ctl_ridx = rt2860_rates[ridx].ctl_ridx;
3075 } else {
3076 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3077 ridx = rn->fix_ridx;
3078
3079 } else {
3080 ridx = rn->amrr_ridx;
3081 ctl_ridx = rt2860_rates[ridx].ctl_ridx;
3082 }
3083 }
3084
3085 if (hasqos)
3086 xflags = 0;
3087 else
3088 xflags = MTW_TX_NSEQ;
3089
3090 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3091 (!hasqos ||
3092 (qos & IEEE80211_QOS_ACKPOLICY) !=
3093 IEEE80211_QOS_ACKPOLICY_NOACK)) {
3094 xflags |= MTW_TX_ACK;
3095 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
3096 dur = rt2860_rates[ctl_ridx].sp_ack_dur;
3097 else
3098 dur = rt2860_rates[ctl_ridx].lp_ack_dur;
3099 USETW(wh->i_dur, dur);
3100 }
3101 /* reserve slots for mgmt packets, just in case */
3102 if (sc->sc_epq[qid].tx_nfree < 3) {
3103 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "tx ring %d is full\n", qid);
3104 return (-1);
3105 }
3106
3107 data = STAILQ_FIRST(&sc->sc_epq[qid].tx_fh);
3108 STAILQ_REMOVE_HEAD(&sc->sc_epq[qid].tx_fh, next);
3109 sc->sc_epq[qid].tx_nfree--;
3110
3111 txd = (struct mtw_txd *)&data->desc;
3112 txd->flags = qflags;
3113
3114 txwi = (struct mtw_txwi *)(txd + 1);
3115 txwi->xflags = xflags;
3116 txwi->wcid = (type == IEEE80211_FC0_TYPE_DATA) ?
3117
3118 MTW_AID2WCID(ni->ni_associd) :
3119 0xff;
3120
3121 /* clear leftover garbage bits */
3122 txwi->flags = 0;
3123 txwi->txop = 0;
3124
3125 data->m = m;
3126 data->ni = ni;
3127 data->ridx = ridx;
3128
3129 mtw_set_tx_desc(sc, data);
3130
3131 /*
3132 * The chip keeps track of two kinds of Tx stats:
3133 * * TX_STAT_FIFO, for per-WCID stats, and
3134 * * TX_STA_CNT0 for all-TX-in-one stats.
3135 *
3136 * To use the FIFO stats, we need to store the MCS in the
3137 * driver-private PacketID field, so that we can tell whose stats
3138 * they are when we read them. We add 1 to the MCS because setting
3139 * the PacketID field to 0 means we don't want feedback in
3140 * TX_STAT_FIFO, and that is what we want for STA mode, since
3141 * TX_STA_CNT0 does the job there.
3142 *
3143 * The FIFO stats don't count Tx with WCID 0xff, so we handle this
3144 * here in mtw_tx().
3145 */
3146
3147 if (sc->rvp_cnt > 1 || vap->iv_opmode == IEEE80211_M_HOSTAP ||
3148 vap->iv_opmode == IEEE80211_M_MBSS) {
3149
3150 /*
3151 * Unlike PCI based devices, we don't get any interrupt from
3152 * USB devices, so we simulate a FIFO-is-full interrupt here.
3153 * Ralink recommends draining the FIFO stats every 100 ms, but
3154 * the 16 slots fill up quickly. To prevent overflow, count
3155 * every FIFO stat request so we know how many slots are left.
3156 * We do this only in HOSTAP or multi-vap mode, since the FIFO
3157 * stats are used only in those modes; here we just drain them.
3158 * AMRR itself is updated once a second by mtw_ratectl_cb() via
3159 * the callout, so drain early or the FIFO overflows.
3160 */
3161 if (sc->fifo_cnt++ == 10) {
3162 /*
3163 * With multiple vaps or if_bridge, if_start() can be
3164 * called with a non-sleepable lock (tcp inp) held, so defer.
3165 */ 3166 uint32_t i = MTW_CMDQ_GET(&sc->cmdq_store); 3167 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "cmdq_store=%d\n", i); 3168 sc->cmdq[i].func = mtw_drain_fifo; 3169 sc->cmdq[i].arg0 = sc; 3170 ieee80211_runtask(ic, &sc->cmdq_task); 3171 } 3172 } 3173 3174 STAILQ_INSERT_TAIL(&sc->sc_epq[qid].tx_qh, data, next); 3175 usbd_transfer_start(sc->sc_xfer[mtw_wme_ac_xfer_map[qid]]); 3176 3177 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, 3178 "sending data frame len=%d rate=%d qid=%d\n", 3179 m->m_pkthdr.len + 3180 (int)(sizeof(struct mtw_txd) + sizeof(struct mtw_txwi)), 3181 rt2860_rates[ridx].rate, qid); 3182 3183 return (0); 3184 } 3185 3186 static int 3187 mtw_tx_mgt(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 3188 { 3189 struct ieee80211com *ic = &sc->sc_ic; 3190 struct mtw_node *rn = MTW_NODE(ni); 3191 struct mtw_tx_data *data; 3192 struct ieee80211_frame *wh; 3193 struct mtw_txd *txd; 3194 struct mtw_txwi *txwi; 3195 uint8_t type; 3196 uint16_t dur; 3197 uint8_t ridx = rn->mgt_ridx; 3198 uint8_t xflags = 0; 3199 uint8_t wflags = 0; 3200 3201 MTW_LOCK_ASSERT(sc, MA_OWNED); 3202 3203 wh = mtod(m, struct ieee80211_frame *); 3204 3205 /* tell hardware to add timestamp for probe responses */ 3206 if ((wh->i_fc[0] & 3207 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 3208 (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) 3209 wflags |= MTW_TX_TS; 3210 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3211 xflags |= MTW_TX_ACK; 3212 3213 dur = ieee80211_ack_duration(ic->ic_rt, rt2860_rates[ridx].rate, 3214 ic->ic_flags & IEEE80211_F_SHPREAMBLE); 3215 USETW(wh->i_dur, dur); 3216 } 3217 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3218 if (sc->sc_epq[0].tx_nfree == 0) 3219 /* let caller free mbuf */ 3220 return (EIO); 3221 data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh); 3222 STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next); 3223 sc->sc_epq[0].tx_nfree--; 3224 3225 txd = (struct mtw_txd *)&data->desc; 3226 txd->flags = htole16( 3227 MTW_TXD_DATA | MTW_TXD_80211 | MTW_TXD_WLAN | MTW_TXD_QSEL_EDCA); 3228 if (type != IEEE80211_FC0_TYPE_DATA) 3229 txd->flags |= htole16(MTW_TXD_WIV); 3230 3231 txwi = (struct mtw_txwi *)(txd + 1); 3232 txwi->wcid = 0xff; 3233 txwi->xflags = xflags; 3234 txwi->flags = wflags; 3235 3236 txwi->txop = 0; /* clear leftover garbage bits */ 3237 3238 data->m = m; 3239 data->ni = ni; 3240 data->ridx = ridx; 3241 3242 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending mgt frame len=%d rate=%d\n", 3243 m->m_pkthdr.len + 3244 (int)(sizeof(struct mtw_txd) + sizeof(struct mtw_txwi)), 3245 rt2860_rates[ridx].rate); 3246 3247 STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next); 3248 3249 usbd_transfer_start(sc->sc_xfer[MTW_BULK_TX_BE]); 3250 3251 return (0); 3252 } 3253 3254 static int 3255 mtw_sendprot(struct mtw_softc *sc, const struct mbuf *m, 3256 struct ieee80211_node *ni, int prot, int rate) 3257 { 3258 struct ieee80211com *ic = ni->ni_ic; 3259 struct mtw_tx_data *data; 3260 struct mtw_txd *txd; 3261 struct mtw_txwi *txwi; 3262 struct mbuf *mprot; 3263 int ridx; 3264 int protrate; 3265 uint8_t wflags = 0; 3266 uint8_t xflags = 0; 3267 3268 MTW_LOCK_ASSERT(sc, MA_OWNED); 3269 3270 /* check that there are free slots before allocating the mbuf */ 3271 if (sc->sc_epq[0].tx_nfree == 0) 3272 /* let caller free mbuf */ 3273 return (ENOBUFS); 3274 3275 mprot = ieee80211_alloc_prot(ni, m, rate, prot); 3276 if (mprot == NULL) { 3277 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); 3278 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "could not allocate mbuf\n"); 3279 return (ENOBUFS); 3280 } 
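/*
 * The protection frame built by ieee80211_alloc_prot() is queued much
 * like a regular frame below: it gets its own TXD/TXWI on EDCA queue 0
 * with WCID 0xff, is flagged MTW_TX_FRAG, and requests an ACK
 * (MTW_TX_ACK) only in the RTS/CTS case, since a CTS-to-self is not
 * acknowledged.  Its rate is the control rate ieee80211_ctl_rate()
 * derives from the data rate, mapped back to a ridx through the
 * rt2860_rates[] table.
 */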
3281 3282 protrate = ieee80211_ctl_rate(ic->ic_rt, rate); 3283 wflags = MTW_TX_FRAG; 3284 xflags = 0; 3285 if (prot == IEEE80211_PROT_RTSCTS) 3286 xflags |= MTW_TX_ACK; 3287 3288 data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh); 3289 STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next); 3290 sc->sc_epq[0].tx_nfree--; 3291 3292 txd = (struct mtw_txd *)&data->desc; 3293 txd->flags = RT2860_TX_QSEL_EDCA; 3294 txwi = (struct mtw_txwi *)(txd + 1); 3295 txwi->wcid = 0xff; 3296 txwi->flags = wflags; 3297 txwi->xflags = xflags; 3298 txwi->txop = 0; /* clear leftover garbage bits */ 3299 3300 data->m = mprot; 3301 data->ni = ieee80211_ref_node(ni); 3302 3303 /* XXX TODO: methodize with MCS rates */ 3304 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) 3305 if (rt2860_rates[ridx].rate == protrate) 3306 break; 3307 data->ridx = ridx; 3308 3309 mtw_set_tx_desc(sc, data); 3310 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending prot len=%u rate=%u\n", 3311 m->m_pkthdr.len, rate); 3312 3313 STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next); 3314 3315 usbd_transfer_start(sc->sc_xfer[0]); 3316 3317 return (0); 3318 } 3319 3320 static int 3321 mtw_tx_param(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni, 3322 const struct ieee80211_bpf_params *params) 3323 { 3324 struct ieee80211com *ic = ni->ni_ic; 3325 struct mtw_tx_data *data; 3326 struct mtw_txd *txd; 3327 struct mtw_txwi *txwi; 3328 uint8_t ridx; 3329 uint8_t rate; 3330 uint8_t opflags = 0; 3331 uint8_t xflags = 0; 3332 int error; 3333 3334 MTW_LOCK_ASSERT(sc, MA_OWNED); 3335 3336 KASSERT(params != NULL, ("no raw xmit params")); 3337 3338 rate = params->ibp_rate0; 3339 if (!ieee80211_isratevalid(ic->ic_rt, rate)) { 3340 /* let caller free mbuf */ 3341 return (EINVAL); 3342 } 3343 3344 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 3345 xflags |= MTW_TX_ACK; 3346 if (params->ibp_flags & (IEEE80211_BPF_RTS | IEEE80211_BPF_CTS)) { 3347 error = mtw_sendprot(sc, m, ni, 3348 params->ibp_flags & IEEE80211_BPF_RTS ? 
3349 IEEE80211_PROT_RTSCTS : 3350 IEEE80211_PROT_CTSONLY, 3351 rate); 3352 if (error) { 3353 device_printf(sc->sc_dev, "%s:%d %d\n", __FILE__, 3354 __LINE__, error); 3355 return (error); 3356 } 3357 opflags |= MTW_TX_TXOP_SIFS; 3358 } 3359 3360 if (sc->sc_epq[0].tx_nfree == 0) { 3361 /* let caller free mbuf */ 3362 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, 3363 "sending raw frame, but tx ring is full\n"); 3364 return (EIO); 3365 } 3366 data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh); 3367 STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next); 3368 sc->sc_epq[0].tx_nfree--; 3369 3370 txd = (struct mtw_txd *)&data->desc; 3371 txd->flags = htole16( 3372 MTW_TXD_DATA | MTW_TXD_80211 | MTW_TXD_WLAN | MTW_TXD_QSEL_EDCA); 3373 // txd->flags = htole16(MTW_TXD_QSEL_EDCA); 3374 txwi = (struct mtw_txwi *)(txd + 1); 3375 txwi->wcid = 0xff; 3376 txwi->xflags = xflags; 3377 txwi->txop = opflags; 3378 txwi->flags = 0; /* clear leftover garbage bits */ 3379 3380 data->m = m; 3381 data->ni = ni; 3382 /* XXX TODO: methodize with MCS rates */ 3383 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) 3384 if (rt2860_rates[ridx].rate == rate) 3385 break; 3386 data->ridx = ridx; 3387 3388 mtw_set_tx_desc(sc, data); 3389 3390 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending raw frame len=%u rate=%u\n", 3391 m->m_pkthdr.len, rate); 3392 3393 STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next); 3394 3395 usbd_transfer_start(sc->sc_xfer[MTW_BULK_RAW_TX]); 3396 3397 return (0); 3398 } 3399 3400 static int 3401 mtw_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3402 const struct ieee80211_bpf_params *params) 3403 { 3404 struct mtw_softc *sc = ni->ni_ic->ic_softc; 3405 int error = 0; 3406 MTW_LOCK(sc); 3407 /* prevent management frames from being sent if we're not ready */ 3408 if (!(sc->sc_flags & MTW_RUNNING)) { 3409 error = ENETDOWN; 3410 goto done; 3411 } 3412 3413 if (params == NULL) { 3414 /* tx mgt packet */ 3415 if ((error = mtw_tx_mgt(sc, m, ni)) != 0) { 3416 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "mgt tx failed\n"); 3417 goto done; 3418 } 3419 } else { 3420 /* tx raw packet with param */ 3421 if ((error = mtw_tx_param(sc, m, ni, params)) != 0) { 3422 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, 3423 "tx with param failed\n"); 3424 goto done; 3425 } 3426 } 3427 3428 done: 3429 3430 MTW_UNLOCK(sc); 3431 3432 if (error != 0) { 3433 if (m != NULL) 3434 m_freem(m); 3435 } 3436 3437 return (error); 3438 } 3439 3440 static int 3441 mtw_transmit(struct ieee80211com *ic, struct mbuf *m) 3442 { 3443 struct mtw_softc *sc = ic->ic_softc; 3444 int error; 3445 MTW_LOCK(sc); 3446 if ((sc->sc_flags & MTW_RUNNING) == 0) { 3447 MTW_UNLOCK(sc); 3448 return (ENXIO); 3449 } 3450 error = mbufq_enqueue(&sc->sc_snd, m); 3451 if (error) { 3452 MTW_UNLOCK(sc); 3453 return (error); 3454 } 3455 mtw_start(sc); 3456 MTW_UNLOCK(sc); 3457 3458 return (0); 3459 } 3460 3461 static void 3462 mtw_start(struct mtw_softc *sc) 3463 { 3464 struct ieee80211_node *ni; 3465 struct mbuf *m; 3466 3467 MTW_LOCK_ASSERT(sc, MA_OWNED); 3468 3469 if ((sc->sc_flags & MTW_RUNNING) == 0) { 3470 3471 return; 3472 } 3473 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { 3474 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3475 if (mtw_tx(sc, m, ni) != 0) { 3476 mbufq_prepend(&sc->sc_snd, m); 3477 break; 3478 } 3479 } 3480 } 3481 3482 static void 3483 mtw_parent(struct ieee80211com *ic) 3484 { 3485 3486 struct mtw_softc *sc = ic->ic_softc; 3487 3488 MTW_LOCK(sc); 3489 if (sc->sc_detached) { 3490 MTW_UNLOCK(sc); 3491 return; 3492 } 3493 3494 if (!(sc->sc_flags & MTW_RUNNING) && ic->ic_nrunning > 0) { 3495 
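/*
 * Interface coming up while the hardware is stopped: program the
 * hardware under the driver lock, then drop MTW_LOCK before
 * ieee80211_start_all() so net80211 can start the vaps without the
 * driver lock held.
 */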
mtw_init_locked(sc);
3496 MTW_UNLOCK(sc);
3497 ieee80211_start_all(ic);
3498 return;
3499 }
3500 if ((sc->sc_flags & MTW_RUNNING) && ic->ic_nrunning > 0) {
3501 mtw_update_promisc_locked(sc);
3502 MTW_UNLOCK(sc);
3503 return;
3504 }
3505 if ((sc->sc_flags & MTW_RUNNING) && sc->rvp_cnt <= 1 &&
3506 ic->ic_nrunning == 0) {
3507 mtw_stop(sc);
3508 MTW_UNLOCK(sc);
3509 return;
3510 }
3511 MTW_UNLOCK(sc);
3512 }
3513
3514 static void
3515 mt7601_set_agc(struct mtw_softc *sc, uint8_t agc)
3516 {
3517 uint8_t bbp;
3518
3519 mtw_bbp_write(sc, 66, agc);
3520 mtw_bbp_write(sc, 195, 0x87);
3521 bbp = (agc & 0xf0) | 0x08;
3522 mtw_bbp_write(sc, 196, bbp);
3523 }
3524
3525 static int
3526 mtw_mcu_calibrate(struct mtw_softc *sc, int func, uint32_t val)
3527 {
3528 struct mtw_mcu_cmd_8 cmd;
3529
3530 cmd.func = htole32(func);
3531 cmd.val = htole32(val);
3532 return (mtw_mcu_cmd(sc, 31, &cmd, sizeof(struct mtw_mcu_cmd_8)));
3533 }
3534
3535 static int
3536 mtw_rf_write(struct mtw_softc *sc, uint8_t bank, uint8_t reg, uint8_t val)
3537 {
3538 uint32_t tmp;
3539 int error, ntries, shift;
3540
3541 for (ntries = 0; ntries < 10; ntries++) {
3542 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
3543 return (error);
3544 if (!(tmp & MTW_RF_CSR_KICK))
3545 break;
3546 }
3547 if (ntries == 10)
3548 return (ETIMEDOUT);
3549
3550 if (sc->asic_ver == 0x7601)
3551 shift = MT7601_BANK_SHIFT;
3552 else
3553 shift = MT7610_BANK_SHIFT;
3554
3555 tmp = MTW_RF_CSR_WRITE | MTW_RF_CSR_KICK | (bank & 0xf) << shift |
3556 reg << 8 | val;
3557 return (mtw_write(sc, MTW_RF_CSR, tmp));
3558 }
3559
3560 void
3561 mtw_select_chan_group(struct mtw_softc *sc, int group)
3562 {
3563 uint32_t tmp;
3564 uint8_t bbp;
3565
3566 /* Tx band 20MHz 2G */
3567 mtw_read(sc, MTW_TX_BAND_CFG, &tmp);
3568 tmp &= ~(
3569 MTW_TX_BAND_SEL_2G | MTW_TX_BAND_SEL_5G | MTW_TX_BAND_UPPER_40M);
3570 tmp |= (group == 0) ?
MTW_TX_BAND_SEL_2G : MTW_TX_BAND_SEL_5G; 3571 mtw_write(sc, MTW_TX_BAND_CFG, tmp); 3572 3573 /* select 20 MHz bandwidth */ 3574 mtw_bbp_read(sc, 4, &bbp); 3575 bbp &= ~0x18; 3576 bbp |= 0x40; 3577 mtw_bbp_write(sc, 4, bbp); 3578 3579 /* calibrate BBP */ 3580 mtw_bbp_write(sc, 69, 0x12); 3581 mtw_bbp_write(sc, 91, 0x07); 3582 mtw_bbp_write(sc, 195, 0x23); 3583 mtw_bbp_write(sc, 196, 0x17); 3584 mtw_bbp_write(sc, 195, 0x24); 3585 mtw_bbp_write(sc, 196, 0x06); 3586 mtw_bbp_write(sc, 195, 0x81); 3587 mtw_bbp_write(sc, 196, 0x12); 3588 mtw_bbp_write(sc, 195, 0x83); 3589 mtw_bbp_write(sc, 196, 0x17); 3590 mtw_rf_write(sc, 5, 8, 0x00); 3591 // mtw_mcu_calibrate(sc, 0x6, 0x10001); 3592 3593 /* set initial AGC value */ 3594 mt7601_set_agc(sc, 0x14); 3595 } 3596 3597 static int 3598 mtw_rf_read(struct mtw_softc *sc, uint8_t bank, uint8_t reg, uint8_t *val) 3599 { 3600 uint32_t tmp; 3601 int error, ntries, shift; 3602 3603 for (ntries = 0; ntries < 100; ntries++) { 3604 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0) 3605 return (error); 3606 if (!(tmp & MTW_RF_CSR_KICK)) 3607 break; 3608 } 3609 if (ntries == 100) 3610 return (ETIMEDOUT); 3611 3612 if (sc->asic_ver == 0x7601) 3613 shift = MT7601_BANK_SHIFT; 3614 else 3615 shift = MT7610_BANK_SHIFT; 3616 3617 tmp = MTW_RF_CSR_KICK | (bank & 0xf) << shift | reg << 8; 3618 if ((error = mtw_write(sc, MTW_RF_CSR, tmp)) != 0) 3619 return (error); 3620 3621 for (ntries = 0; ntries < 100; ntries++) { 3622 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0) 3623 return (error); 3624 if (!(tmp & MTW_RF_CSR_KICK)) 3625 break; 3626 } 3627 if (ntries == 100) 3628 return (ETIMEDOUT); 3629 3630 *val = tmp & 0xff; 3631 return (0); 3632 } 3633 static void 3634 mt7601_set_chan(struct mtw_softc *sc, u_int chan) 3635 { 3636 uint32_t tmp; 3637 uint8_t bbp, rf, txpow1; 3638 int i; 3639 /* find the settings for this channel */ 3640 for (i = 0; mt7601_rf_chan[i].chan != chan; i++) 3641 ; 3642 3643 mtw_rf_write(sc, 0, 17, mt7601_rf_chan[i].r17); 3644 mtw_rf_write(sc, 0, 18, mt7601_rf_chan[i].r18); 3645 mtw_rf_write(sc, 0, 19, mt7601_rf_chan[i].r19); 3646 mtw_rf_write(sc, 0, 20, mt7601_rf_chan[i].r20); 3647 3648 /* use Tx power values from EEPROM */ 3649 txpow1 = sc->txpow1[i]; 3650 3651 /* Tx automatic level control */ 3652 mtw_read(sc, MTW_TX_ALC_CFG0, &tmp); 3653 tmp &= ~0x3f3f; 3654 tmp |= (txpow1 & 0x3f); 3655 mtw_write(sc, MTW_TX_ALC_CFG0, tmp); 3656 3657 /* LNA */ 3658 mtw_bbp_write(sc, 62, 0x37 - sc->lna[0]); 3659 mtw_bbp_write(sc, 63, 0x37 - sc->lna[0]); 3660 mtw_bbp_write(sc, 64, 0x37 - sc->lna[0]); 3661 3662 /* VCO calibration */ 3663 mtw_rf_write(sc, 0, 4, 0x0a); 3664 mtw_rf_write(sc, 0, 5, 0x20); 3665 mtw_rf_read(sc, 0, 4, &rf); 3666 mtw_rf_write(sc, 0, 4, rf | 0x80); 3667 3668 /* select 20 MHz bandwidth */ 3669 mtw_bbp_read(sc, 4, &bbp); 3670 bbp &= ~0x18; 3671 bbp |= 0x40; 3672 mtw_bbp_write(sc, 4, bbp); 3673 mtw_bbp_write(sc, 178, 0xff); 3674 } 3675 3676 static int 3677 mtw_set_chan(struct mtw_softc *sc, struct ieee80211_channel *c) 3678 { 3679 struct ieee80211com *ic = &sc->sc_ic; 3680 u_int chan, group; 3681 3682 chan = ieee80211_chan2ieee(ic, c); 3683 if (chan == 0 || chan == IEEE80211_CHAN_ANY) 3684 return (EINVAL); 3685 3686 /* determine channel group */ 3687 if (chan <= 14) 3688 group = 0; 3689 else if (chan <= 64) 3690 group = 1; 3691 else if (chan <= 128) 3692 group = 2; 3693 else 3694 group = 3; 3695 3696 if (group != sc->sc_chan_group || !sc->sc_bw_calibrated) 3697 mtw_select_chan_group(sc, group); 3698 3699 sc->sc_chan_group = group; 3700 3701 /* 
chipset specific */ 3702 if (sc->asic_ver == 0x7601) 3703 mt7601_set_chan(sc, chan); 3704 3705 DELAY(1000); 3706 return (0); 3707 } 3708 3709 static void 3710 mtw_set_channel(struct ieee80211com *ic) 3711 { 3712 struct mtw_softc *sc = ic->ic_softc; 3713 3714 MTW_LOCK(sc); 3715 mtw_set_chan(sc, ic->ic_curchan); 3716 MTW_UNLOCK(sc); 3717 3718 return; 3719 } 3720 3721 static void 3722 mtw_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, 3723 struct ieee80211_channel chans[]) 3724 { 3725 // struct mtw_softc *sc = ic->ic_softc; 3726 uint8_t bands[IEEE80211_MODE_BYTES]; 3727 3728 memset(bands, 0, sizeof(bands)); 3729 setbit(bands, IEEE80211_MODE_11B); 3730 setbit(bands, IEEE80211_MODE_11G); 3731 setbit(bands, IEEE80211_MODE_11NG); 3732 3733 /* Note: for now, only support HT20 channels */ 3734 ieee80211_add_channels_default_2ghz(chans, maxchans, nchans, bands, 0); 3735 } 3736 3737 static void 3738 mtw_scan_start(struct ieee80211com *ic) 3739 { 3740 struct mtw_softc *sc = ic->ic_softc; 3741 MTW_LOCK(sc); 3742 /* abort TSF synchronization */ 3743 mtw_abort_tsf_sync(sc); 3744 mtw_set_bssid(sc, ieee80211broadcastaddr); 3745 3746 MTW_UNLOCK(sc); 3747 3748 return; 3749 } 3750 3751 static void 3752 mtw_scan_end(struct ieee80211com *ic) 3753 { 3754 struct mtw_softc *sc = ic->ic_softc; 3755 3756 MTW_LOCK(sc); 3757 3758 mtw_enable_tsf_sync(sc); 3759 mtw_set_bssid(sc, sc->sc_bssid); 3760 3761 MTW_UNLOCK(sc); 3762 3763 return; 3764 } 3765 3766 /* 3767 * Could be called from ieee80211_node_timeout() 3768 * (non-sleepable thread) 3769 */ 3770 static void 3771 mtw_update_beacon(struct ieee80211vap *vap, int item) 3772 { 3773 struct ieee80211com *ic = vap->iv_ic; 3774 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 3775 struct ieee80211_node *ni = vap->iv_bss; 3776 struct mtw_softc *sc = ic->ic_softc; 3777 struct mtw_vap *rvp = MTW_VAP(vap); 3778 int mcast = 0; 3779 uint32_t i; 3780 3781 switch (item) { 3782 case IEEE80211_BEACON_ERP: 3783 mtw_updateslot(ic); 3784 break; 3785 case IEEE80211_BEACON_HTINFO: 3786 mtw_updateprot(ic); 3787 break; 3788 case IEEE80211_BEACON_TIM: 3789 mcast = 1; /*TODO*/ 3790 break; 3791 default: 3792 break; 3793 } 3794 3795 setbit(bo->bo_flags, item); 3796 if (rvp->beacon_mbuf == NULL) { 3797 rvp->beacon_mbuf = ieee80211_beacon_alloc(ni); 3798 if (rvp->beacon_mbuf == NULL) 3799 return; 3800 } 3801 ieee80211_beacon_update(ni, rvp->beacon_mbuf, mcast); 3802 3803 i = MTW_CMDQ_GET(&sc->cmdq_store); 3804 MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i); 3805 sc->cmdq[i].func = mtw_update_beacon_cb; 3806 sc->cmdq[i].arg0 = vap; 3807 ieee80211_runtask(ic, &sc->cmdq_task); 3808 3809 return; 3810 } 3811 3812 static void 3813 mtw_update_beacon_cb(void *arg) 3814 { 3815 3816 struct ieee80211vap *vap = arg; 3817 struct ieee80211_node *ni = vap->iv_bss; 3818 struct mtw_vap *rvp = MTW_VAP(vap); 3819 struct ieee80211com *ic = vap->iv_ic; 3820 struct mtw_softc *sc = ic->ic_softc; 3821 struct mtw_txwi txwi; 3822 struct mbuf *m; 3823 uint16_t txwisize; 3824 uint8_t ridx; 3825 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 3826 return; 3827 if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) 3828 return; 3829 3830 /* 3831 * No need to call ieee80211_beacon_update(), mtw_update_beacon() 3832 * is taking care of appropriate calls. 
3833 */ 3834 if (rvp->beacon_mbuf == NULL) { 3835 rvp->beacon_mbuf = ieee80211_beacon_alloc(ni); 3836 if (rvp->beacon_mbuf == NULL) 3837 return; 3838 } 3839 m = rvp->beacon_mbuf; 3840 3841 memset(&txwi, 0, sizeof(txwi)); 3842 txwi.wcid = 0xff; 3843 txwi.len = htole16(m->m_pkthdr.len); 3844 3845 /* send beacons at the lowest available rate */ 3846 ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ? MTW_RIDX_OFDM6 : 3847 MTW_RIDX_CCK1; 3848 txwi.phy = htole16(rt2860_rates[ridx].mcs); 3849 if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM) 3850 txwi.phy |= htole16(MTW_PHY_OFDM); 3851 txwi.txop = MTW_TX_TXOP_HT; 3852 txwi.flags = MTW_TX_TS; 3853 txwi.xflags = MTW_TX_NSEQ; 3854 3855 txwisize = sizeof(txwi); 3856 mtw_write_region_1(sc, MTW_BCN_BASE, (uint8_t *)&txwi, txwisize); 3857 mtw_write_region_1(sc, MTW_BCN_BASE + txwisize, mtod(m, uint8_t *), 3858 (m->m_pkthdr.len + 1) & ~1); 3859 } 3860 3861 static void 3862 mtw_updateprot(struct ieee80211com *ic) 3863 { 3864 struct mtw_softc *sc = ic->ic_softc; 3865 uint32_t i; 3866 3867 i = MTW_CMDQ_GET(&sc->cmdq_store); 3868 MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "test cmdq_store=%d\n", i); 3869 sc->cmdq[i].func = mtw_updateprot_cb; 3870 sc->cmdq[i].arg0 = ic; 3871 ieee80211_runtask(ic, &sc->cmdq_task); 3872 } 3873 3874 static void 3875 mtw_updateprot_cb(void *arg) 3876 { 3877 3878 struct ieee80211com *ic = arg; 3879 struct mtw_softc *sc = ic->ic_softc; 3880 uint32_t tmp; 3881 3882 tmp = RT2860_RTSTH_EN | RT2860_PROT_NAV_SHORT | RT2860_TXOP_ALLOW_ALL; 3883 /* setup protection frame rate (MCS code) */ 3884 tmp |= (ic->ic_curmode == IEEE80211_MODE_11A) ? 3885 rt2860_rates[MTW_RIDX_OFDM6].mcs | MTW_PHY_OFDM : 3886 rt2860_rates[MTW_RIDX_CCK11].mcs; 3887 3888 /* CCK frames don't require protection */ 3889 mtw_write(sc, MTW_CCK_PROT_CFG, tmp); 3890 if (ic->ic_flags & IEEE80211_F_USEPROT) { 3891 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 3892 tmp |= RT2860_PROT_CTRL_RTS_CTS; 3893 else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 3894 tmp |= RT2860_PROT_CTRL_CTS; 3895 } 3896 mtw_write(sc, MTW_OFDM_PROT_CFG, tmp); 3897 } 3898 3899 static void 3900 mtw_usb_timeout_cb(void *arg) 3901 { 3902 struct ieee80211vap *vap = arg; 3903 struct mtw_softc *sc = vap->iv_ic->ic_softc; 3904 3905 MTW_LOCK_ASSERT(sc, MA_OWNED); 3906 3907 if (vap->iv_state == IEEE80211_S_SCAN) { 3908 MTW_DPRINTF(sc, MTW_DEBUG_USB | MTW_DEBUG_STATE, 3909 "timeout caused by scan\n"); 3910 /* cancel bgscan */ 3911 ieee80211_cancel_scan(vap); 3912 } else { 3913 MTW_DPRINTF(sc, MTW_DEBUG_USB | MTW_DEBUG_STATE, 3914 "timeout by unknown cause\n"); 3915 } 3916 } 3917 static int mtw_reset(struct mtw_softc *sc) 3918 { 3919 3920 usb_device_request_t req; 3921 uint16_t tmp; 3922 uint16_t actlen; 3923 3924 req.bmRequestType = UT_WRITE_VENDOR_DEVICE; 3925 req.bRequest = MTW_RESET; 3926 USETW(req.wValue, 1); 3927 USETW(req.wIndex, 0); 3928 USETW(req.wLength, 0); 3929 return (usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, 3930 &req, &tmp, 0, &actlen, 1000)); 3931 3932 } 3933 3934 3935 static void 3936 mtw_update_promisc_locked(struct mtw_softc *sc) 3937 { 3938 3939 uint32_t tmp; 3940 3941 mtw_read(sc, MTW_RX_FILTR_CFG, &tmp); 3942 3943 tmp |= MTW_DROP_UC_NOME; 3944 if (sc->sc_ic.ic_promisc > 0) 3945 tmp &= ~MTW_DROP_UC_NOME; 3946 3947 mtw_write(sc, MTW_RX_FILTR_CFG, tmp); 3948 3949 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "%s promiscuous mode\n", 3950 (sc->sc_ic.ic_promisc > 0) ? 
"entering" : "leaving"); 3951 } 3952 3953 static void 3954 mtw_update_promisc(struct ieee80211com *ic) 3955 { 3956 struct mtw_softc *sc = ic->ic_softc; 3957 3958 if ((sc->sc_flags & MTW_RUNNING) == 0) 3959 return; 3960 3961 MTW_LOCK(sc); 3962 mtw_update_promisc_locked(sc); 3963 MTW_UNLOCK(sc); 3964 } 3965 3966 static void 3967 mtw_enable_tsf_sync(struct mtw_softc *sc) 3968 { 3969 struct ieee80211com *ic = &sc->sc_ic; 3970 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3971 uint32_t tmp; 3972 int error; 3973 mtw_read(sc, MTW_BCN_TIME_CFG, &tmp); 3974 tmp &= ~0x1fffff; 3975 tmp |= vap->iv_bss->ni_intval * 16; 3976 tmp |= MTW_TSF_TIMER_EN | MTW_TBTT_TIMER_EN; 3977 3978 /* local TSF is always updated with remote TSF on beacon reception */ 3979 tmp |= 1 << MTW_TSF_SYNC_MODE_SHIFT; 3980 error = mtw_write(sc, MTW_BCN_TIME_CFG, tmp); 3981 if (error != 0) { 3982 device_printf(sc->sc_dev, "enable_tsf_sync failed error:%d\n", 3983 error); 3984 } 3985 return; 3986 } 3987 3988 static void 3989 mtw_enable_mrr(struct mtw_softc *sc) 3990 { 3991 #define CCK(mcs) (mcs) 3992 3993 #define OFDM(mcs) (1 << 3 | (mcs)) 3994 mtw_write(sc, MTW_LG_FBK_CFG0, 3995 OFDM(6) << 28 | /* 54->48 */ 3996 OFDM(5) << 24 | /* 48->36 */ 3997 OFDM(4) << 20 | /* 36->24 */ 3998 OFDM(3) << 16 | /* 24->18 */ 3999 OFDM(2) << 12 | /* 18->12 */ 4000 OFDM(1) << 8 | /* 12-> 9 */ 4001 OFDM(0) << 4 | /* 9-> 6 */ 4002 OFDM(0)); /* 6-> 6 */ 4003 4004 mtw_write(sc, MTW_LG_FBK_CFG1, 4005 CCK(2) << 12 | /* 11->5.5 */ 4006 CCK(1) << 8 | /* 5.5-> 2 */ 4007 CCK(0) << 4 | /* 2-> 1 */ 4008 CCK(0)); /* 1-> 1 */ 4009 #undef OFDM 4010 #undef CCK 4011 } 4012 4013 static void 4014 mtw_set_txpreamble(struct mtw_softc *sc) 4015 { 4016 struct ieee80211com *ic = &sc->sc_ic; 4017 uint32_t tmp; 4018 4019 mtw_read(sc, MTW_AUTO_RSP_CFG, &tmp); 4020 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4021 tmp |= MTW_CCK_SHORT_EN; 4022 else 4023 tmp &= ~MTW_CCK_SHORT_EN; 4024 mtw_write(sc, MTW_AUTO_RSP_CFG, tmp); 4025 } 4026 4027 static void 4028 mtw_set_basicrates(struct mtw_softc *sc) 4029 { 4030 struct ieee80211com *ic = &sc->sc_ic; 4031 4032 /* set basic rates mask */ 4033 if (ic->ic_curmode == IEEE80211_MODE_11B) 4034 mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x003); 4035 else if (ic->ic_curmode == IEEE80211_MODE_11A) 4036 mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x150); 4037 else /* 11g */ 4038 mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x17f); 4039 } 4040 4041 static void 4042 mtw_set_bssid(struct mtw_softc *sc, const uint8_t *bssid) 4043 { 4044 mtw_write(sc, MTW_MAC_BSSID_DW0, 4045 bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24); 4046 mtw_write(sc, MTW_MAC_BSSID_DW1, bssid[4] | bssid[5] << 8); 4047 } 4048 4049 static void 4050 mtw_set_macaddr(struct mtw_softc *sc, const uint8_t *addr) 4051 { 4052 mtw_write(sc, MTW_MAC_ADDR_DW0, 4053 addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); 4054 mtw_write(sc, MTW_MAC_ADDR_DW1, addr[4] | addr[5] << 8 | 0xff << 16); 4055 } 4056 4057 static void 4058 mtw_updateslot(struct ieee80211com *ic) 4059 { 4060 4061 struct mtw_softc *sc = ic->ic_softc; 4062 uint32_t i; 4063 4064 i = MTW_CMDQ_GET(&sc->cmdq_store); 4065 MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i); 4066 sc->cmdq[i].func = mtw_updateslot_cb; 4067 sc->cmdq[i].arg0 = ic; 4068 ieee80211_runtask(ic, &sc->cmdq_task); 4069 4070 return; 4071 } 4072 4073 /* ARGSUSED */ 4074 static void 4075 mtw_updateslot_cb(void *arg) 4076 { 4077 struct ieee80211com *ic = arg; 4078 struct mtw_softc *sc = ic->ic_softc; 4079 uint32_t tmp; 4080 mtw_read(sc, MTW_BKOFF_SLOT_CFG, 
/* ARGSUSED */
static void
mtw_updateslot_cb(void *arg)
{
        struct ieee80211com *ic = arg;
        struct mtw_softc *sc = ic->ic_softc;
        uint32_t tmp;

        mtw_read(sc, MTW_BKOFF_SLOT_CFG, &tmp);
        tmp &= ~0xff;
        tmp |= IEEE80211_GET_SLOTTIME(ic);
        mtw_write(sc, MTW_BKOFF_SLOT_CFG, tmp);
}

static void
mtw_update_mcast(struct ieee80211com *ic)
{
}

static int8_t
mtw_rssi2dbm(struct mtw_softc *sc, uint8_t rssi, uint8_t rxchain)
{
        struct ieee80211com *ic = &sc->sc_ic;
        struct ieee80211_channel *c = ic->ic_curchan;
        int delta;

        if (IEEE80211_IS_CHAN_5GHZ(c)) {
                u_int chan = ieee80211_chan2ieee(ic, c);
                delta = sc->rssi_5ghz[rxchain];

                /* determine channel group */
                if (chan <= 64)
                        delta -= sc->lna[1];
                else if (chan <= 128)
                        delta -= sc->lna[2];
                else
                        delta -= sc->lna[3];
        } else
                delta = sc->rssi_2ghz[rxchain] - sc->lna[0];

        return (-12 - delta - rssi);
}

static int
mt7601_bbp_init(struct mtw_softc *sc)
{
        uint8_t bbp;
        int i, error, ntries;

        /* wait for BBP to wake up */
        for (ntries = 0; ntries < 20; ntries++) {
                if ((error = mtw_bbp_read(sc, 0, &bbp)) != 0)
                        return (error);
                if (bbp != 0 && bbp != 0xff)
                        break;
        }
        if (ntries == 20)
                return (ETIMEDOUT);

        mtw_bbp_read(sc, 3, &bbp);
        mtw_bbp_write(sc, 3, 0);
        mtw_bbp_read(sc, 105, &bbp);
        mtw_bbp_write(sc, 105, 0);

        /* initialize BBP registers to default values */
        for (i = 0; i < nitems(mt7601_def_bbp); i++) {
                if ((error = mtw_bbp_write(sc, mt7601_def_bbp[i].reg,
                    mt7601_def_bbp[i].val)) != 0)
                        return (error);
        }

        sc->sc_bw_calibrated = 0;

        return (0);
}

static int
mt7601_rf_init(struct mtw_softc *sc)
{
        int i, error;

        /* RF bank 0 */
        for (i = 0; i < nitems(mt7601_rf_bank0); i++) {
                error = mtw_rf_write(sc, 0, mt7601_rf_bank0[i].reg,
                    mt7601_rf_bank0[i].val);
                if (error != 0)
                        return (error);
        }
        /* RF bank 4 */
        for (i = 0; i < nitems(mt7601_rf_bank4); i++) {
                error = mtw_rf_write(sc, 4, mt7601_rf_bank4[i].reg,
                    mt7601_rf_bank4[i].val);
                if (error != 0)
                        return (error);
        }
        /* RF bank 5 */
        for (i = 0; i < nitems(mt7601_rf_bank5); i++) {
                error = mtw_rf_write(sc, 5, mt7601_rf_bank5[i].reg,
                    mt7601_rf_bank5[i].val);
                if (error != 0)
                        return (error);
        }
        return (0);
}
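
/*
 * Enable MAC Tx, wait for the WPDMA engine to go idle, then turn on
 * Tx/Rx DMA and Rx bulk aggregation and program an Rx filter that
 * matches the current operating mode.
 */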
static int
mtw_txrx_enable(struct mtw_softc *sc)
{
        struct ieee80211com *ic = &sc->sc_ic;
        uint32_t tmp;
        int error, ntries;

        mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_TX_EN);
        for (ntries = 0; ntries < 200; ntries++) {
                if ((error = mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp)) != 0)
                        return (error);
                if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
                        break;
                mtw_delay(sc, 50);
        }
        if (ntries == 200)
                return (ETIMEDOUT);

        DELAY(50);

        tmp |= MTW_RX_DMA_EN | MTW_TX_DMA_EN | MTW_TX_WB_DDONE;
        mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);

        /* enable Rx bulk aggregation (set timeout and limit) */
        tmp = MTW_USB_TX_EN | MTW_USB_RX_EN | MTW_USB_RX_AGG_EN |
            MTW_USB_RX_AGG_TO(128) | MTW_USB_RX_AGG_LMT(2);
        mtw_write(sc, MTW_USB_DMA_CFG, tmp);

        /* set Rx filter */
        tmp = MTW_DROP_CRC_ERR | MTW_DROP_PHY_ERR;
        if (ic->ic_opmode != IEEE80211_M_MONITOR) {
                tmp |= MTW_DROP_UC_NOME | MTW_DROP_DUPL | MTW_DROP_CTS |
                    MTW_DROP_BA | MTW_DROP_ACK | MTW_DROP_VER_ERR |
                    MTW_DROP_CTRL_RSV | MTW_DROP_CFACK | MTW_DROP_CFEND;
                if (ic->ic_opmode == IEEE80211_M_STA)
                        tmp |= MTW_DROP_RTS | MTW_DROP_PSPOLL;
        }
        mtw_write(sc, MTW_RX_FILTR_CFG, tmp);

        mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_RX_EN | MTW_MAC_TX_EN);
        return (0);
}

static int
mt7601_rxdc_cal(struct mtw_softc *sc)
{
        uint32_t tmp;
        uint8_t bbp;
        int ntries;

        mtw_read(sc, MTW_MAC_SYS_CTRL, &tmp);
        mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_RX_EN);
        mtw_bbp_write(sc, 158, 0x8d);
        mtw_bbp_write(sc, 159, 0xfc);
        mtw_bbp_write(sc, 158, 0x8c);
        mtw_bbp_write(sc, 159, 0x4c);

        for (ntries = 0; ntries < 20; ntries++) {
                DELAY(300);
                mtw_bbp_write(sc, 158, 0x8c);
                mtw_bbp_read(sc, 159, &bbp);
                if (bbp == 0x0c)
                        break;
        }
        if (ntries == 20)
                return (ETIMEDOUT);

        mtw_write(sc, MTW_MAC_SYS_CTRL, 0);
        mtw_bbp_write(sc, 158, 0x8d);
        mtw_bbp_write(sc, 159, 0xe0);
        mtw_write(sc, MTW_MAC_SYS_CTRL, tmp);
        return (0);
}

static int
mt7601_r49_read(struct mtw_softc *sc, uint8_t flag, int8_t *val)
{
        uint8_t bbp;

        mtw_bbp_read(sc, 47, &bbp);
        bbp = 0x90;
        mtw_bbp_write(sc, 47, bbp);
        bbp &= ~0x0f;
        bbp |= flag;
        mtw_bbp_write(sc, 47, bbp);
        return (mtw_bbp_read(sc, 49, val));
}

static int
mt7601_rf_temperature(struct mtw_softc *sc, int8_t *val)
{
        uint32_t rfb, rfs;
        uint8_t bbp;
        int ntries;

        mtw_read(sc, MTW_RF_BYPASS0, &rfb);
        mtw_read(sc, MTW_RF_SETTING0, &rfs);
        mtw_write(sc, MTW_RF_BYPASS0, 0);
        mtw_write(sc, MTW_RF_SETTING0, 0x10);
        mtw_write(sc, MTW_RF_BYPASS0, 0x10);

        mtw_bbp_read(sc, 47, &bbp);
        bbp &= ~0x7f;
        bbp |= 0x10;
        mtw_bbp_write(sc, 47, bbp);

        mtw_bbp_write(sc, 22, 0x40);

        for (ntries = 0; ntries < 10; ntries++) {
                mtw_bbp_read(sc, 47, &bbp);
                if ((bbp & 0x10) == 0)
                        break;
        }
        if (ntries == 10)
                return (ETIMEDOUT);

        mt7601_r49_read(sc, MT7601_R47_TEMP, val);

        mtw_bbp_write(sc, 22, 0);

        mtw_bbp_read(sc, 21, &bbp);
        bbp |= 0x02;
        mtw_bbp_write(sc, 21, bbp);
        bbp &= ~0x02;
        mtw_bbp_write(sc, 21, bbp);

        mtw_write(sc, MTW_RF_BYPASS0, 0);
        mtw_write(sc, MTW_RF_SETTING0, rfs);
        mtw_write(sc, MTW_RF_BYPASS0, rfb);
        return (0);
}
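
/*
 * One-time RF bring-up: initialize the RF register banks, program the
 * frequency offset, read the die temperature and run the MCU-driven
 * calibrations.  The result is cached in sc_rf_calibrated, so later
 * calls return immediately.
 */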
static int
mt7601_rf_setup(struct mtw_softc *sc)
{
        uint32_t tmp;
        uint8_t rf;
        int error;

        if (sc->sc_rf_calibrated)
                return (0);

        /* init RF registers */
        if ((error = mt7601_rf_init(sc)) != 0)
                return (error);

        /* init frequency offset */
        mtw_rf_write(sc, 0, 12, sc->rf_freq_offset);
        mtw_rf_read(sc, 0, 12, &rf);

        /* read temperature */
        mt7601_rf_temperature(sc, &rf);
        sc->bbp_temp = rf;
        device_printf(sc->sc_dev, "BBP temp 0x%x\n", rf);

        mtw_rf_read(sc, 0, 7, &rf);
        if ((error = mtw_mcu_calibrate(sc, 0x1, 0)) != 0)
                return (error);
        mtw_delay(sc, 100);
        mtw_rf_read(sc, 0, 7, &rf);

        /* calibrate VCO RF 0/4 */
        mtw_rf_write(sc, 0, 4, 0x0a);
        mtw_rf_write(sc, 0, 4, 0x20);
        mtw_rf_read(sc, 0, 4, &rf);
        mtw_rf_write(sc, 0, 4, rf | 0x80);

        if ((error = mtw_mcu_calibrate(sc, 0x9, 0)) != 0)
                return (error);
        if ((error = mt7601_rxdc_cal(sc)) != 0)
                return (error);
        if ((error = mtw_mcu_calibrate(sc, 0x6, 1)) != 0)
                return (error);
        if ((error = mtw_mcu_calibrate(sc, 0x6, 0)) != 0)
                return (error);
        if ((error = mtw_mcu_calibrate(sc, 0x4, 0)) != 0)
                return (error);
        if ((error = mtw_mcu_calibrate(sc, 0x5, 0)) != 0)
                return (error);

        mtw_read(sc, MTW_LDO_CFG0, &tmp);
        tmp &= ~(1 << 4);
        tmp |= (1 << 2);
        mtw_write(sc, MTW_LDO_CFG0, tmp);

        if ((error = mtw_mcu_calibrate(sc, 0x8, 0)) != 0)
                return (error);
        if ((error = mt7601_rxdc_cal(sc)) != 0)
                return (error);

        sc->sc_rf_calibrated = 1;
        return (0);
}

static void
mtw_set_txrts(struct mtw_softc *sc)
{
        uint32_t tmp;

        /* set RTS threshold */
        mtw_read(sc, MTW_TX_RTS_CFG, &tmp);
        tmp &= ~0xffff00;
        tmp |= 0x1000 << MTW_RTS_THRES_SHIFT;
        mtw_write(sc, MTW_TX_RTS_CFG, tmp);
}

static int
mtw_mcu_radio(struct mtw_softc *sc, int func, uint32_t val)
{
        struct mtw_mcu_cmd_16 cmd;

        cmd.r1 = htole32(func);
        cmd.r2 = htole32(val);
        cmd.r3 = 0;
        cmd.r4 = 0;
        return (mtw_mcu_cmd(sc, 20, &cmd, sizeof(struct mtw_mcu_cmd_16)));
}
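
/*
 * Bring the hardware up: reset the MAC and BBP, load the default MAC,
 * BBP and RF register values, clear the on-chip WCID/key tables, set up
 * Tx power and the USB DMA engine, run the RF calibration, tune to the
 * current channel and finally start the Rx transfer and enable Tx/Rx.
 */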
static void
mtw_init_locked(struct mtw_softc *sc)
{
        struct ieee80211com *ic = &sc->sc_ic;
        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
        uint32_t tmp;
        int i, error, ridx, ntries;

        if (ic->ic_nrunning > 1)
                return;
        mtw_stop(sc);

        for (i = 0; i != MTW_EP_QUEUES; i++)
                mtw_setup_tx_list(sc, &sc->sc_epq[i]);

        for (ntries = 0; ntries < 100; ntries++) {
                if ((error = mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp)) != 0)
                        goto fail;
                if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
                        break;
                DELAY(1000);
        }
        if (ntries == 100) {
                device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
                error = ETIMEDOUT;
                goto fail;
        }
        tmp &= 0xff0;
        tmp |= MTW_TX_WB_DDONE;
        mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);

        mtw_set_leds(sc, MTW_LED_MODE_ON);

        /* reset MAC and baseband */
        mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_BBP_HRST | MTW_MAC_SRST);
        mtw_write(sc, MTW_USB_DMA_CFG, 0);
        mtw_write(sc, MTW_MAC_SYS_CTRL, 0);

        /* init MAC values */
        if (sc->asic_ver == 0x7601) {
                for (i = 0; i < nitems(mt7601_def_mac); i++)
                        mtw_write(sc, mt7601_def_mac[i].reg,
                            mt7601_def_mac[i].val);
        }

        /* wait while MAC is busy */
        for (ntries = 0; ntries < 100; ntries++) {
                if ((error = mtw_read(sc, MTW_MAC_STATUS_REG, &tmp)) != 0)
                        goto fail;
                if (!(tmp & (MTW_RX_STATUS_BUSY | MTW_TX_STATUS_BUSY)))
                        break;
                DELAY(1000);
        }
        if (ntries == 100) {
                error = ETIMEDOUT;
                goto fail;
        }

        /* set MAC address */
        mtw_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);

        /* clear WCID attribute table */
        mtw_set_region_4(sc, MTW_WCID_ATTR(0), 1, 8 * 32);

        mtw_write(sc, 0x1648, 0x00830083);
        mtw_read(sc, MTW_FCE_L2_STUFF, &tmp);
        tmp &= ~MTW_L2S_WR_MPDU_LEN_EN;
        mtw_write(sc, MTW_FCE_L2_STUFF, tmp);

        /* RTS config */
        mtw_set_txrts(sc);

        /* clear Host to MCU mailbox */
        mtw_write(sc, MTW_BBP_CSR, 0);
        mtw_write(sc, MTW_H2M_MAILBOX, 0);

        /* clear RX WCID search table */
        mtw_set_region_4(sc, MTW_WCID_ENTRY(0), 0xffffffff, 512);

        /* abort TSF synchronization */
        mtw_abort_tsf_sync(sc);

        mtw_read(sc, MTW_US_CYC_CNT, &tmp);
        tmp = (tmp & ~0xff);
        if (sc->asic_ver == 0x7601)
                tmp |= 0x1e;
        mtw_write(sc, MTW_US_CYC_CNT, tmp);

        /* clear shared key table */
        mtw_set_region_4(sc, MTW_SKEY(0, 0), 0, 8 * 32);

        /* clear IV/EIV table */
        mtw_set_region_4(sc, MTW_IVEIV(0), 0, 8 * 32);

        /* clear shared key mode */
        mtw_write(sc, MTW_SKEY_MODE_0_7, 0);
        mtw_write(sc, MTW_SKEY_MODE_8_15, 0);

        /* txop truncation */
        mtw_write(sc, MTW_TXOP_CTRL_CFG, 0x0000583f);

        /* init Tx power for all Tx rates */
        for (ridx = 0; ridx < 5; ridx++) {
                if (sc->txpow20mhz[ridx] == 0xffffffff)
                        continue;
                mtw_write(sc, MTW_TX_PWR_CFG(ridx), sc->txpow20mhz[ridx]);
        }
        mtw_write(sc, MTW_TX_PWR_CFG7, 0);
        mtw_write(sc, MTW_TX_PWR_CFG9, 0);

        mtw_read(sc, MTW_CMB_CTRL, &tmp);
        tmp &= ~(1 << 18 | 1 << 14);
        mtw_write(sc, MTW_CMB_CTRL, tmp);

        /* clear USB DMA */
        mtw_write(sc, MTW_USB_DMA_CFG,
            MTW_USB_TX_EN | MTW_USB_RX_EN | MTW_USB_RX_AGG_EN |
            MTW_USB_TX_CLEAR | MTW_USB_TXOP_HALT | MTW_USB_RX_WL_DROP);
        mtw_delay(sc, 50);
        mtw_read(sc, MTW_USB_DMA_CFG, &tmp);
        tmp &= ~(MTW_USB_TX_CLEAR | MTW_USB_TXOP_HALT | MTW_USB_RX_WL_DROP);
        mtw_write(sc, MTW_USB_DMA_CFG, tmp);

        /* enable radio */
        mtw_mcu_radio(sc, 0x31, 0);

        /* init RF registers */
        if (sc->asic_ver == 0x7601)
                mt7601_rf_init(sc);

        /* init baseband registers */
        if (sc->asic_ver == 0x7601)
                error = mt7601_bbp_init(sc);
        if (error != 0) {
                device_printf(sc->sc_dev, "could not initialize BBP\n");
                goto fail;
        }

        /* setup and calibrate RF */
        error = mt7601_rf_setup(sc);
        if (error != 0) {
                device_printf(sc->sc_dev, "could not initialize RF\n");
                goto fail;
        }

        /* select default channel */
        mtw_set_chan(sc, ic->ic_curchan);

        /* setup initial protection mode */
        mtw_updateprot_cb(ic);

        sc->sc_flags |= MTW_RUNNING;
        sc->cmdq_run = MTW_CMDQ_GO;
        for (i = 0; i != MTW_N_XFER; i++)
                usbd_xfer_set_stall(sc->sc_xfer[i]);

        usbd_transfer_start(sc->sc_xfer[MTW_BULK_RX]);

        error = mtw_txrx_enable(sc);
        if (error != 0)
                goto fail;

        return;

fail:
        mtw_stop(sc);
}
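
/*
 * Stop the hardware: clear the run flag, drain the USB transfers and the
 * pending Tx mbufs, then disable Tx/Rx DMA and the MAC and wait for any
 * in-flight Tx to complete.
 */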
static void
mtw_stop(void *arg)
{
        struct mtw_softc *sc = (struct mtw_softc *)arg;
        uint32_t tmp;
        int i, ntries, error;

        MTW_LOCK_ASSERT(sc, MA_OWNED);

        sc->sc_flags &= ~MTW_RUNNING;

        sc->ratectl_run = MTW_RATECTL_OFF;
        sc->cmdq_run = sc->cmdq_key_set;

        MTW_UNLOCK(sc);

        for (i = 0; i < MTW_N_XFER; i++)
                usbd_transfer_drain(sc->sc_xfer[i]);

        MTW_LOCK(sc);

        mtw_drain_mbufq(sc);

        if (sc->rx_m != NULL) {
                m_free(sc->rx_m);
                sc->rx_m = NULL;
        }

        /* Disable Tx/Rx DMA. */
        mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp);
        tmp &= ~(MTW_RX_DMA_EN | MTW_TX_DMA_EN);
        mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);
        /* mtw_usb_dma_write(sc, 0); */

        for (ntries = 0; ntries < 100; ntries++) {
                if (mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp) != 0)
                        break;
                if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
                        break;
                DELAY(10);
        }
        if (ntries == 100)
                device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");

        /* stop MAC Tx/Rx */
        mtw_read(sc, MTW_MAC_SYS_CTRL, &tmp);
        tmp &= ~(MTW_MAC_RX_EN | MTW_MAC_TX_EN);
        mtw_write(sc, MTW_MAC_SYS_CTRL, tmp);

        /* disable RTS retry */
        mtw_read(sc, MTW_TX_RTS_CFG, &tmp);
        tmp &= ~0xff;
        mtw_write(sc, MTW_TX_RTS_CFG, tmp);

        /* US_CYC_CFG */
        mtw_read(sc, MTW_US_CYC_CNT, &tmp);
        tmp = (tmp & ~0xff);
        mtw_write(sc, MTW_US_CYC_CNT, tmp);

        /* stop PBF */
        mtw_read(sc, MTW_PBF_CFG, &tmp);
        tmp &= ~0x3;
        mtw_write(sc, MTW_PBF_CFG, tmp);

        /* wait for pending Tx to complete */
        for (ntries = 0; ntries < 100; ntries++) {
                if ((error = mtw_read(sc, MTW_TXRXQ_PCNT, &tmp)) != 0)
                        break;
                if ((tmp & MTW_TX2Q_PCNT_MASK) == 0)
                        break;
        }
}

static void
mtw_delay(struct mtw_softc *sc, u_int ms)
{
        usb_pause_mtx(mtx_owned(&sc->sc_mtx) ? &sc->sc_mtx : NULL,
            USB_MS_TO_TICKS(ms));
}

static void
mtw_update_chw(struct ieee80211com *ic)
{
        printf("%s: TODO\n", __func__);
}

static int
mtw_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
        /* For now, no A-MPDU Tx support in the driver. */
        return (0);
}

static device_method_t mtw_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, mtw_match),
        DEVMETHOD(device_attach, mtw_attach),
        DEVMETHOD(device_detach, mtw_detach),

        DEVMETHOD_END
};

static driver_t mtw_driver = {
        .name = "mtw",
        .methods = mtw_methods,
        .size = sizeof(struct mtw_softc),
};

DRIVER_MODULE(mtw, uhub, mtw_driver, mtw_driver_loaded, NULL);
MODULE_DEPEND(mtw, wlan, 1, 1, 1);
MODULE_DEPEND(mtw, usb, 1, 1, 1);
MODULE_DEPEND(mtw, firmware, 1, 1, 1);
MODULE_VERSION(mtw, 1);
USB_PNP_HOST_INFO(mtw_devs);