1 /* 2 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 3 * Use is subject to license terms. 4 */ 5 6 /* 7 * Copyright (c) 2008 Atheros Communications Inc. 8 * 9 * Permission to use, copy, modify, and/or distribute this software for any 10 * purpose with or without fee is hereby granted, provided that the above 11 * copyright notice and this permission notice appear in all copies. 12 * 13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 20 */ 21 22 #include <sys/param.h> 23 #include <sys/types.h> 24 #include <sys/signal.h> 25 #include <sys/stream.h> 26 #include <sys/termio.h> 27 #include <sys/errno.h> 28 #include <sys/file.h> 29 #include <sys/cmn_err.h> 30 #include <sys/stropts.h> 31 #include <sys/strsubr.h> 32 #include <sys/strtty.h> 33 #include <sys/kbio.h> 34 #include <sys/cred.h> 35 #include <sys/stat.h> 36 #include <sys/consdev.h> 37 #include <sys/kmem.h> 38 #include <sys/modctl.h> 39 #include <sys/ddi.h> 40 #include <sys/sunddi.h> 41 #include <sys/pci.h> 42 #include <sys/errno.h> 43 #include <sys/mac_provider.h> 44 #include <sys/dlpi.h> 45 #include <sys/ethernet.h> 46 #include <sys/list.h> 47 #include <sys/byteorder.h> 48 #include <sys/strsun.h> 49 #include <sys/policy.h> 50 #include <inet/common.h> 51 #include <inet/nd.h> 52 #include <inet/mi.h> 53 #include <inet/wifi_ioctl.h> 54 #include <sys/mac_wifi.h> 55 56 #include "arn_ath9k.h" 57 #include "arn_core.h" 58 #include "arn_reg.h" 59 #include "arn_hw.h" 60 61 #define ARN_MAX_RSSI 63 /* max rssi */ 62 63 /* 64 * PIO access attributes 
 * for registers
 */
static ddi_device_acc_attr_t arn_reg_accattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,   /* registers accessed little-endian */
    DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t arn_desc_accattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC
};

/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t arn_dma_attr = {
    DMA_ATTR_V0,        /* version number */
    0,                  /* low address */
    0xffffffffU,        /* high address */
    0x3ffffU,           /* counter register max */
    1,                  /* alignment */
    0xFFF,              /* burst sizes */
    1,                  /* minimum transfer size */
    0x3ffffU,           /* max transfer size */
    0xffffffffU,        /* address register max */
    1,                  /* no scatter-gather */
    1,                  /* granularity of device */
    0,                  /* DMA flags */
};

/* DMA attributes for the descriptor ring: 0x1000-byte (4KB) alignment */
static ddi_dma_attr_t arn_desc_dma_attr = {
    DMA_ATTR_V0,        /* version number */
    0,                  /* low address */
    0xffffffffU,        /* high address */
    0xffffffffU,        /* counter register max */
    0x1000,             /* alignment */
    0xFFF,              /* burst sizes */
    1,                  /* minimum transfer size */
    0xffffffffU,        /* max transfer size */
    0xffffffffU,        /* address register max */
    1,                  /* no scatter-gather */
    1,                  /* granularity of device */
    0,                  /* DMA flags */
};

#define ATH_DEF_CACHE_BYTES 32  /* default cache line size */

/* serializes arn_problem()/arn_log()/arn_dbg() output */
static kmutex_t arn_loglock;
/* soft-state anchor for all arn instances */
static void *arn_soft_state_p = NULL;
/* scan interval, ms?
 */
static int arn_dwelltime = 200; /* 150 */

/* GLDv3 mac callback implementations (defined later in this file) */
static int arn_m_stat(void *, uint_t, uint64_t *);
static int arn_m_start(void *);
static void arn_m_stop(void *);
static int arn_m_promisc(void *, boolean_t);
static int arn_m_multicst(void *, boolean_t, const uint8_t *);
static int arn_m_unicst(void *, const uint8_t *);
static mblk_t *arn_m_tx(void *, mblk_t *);
static void arn_m_ioctl(void *, queue_t *, mblk_t *);
static int arn_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int arn_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *, uint_t *);

/* MAC Callback Functions */
static mac_callbacks_t arn_m_callbacks = {
    MC_IOCTL | MC_SETPROP | MC_GETPROP,
    arn_m_stat,
    arn_m_start,
    arn_m_stop,
    arn_m_promisc,
    arn_m_multicst,
    arn_m_unicst,
    arn_m_tx,
    arn_m_ioctl,
    NULL,
    NULL,
    NULL,
    arn_m_setprop,
    arn_m_getprop
};

/*
 * Debug categories that may be enabled in arn_dbg_mask:
 * ARN_DBG_HW
 * ARN_DBG_REG_IO
 * ARN_DBG_QUEUE
 * ARN_DBG_EEPROM
 * ARN_DBG_XMIT
 * ARN_DBG_RECV
 * ARN_DBG_CALIBRATE
 * ARN_DBG_CHANNEL
 * ARN_DBG_INTERRUPT
 * ARN_DBG_REGULATORY
 * ARN_DBG_ANI
 * ARN_DBG_POWER_MGMT
 * ARN_DBG_KEYCACHE
 * ARN_DBG_BEACON
 * ARN_DBG_RATE
 * ARN_DBG_INIT
 * ARN_DBG_ATTACH
 * ARN_DBG_DEATCH
 * ARN_DBG_AGGR
 * ARN_DBG_RESET
 * ARN_DBG_FATAL
 * ARN_DBG_ANY
 * ARN_DBG_ALL
 */
uint32_t arn_dbg_mask = 0;

/*
 * Exception/warning cases not leading to panic.
 */
void
arn_problem(const int8_t *fmt, ...)
{
    va_list args;

    mutex_enter(&arn_loglock);

    va_start(args, fmt);
    vcmn_err(CE_WARN, fmt, args);
    va_end(args);

    mutex_exit(&arn_loglock);
}

/*
 * Normal log information independent of debug.
 */
void
arn_log(const int8_t *fmt, ...)
{
    va_list args;

    mutex_enter(&arn_loglock);

    va_start(args, fmt);
    vcmn_err(CE_CONT, fmt, args);
    va_end(args);

    mutex_exit(&arn_loglock);
}

/*
 * Debug output, emitted only if one of dbg_flags is enabled in
 * arn_dbg_mask.
 */
void
arn_dbg(uint32_t dbg_flags, const int8_t *fmt, ...)
{
    va_list args;

    if (dbg_flags & arn_dbg_mask) {
        mutex_enter(&arn_loglock);
        va_start(args, fmt);
        vcmn_err(CE_CONT, fmt, args);
        va_end(args);
        mutex_exit(&arn_loglock);
    }
}

/*
 * Read and write, they both share the same lock. We do this to serialize
 * reads and writes on Atheros 802.11n PCI devices only. This is required
 * as the FIFO on these devices can only accept sanely 2 requests. After
 * that the device goes bananas. Serializing the reads/writes prevents this
 * from happening.
 */
void
arn_iowrite32(struct ath_hal *ah, uint32_t reg_offset, uint32_t val)
{
    struct arn_softc *sc = ah->ah_sc;
    if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
        mutex_enter(&sc->sc_serial_rw);
        ddi_put32(sc->sc_io_handle,
            (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
        mutex_exit(&sc->sc_serial_rw);
    } else {
        ddi_put32(sc->sc_io_handle,
            (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
    }
}

unsigned int
arn_ioread32(struct ath_hal *ah, uint32_t reg_offset)
{
    uint32_t val;
    struct arn_softc *sc = ah->ah_sc;
    if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
        mutex_enter(&sc->sc_serial_rw);
        val = ddi_get32(sc->sc_io_handle,
            (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
        mutex_exit(&sc->sc_serial_rw);
    } else {
        val = ddi_get32(sc->sc_io_handle,
            (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
    }

    return (val);
}

/*
 * Link an rx buffer's descriptor onto the tail of the hardware rx
 * descriptor chain (sc_rxlink).
 */
void
arn_rx_buf_link(struct arn_softc *sc, struct ath_buf *bf)
{
    struct ath_desc *ds;

    ds = bf->bf_desc;
    ds->ds_link = bf->bf_daddr;
    ds->ds_data =
        bf->bf_dma.cookie.dmac_address;
    /* virtual addr of the beginning of the buffer. */
    ds->ds_vdata = bf->bf_dma.mem_va;

    /*
     * setup rx descriptors. The bf_dma.alength here tells the H/W
     * how much data it can DMA to us and that we are prepared
     * to process
     */
    (void) ath9k_hw_setuprxdesc(sc->sc_ah, ds,
        bf->bf_dma.alength,     /* buffer size */
        0);

    if (sc->sc_rxlink != NULL)
        *sc->sc_rxlink = bf->bf_daddr;
    sc->sc_rxlink = &ds->ds_link;
}

/*
 * Allocate an area of memory and a DMA handle for accessing it
 *
 * NOTE(review): on the failure paths the handle/memory allocated by the
 * earlier steps are not released here — presumably the caller is expected
 * to invoke arn_free_dma_mem(); confirm all callers do so on failure.
 */
static int
arn_alloc_dma_mem(dev_info_t *devinfo, ddi_dma_attr_t *dma_attr, size_t memsize,
    ddi_device_acc_attr_t *attr_p, uint_t alloc_flags,
    uint_t bind_flags, dma_area_t *dma_p)
{
    int err;

    /*
     * Allocate handle
     */
    err = ddi_dma_alloc_handle(devinfo, dma_attr,
        DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
    if (err != DDI_SUCCESS)
        return (DDI_FAILURE);

    /*
     * Allocate memory
     */
    err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
        alloc_flags, DDI_DMA_SLEEP, NULL, &dma_p->mem_va,
        &dma_p->alength, &dma_p->acc_hdl);
    if (err != DDI_SUCCESS)
        return (DDI_FAILURE);

    /*
     * Bind the two together
     */
    err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
        dma_p->mem_va, dma_p->alength, bind_flags,
        DDI_DMA_SLEEP, NULL, &dma_p->cookie, &dma_p->ncookies);
    if (err != DDI_DMA_MAPPED)
        return (DDI_FAILURE);

    /* ~0U sentinels — presumably "not used" markers; only offset is live */
    dma_p->nslots = ~0U;
    dma_p->size = ~0U;
    dma_p->token = ~0U;
    dma_p->offset = 0;
    return (DDI_SUCCESS);
}

/*
 * Free one allocated area of DMAable memory
 */
static void
arn_free_dma_mem(dma_area_t *dma_p)
{
    if (dma_p->dma_hdl != NULL) {
        (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
        if (dma_p->acc_hdl != NULL) {
            ddi_dma_mem_free(&dma_p->acc_hdl);
            dma_p->acc_hdl = NULL;
        }
        ddi_dma_free_handle(&dma_p->dma_hdl);
        dma_p->ncookies = 0;
        dma_p->dma_hdl = NULL;
    }
}

/*
 * Initialize tx, rx. or beacon buffer list. Allocate DMA memory for
 * each buffer.
 */
static int
arn_buflist_setup(dev_info_t *devinfo, struct arn_softc *sc, list_t *bflist,
    struct ath_buf **pbf, struct ath_desc **pds, int nbuf, uint_t dmabflags)
{
    int i, err;
    struct ath_buf *bf = *pbf;
    struct ath_desc *ds = *pds;

    list_create(bflist, sizeof (struct ath_buf),
        offsetof(struct ath_buf, bf_node));
    for (i = 0; i < nbuf; i++, bf++, ds++) {
        bf->bf_desc = ds;
        /* DMA address of this descriptor within the shared ring */
        bf->bf_daddr = sc->sc_desc_dma.cookie.dmac_address +
            ((uintptr_t)ds - (uintptr_t)sc->sc_desc);
        list_insert_tail(bflist, bf);

        /* alloc DMA memory */
        err = arn_alloc_dma_mem(devinfo, &arn_dma_attr,
            sc->sc_dmabuf_size, &arn_desc_accattr, DDI_DMA_STREAMING,
            dmabflags, &bf->bf_dma);
        if (err != DDI_SUCCESS)
            return (err);
    }
    /* hand the advanced cursors back so the next list continues here */
    *pbf = bf;
    *pds = ds;

    return (DDI_SUCCESS);
}

/*
 * Destroy tx, rx or beacon buffer list. Free DMA memory.
388 */ 389 static void 390 arn_buflist_cleanup(list_t *buflist) 391 { 392 struct ath_buf *bf; 393 394 if (!buflist) 395 return; 396 397 bf = list_head(buflist); 398 while (bf != NULL) { 399 if (bf->bf_m != NULL) { 400 freemsg(bf->bf_m); 401 bf->bf_m = NULL; 402 } 403 /* Free DMA buffer */ 404 arn_free_dma_mem(&bf->bf_dma); 405 if (bf->bf_in != NULL) { 406 ieee80211_free_node(bf->bf_in); 407 bf->bf_in = NULL; 408 } 409 list_remove(buflist, bf); 410 bf = list_head(buflist); 411 } 412 list_destroy(buflist); 413 } 414 415 static void 416 arn_desc_free(struct arn_softc *sc) 417 { 418 arn_buflist_cleanup(&sc->sc_txbuf_list); 419 arn_buflist_cleanup(&sc->sc_rxbuf_list); 420 #ifdef ARN_IBSS 421 arn_buflist_cleanup(&sc->sc_bcbuf_list); 422 #endif 423 424 /* Free descriptor DMA buffer */ 425 arn_free_dma_mem(&sc->sc_desc_dma); 426 427 kmem_free((void *)sc->sc_vbufptr, sc->sc_vbuflen); 428 sc->sc_vbufptr = NULL; 429 } 430 431 static int 432 arn_desc_alloc(dev_info_t *devinfo, struct arn_softc *sc) 433 { 434 int err; 435 size_t size; 436 struct ath_desc *ds; 437 struct ath_buf *bf; 438 439 #ifdef ARN_IBSS 440 size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF); 441 #else 442 size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF); 443 #endif 444 445 err = arn_alloc_dma_mem(devinfo, &arn_desc_dma_attr, size, 446 &arn_desc_accattr, DDI_DMA_CONSISTENT, 447 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &sc->sc_desc_dma); 448 449 /* virtual address of the first descriptor */ 450 sc->sc_desc = (struct ath_desc *)sc->sc_desc_dma.mem_va; 451 452 ds = sc->sc_desc; 453 ARN_DBG((ARN_DBG_INIT, "arn: arn_desc_alloc(): DMA map: " 454 "%p (%d) -> %p\n", 455 sc->sc_desc, sc->sc_desc_dma.alength, 456 sc->sc_desc_dma.cookie.dmac_address)); 457 458 /* allocate data structures to describe TX/RX DMA buffers */ 459 #ifdef ARN_IBSS 460 sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF + 461 ATH_BCBUF); 462 #else 463 sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + 
ATH_RXBUF); 464 #endif 465 bf = (struct ath_buf *)kmem_zalloc(sc->sc_vbuflen, KM_SLEEP); 466 sc->sc_vbufptr = bf; 467 468 /* DMA buffer size for each TX/RX packet */ 469 sc->sc_dmabuf_size = roundup(1000 + sizeof (struct ieee80211_frame) + 470 IEEE80211_MTU + IEEE80211_CRC_LEN + 471 (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + 472 IEEE80211_WEP_CRCLEN), sc->sc_cachelsz); 473 474 /* create RX buffer list */ 475 err = arn_buflist_setup(devinfo, sc, &sc->sc_rxbuf_list, &bf, &ds, 476 ATH_RXBUF, DDI_DMA_READ | DDI_DMA_STREAMING); 477 if (err != DDI_SUCCESS) { 478 arn_desc_free(sc); 479 return (err); 480 } 481 482 /* create TX buffer list */ 483 err = arn_buflist_setup(devinfo, sc, &sc->sc_txbuf_list, &bf, &ds, 484 ATH_TXBUF, DDI_DMA_STREAMING); 485 if (err != DDI_SUCCESS) { 486 arn_desc_free(sc); 487 return (err); 488 } 489 490 /* create beacon buffer list */ 491 #ifdef ARN_IBSS 492 err = arn_buflist_setup(devinfo, sc, &sc->sc_bcbuf_list, &bf, &ds, 493 ATH_BCBUF, DDI_DMA_STREAMING); 494 if (err != DDI_SUCCESS) { 495 arn_desc_free(sc); 496 return (err); 497 } 498 #endif 499 500 return (DDI_SUCCESS); 501 } 502 503 static struct ath_rate_table * 504 /* LINTED E_STATIC_UNUSED */ 505 arn_get_ratetable(struct arn_softc *sc, uint32_t mode) 506 { 507 struct ath_rate_table *rate_table = NULL; 508 509 switch (mode) { 510 case IEEE80211_MODE_11A: 511 rate_table = sc->hw_rate_table[ATH9K_MODE_11A]; 512 break; 513 case IEEE80211_MODE_11B: 514 rate_table = sc->hw_rate_table[ATH9K_MODE_11B]; 515 break; 516 case IEEE80211_MODE_11G: 517 rate_table = sc->hw_rate_table[ATH9K_MODE_11G]; 518 break; 519 #ifdef ARB_11N 520 case IEEE80211_MODE_11NA_HT20: 521 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20]; 522 break; 523 case IEEE80211_MODE_11NG_HT20: 524 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20]; 525 break; 526 case IEEE80211_MODE_11NA_HT40PLUS: 527 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS]; 528 break; 529 case IEEE80211_MODE_11NA_HT40MINUS: 530 rate_table = 
            sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
        break;
    case IEEE80211_MODE_11NG_HT40PLUS:
        rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
        break;
    case IEEE80211_MODE_11NG_HT40MINUS:
        rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
        break;
#endif
    default:
        ARN_DBG((ARN_DBG_FATAL, "arn: arn_get_ratetable(): "
            "invalid mode %u\n", mode));
        return (NULL);
    }

    return (rate_table);

}

/*
 * Switch the current phy mode: rebuild the dot11-rate -> rate-table-index
 * map (asc_rixmap) and cache the active rate table, mode and protection
 * rate index.
 */
static void
arn_setcurmode(struct arn_softc *sc, enum wireless_mode mode)
{
    struct ath_rate_table *rt;
    int i;

    /* 0xff marks "no h/w rate" entries */
    for (i = 0; i < sizeof (sc->asc_rixmap); i++)
        sc->asc_rixmap[i] = 0xff;

    rt = sc->hw_rate_table[mode];
    ASSERT(rt != NULL);

    for (i = 0; i < rt->rate_cnt; i++)
        sc->asc_rixmap[rt->info[i].dot11rate &
            IEEE80211_RATE_VAL] = (uint8_t)i; /* LINT */

    sc->sc_currates = rt;
    sc->sc_curmode = mode;

    /*
     * All protection frames are transmited at 2Mb/s for
     * 11g, otherwise at 1Mb/s.
     * XXX select protection rate index from rate table.
     */
    sc->sc_protrix = (mode == ATH9K_MODE_11G ?
        1 : 0);
}

/*
 * Translate a h/w channel's chanmode into the driver's wireless_mode
 * enum; unknown chanmodes fall back to 11B.
 */
static enum wireless_mode
arn_chan2mode(struct ath9k_channel *chan)
{
    if (chan->chanmode == CHANNEL_A)
        return (ATH9K_MODE_11A);
    else if (chan->chanmode == CHANNEL_G)
        return (ATH9K_MODE_11G);
    else if (chan->chanmode == CHANNEL_B)
        return (ATH9K_MODE_11B);
    else if (chan->chanmode == CHANNEL_A_HT20)
        return (ATH9K_MODE_11NA_HT20);
    else if (chan->chanmode == CHANNEL_G_HT20)
        return (ATH9K_MODE_11NG_HT20);
    else if (chan->chanmode == CHANNEL_A_HT40PLUS)
        return (ATH9K_MODE_11NA_HT40PLUS);
    else if (chan->chanmode == CHANNEL_A_HT40MINUS)
        return (ATH9K_MODE_11NA_HT40MINUS);
    else if (chan->chanmode == CHANNEL_G_HT40PLUS)
        return (ATH9K_MODE_11NG_HT40PLUS);
    else if (chan->chanmode == CHANNEL_G_HT40MINUS)
        return (ATH9K_MODE_11NG_HT40MINUS);

    return (ATH9K_MODE_11B);
}

/*
 * Push the configured tx power limit to the h/w if it changed, then
 * read the value back in case the h/w clamped it.
 */
static void
arn_update_txpow(struct arn_softc *sc)
{
    struct ath_hal *ah = sc->sc_ah;
    uint32_t txpow;

    if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
        (void) ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
        /* read back in case value is clamped */
        (void) ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
        sc->sc_curtxpow = (uint32_t)txpow;
    }
}

/*
 * Publish the supported rate set for `mode' from the h/w rate table
 * into net80211's ic_sup_rates.
 */
static void
arn_setup_rates(struct arn_softc *sc, uint32_t mode)
{
    int i, maxrates;
    struct ath_rate_table *rate_table = NULL;
    struct ieee80211_rateset *rateset;
    ieee80211com_t *ic = (ieee80211com_t *)sc;

    /* rate_table = arn_get_ratetable(sc, mode); */
    switch (mode) {
    case IEEE80211_MODE_11A:
        rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
        break;
    case IEEE80211_MODE_11B:
        rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
        break;
    case IEEE80211_MODE_11G:
        rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
        break;
#ifdef ARN_11N
    case IEEE80211_MODE_11NA_HT20:
        rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
break; 638 case IEEE80211_MODE_11NG_HT20: 639 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20]; 640 break; 641 case IEEE80211_MODE_11NA_HT40PLUS: 642 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS]; 643 break; 644 case IEEE80211_MODE_11NA_HT40MINUS: 645 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS]; 646 break; 647 case IEEE80211_MODE_11NG_HT40PLUS: 648 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS]; 649 break; 650 case IEEE80211_MODE_11NG_HT40MINUS: 651 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS]; 652 break; 653 #endif 654 default: 655 ARN_DBG((ARN_DBG_RATE, "arn: arn_get_ratetable(): " 656 "invalid mode %u\n", mode)); 657 break; 658 } 659 if (rate_table == NULL) 660 return; 661 if (rate_table->rate_cnt > ATH_RATE_MAX) { 662 ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): " 663 "rate table too small (%u > %u)\n", 664 rate_table->rate_cnt, IEEE80211_RATE_MAXSIZE)); 665 maxrates = ATH_RATE_MAX; 666 } else 667 maxrates = rate_table->rate_cnt; 668 669 ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): " 670 "maxrates is %d\n", maxrates)); 671 672 rateset = &ic->ic_sup_rates[mode]; 673 for (i = 0; i < maxrates; i++) { 674 rateset->ir_rates[i] = rate_table->info[i].dot11rate; 675 ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): " 676 "%d\n", rate_table->info[i].dot11rate)); 677 } 678 rateset->ir_nrates = (uint8_t)maxrates; /* ??? 
 */
}

/*
 * Query the regulatory module for the legal channel list and publish
 * it into net80211's ic_sup_channels; returns 0 on success or EINVAL
 * if no channel list could be collected.
 */
static int
arn_setup_channels(struct arn_softc *sc)
{
    struct ath_hal *ah = sc->sc_ah;
    ieee80211com_t *ic = (ieee80211com_t *)sc;
    int nchan, i, index;
    uint8_t regclassids[ATH_REGCLASSIDS_MAX];
    uint32_t nregclass = 0;
    struct ath9k_channel *c;

    /* Fill in ah->ah_channels */
    if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (uint32_t *)&nchan,
        regclassids, ATH_REGCLASSIDS_MAX, &nregclass, CTRY_DEFAULT,
        B_FALSE, 1)) {
        uint32_t rd = ah->ah_currentRD;
        ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
            "unable to collect channel list; "
            "regdomain likely %u country code %u\n",
            rd, CTRY_DEFAULT));
        return (EINVAL);
    }

    ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
        "number of channel is %d\n", nchan));

    for (i = 0; i < nchan; i++) {
        c = &ah->ah_channels[i];
        uint16_t flags;
        index = ath9k_hw_mhz2ieee(ah, c->channel, c->channelFlags);

        if (index > IEEE80211_CHAN_MAX) {
            ARN_DBG((ARN_DBG_CHANNEL,
                "arn: arn_setup_channels(): "
                "bad hal channel %d (%u/%x) ignored\n",
                index, c->channel, c->channelFlags));
            continue;
        }
        /* NB: flags are known to be compatible */
        if (index < 0) {
            /*
             * can't handle frequency <2400MHz (negative
             * channels) right now
             */
            ARN_DBG((ARN_DBG_CHANNEL,
                "arn: arn_setup_channels(): "
                "hal channel %d (%u/%x) "
                "cannot be handled, ignored\n",
                index, c->channel, c->channelFlags));
            continue;
        }

        /*
         * Calculate net80211 flags; most are compatible
         * but some need massaging. Note the static turbo
         * conversion can be removed once net80211 is updated
         * to understand static vs. dynamic turbo.
737 */ 738 739 flags = c->channelFlags & (CHANNEL_ALL | CHANNEL_PASSIVE); 740 741 if (ic->ic_sup_channels[index].ich_freq == 0) { 742 ic->ic_sup_channels[index].ich_freq = c->channel; 743 ic->ic_sup_channels[index].ich_flags = flags; 744 } else { 745 /* channels overlap; e.g. 11g and 11b */ 746 ic->ic_sup_channels[index].ich_flags |= flags; 747 } 748 if ((c->channelFlags & CHANNEL_G) == CHANNEL_G) { 749 sc->sc_have11g = 1; 750 ic->ic_caps |= IEEE80211_C_SHPREAMBLE | 751 IEEE80211_C_SHSLOT; /* short slot time */ 752 } 753 } 754 755 return (0); 756 } 757 758 uint32_t 759 arn_chan2flags(ieee80211com_t *isc, struct ieee80211_channel *chan) 760 { 761 static const uint32_t modeflags[] = { 762 0, /* IEEE80211_MODE_AUTO */ 763 CHANNEL_A, /* IEEE80211_MODE_11A */ 764 CHANNEL_B, /* IEEE80211_MODE_11B */ 765 CHANNEL_G, /* IEEE80211_MODE_11G */ 766 0, /* */ 767 0, /* */ 768 0 /* */ 769 }; 770 return (modeflags[ieee80211_chan2mode(isc, chan)]); 771 } 772 773 /* 774 * Update internal state after a channel change. 775 */ 776 void 777 arn_chan_change(struct arn_softc *sc, struct ieee80211_channel *chan) 778 { 779 struct ieee80211com *ic = &sc->sc_isc; 780 enum ieee80211_phymode mode; 781 enum wireless_mode wlmode; 782 783 /* 784 * Change channels and update the h/w rate map 785 * if we're switching; e.g. 11a to 11b/g. 786 */ 787 mode = ieee80211_chan2mode(ic, chan); 788 switch (mode) { 789 case IEEE80211_MODE_11A: 790 wlmode = ATH9K_MODE_11A; 791 break; 792 case IEEE80211_MODE_11B: 793 wlmode = ATH9K_MODE_11B; 794 break; 795 case IEEE80211_MODE_11G: 796 wlmode = ATH9K_MODE_11B; 797 break; 798 default: 799 break; 800 } 801 if (wlmode != sc->sc_curmode) 802 arn_setcurmode(sc, wlmode); 803 804 } 805 806 /* 807 * Set/change channels. If the channel is really being changed, it's done 808 * by reseting the chip. To accomplish this we must first cleanup any pending 809 * DMA, then restart stuff. 
810 */ 811 static int 812 arn_set_channel(struct arn_softc *sc, struct ath9k_channel *hchan) 813 { 814 struct ath_hal *ah = sc->sc_ah; 815 ieee80211com_t *ic = &sc->sc_isc; 816 boolean_t fastcc = B_TRUE; 817 boolean_t stopped; 818 struct ieee80211_channel chan; 819 enum wireless_mode curmode; 820 821 if (sc->sc_flags & SC_OP_INVALID) 822 return (EIO); 823 824 if (hchan->channel != sc->sc_ah->ah_curchan->channel || 825 hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags || 826 (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) || 827 (sc->sc_flags & SC_OP_FULL_RESET)) { 828 int status; 829 830 /* 831 * This is only performed if the channel settings have 832 * actually changed. 833 * 834 * To switch channels clear any pending DMA operations; 835 * wait long enough for the RX fifo to drain, reset the 836 * hardware at the new frequency, and then re-enable 837 * the relevant bits of the h/w. 838 */ 839 (void) ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */ 840 arn_draintxq(sc, B_FALSE); /* clear pending tx frames */ 841 stopped = arn_stoprecv(sc); /* turn off frame recv */ 842 843 /* 844 * XXX: do not flush receive queue here. We don't want 845 * to flush data frames already in queue because of 846 * changing channel. 
847 */ 848 849 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET)) 850 fastcc = B_FALSE; 851 852 ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_set_channel(): " 853 "(%u MHz) -> (%u MHz), cflags:%x, chanwidth: %d\n", 854 sc->sc_ah->ah_curchan->channel, 855 hchan->channel, hchan->channelFlags, sc->tx_chan_width)); 856 857 if (!ath9k_hw_reset(ah, hchan, sc->tx_chan_width, 858 sc->sc_tx_chainmask, sc->sc_rx_chainmask, 859 sc->sc_ht_extprotspacing, fastcc, &status)) { 860 ARN_DBG((ARN_DBG_FATAL, "arn: arn_set_channel(): " 861 "unable to reset channel %u (%uMhz) " 862 "flags 0x%x hal status %u\n", 863 ath9k_hw_mhz2ieee(ah, hchan->channel, 864 hchan->channelFlags), 865 hchan->channel, hchan->channelFlags, status)); 866 return (EIO); 867 } 868 869 sc->sc_curchan = *hchan; 870 871 sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE; 872 sc->sc_flags &= ~SC_OP_FULL_RESET; 873 874 if (arn_startrecv(sc) != 0) { 875 arn_problem("arn: arn_set_channel(): " 876 "unable to restart recv logic\n"); 877 return (EIO); 878 } 879 880 chan.ich_freq = hchan->channel; 881 chan.ich_flags = hchan->channelFlags; 882 ic->ic_ibss_chan = &chan; 883 884 /* 885 * Change channels and update the h/w rate map 886 * if we're switching; e.g. 11a to 11b/g. 887 */ 888 curmode = arn_chan2mode(hchan); 889 if (curmode != sc->sc_curmode) 890 arn_setcurmode(sc, arn_chan2mode(hchan)); 891 892 arn_update_txpow(sc); 893 894 (void) ath9k_hw_set_interrupts(ah, sc->sc_imask); 895 } 896 897 return (0); 898 } 899 900 /* 901 * This routine performs the periodic noise floor calibration function 902 * that is used to adjust and optimize the chip performance. This 903 * takes environmental changes (location, temperature) into account. 904 * When the task is complete, it reschedules itself depending on the 905 * appropriate interval that was calculated. 
906 */ 907 static void 908 arn_ani_calibrate(void *arg) 909 910 { 911 ieee80211com_t *ic = (ieee80211com_t *)arg; 912 struct arn_softc *sc = (struct arn_softc *)ic; 913 struct ath_hal *ah = sc->sc_ah; 914 boolean_t longcal = B_FALSE; 915 boolean_t shortcal = B_FALSE; 916 boolean_t aniflag = B_FALSE; 917 unsigned int timestamp = drv_hztousec(ddi_get_lbolt())/1000; 918 uint32_t cal_interval; 919 920 /* 921 * don't calibrate when we're scanning. 922 * we are most likely not on our home channel. 923 */ 924 if (ic->ic_state != IEEE80211_S_RUN) 925 goto settimer; 926 927 /* Long calibration runs independently of short calibration. */ 928 if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) { 929 longcal = B_TRUE; 930 ARN_DBG((ARN_DBG_CALIBRATE, "arn: " 931 "%s: longcal @%lu\n", __func__, drv_hztousec)); 932 sc->sc_ani.sc_longcal_timer = timestamp; 933 } 934 935 /* Short calibration applies only while sc_caldone is FALSE */ 936 if (!sc->sc_ani.sc_caldone) { 937 if ((timestamp - sc->sc_ani.sc_shortcal_timer) >= 938 ATH_SHORT_CALINTERVAL) { 939 shortcal = B_TRUE; 940 ARN_DBG((ARN_DBG_CALIBRATE, "arn: " 941 "%s: shortcal @%lu\n", 942 __func__, drv_hztousec)); 943 sc->sc_ani.sc_shortcal_timer = timestamp; 944 sc->sc_ani.sc_resetcal_timer = timestamp; 945 } 946 } else { 947 if ((timestamp - sc->sc_ani.sc_resetcal_timer) >= 948 ATH_RESTART_CALINTERVAL) { 949 ath9k_hw_reset_calvalid(ah, ah->ah_curchan, 950 &sc->sc_ani.sc_caldone); 951 if (sc->sc_ani.sc_caldone) 952 sc->sc_ani.sc_resetcal_timer = timestamp; 953 } 954 } 955 956 /* Verify whether we must check ANI */ 957 if ((timestamp - sc->sc_ani.sc_checkani_timer) >= 958 ATH_ANI_POLLINTERVAL) { 959 aniflag = B_TRUE; 960 sc->sc_ani.sc_checkani_timer = timestamp; 961 } 962 963 /* Skip all processing if there's nothing to do. 
 */
    if (longcal || shortcal || aniflag) {
        /* Call ANI routine if necessary */
        if (aniflag)
            ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
                ah->ah_curchan);

        /* Perform calibration if necessary */
        if (longcal || shortcal) {
            boolean_t iscaldone = B_FALSE;

            if (ath9k_hw_calibrate(ah, ah->ah_curchan,
                sc->sc_rx_chainmask, longcal, &iscaldone)) {
                /* refresh the cached noise floor on long cal */
                if (longcal)
                    sc->sc_ani.sc_noise_floor =
                        ath9k_hw_getchan_noise(ah,
                        ah->ah_curchan);

                ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
                    "%s: calibrate chan %u/%x nf: %d\n",
                    __func__,
                    ah->ah_curchan->channel,
                    ah->ah_curchan->channelFlags,
                    sc->sc_ani.sc_noise_floor));
            } else {
                ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
                    "%s: calibrate chan %u/%x failed\n",
                    __func__,
                    ah->ah_curchan->channel,
                    ah->ah_curchan->channelFlags));
            }
            sc->sc_ani.sc_caldone = iscaldone;
        }
    }

settimer:
    /*
     * Set timer interval based on previous results.
     * The interval must be the shortest necessary to satisfy ANI,
     * short calibration and long calibration.
1003 */ 1004 cal_interval = ATH_LONG_CALINTERVAL; 1005 if (sc->sc_ah->ah_config.enable_ani) 1006 cal_interval = 1007 min(cal_interval, (uint32_t)ATH_ANI_POLLINTERVAL); 1008 1009 if (!sc->sc_ani.sc_caldone) 1010 cal_interval = min(cal_interval, 1011 (uint32_t)ATH_SHORT_CALINTERVAL); 1012 1013 sc->sc_scan_timer = 0; 1014 sc->sc_scan_timer = timeout(arn_ani_calibrate, (void *)sc, 1015 drv_usectohz(cal_interval * 1000)); 1016 } 1017 1018 static void 1019 arn_stop_caltimer(struct arn_softc *sc) 1020 { 1021 timeout_id_t tmp_id = 0; 1022 1023 while ((sc->sc_cal_timer != 0) && (tmp_id != sc->sc_cal_timer)) { 1024 tmp_id = sc->sc_cal_timer; 1025 (void) untimeout(tmp_id); 1026 } 1027 sc->sc_cal_timer = 0; 1028 } 1029 1030 static uint_t 1031 arn_isr(caddr_t arg) 1032 { 1033 /* LINTED E_BAD_PTR_CAST_ALIGN */ 1034 struct arn_softc *sc = (struct arn_softc *)arg; 1035 struct ath_hal *ah = sc->sc_ah; 1036 enum ath9k_int status; 1037 ieee80211com_t *ic = (ieee80211com_t *)sc; 1038 1039 ARN_LOCK(sc); 1040 1041 if (sc->sc_flags & SC_OP_INVALID) { 1042 /* 1043 * The hardware is not ready/present, don't 1044 * touch anything. Note this can happen early 1045 * on if the IRQ is shared. 1046 */ 1047 ARN_UNLOCK(sc); 1048 return (DDI_INTR_UNCLAIMED); 1049 } 1050 if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */ 1051 ARN_UNLOCK(sc); 1052 return (DDI_INTR_UNCLAIMED); 1053 } 1054 1055 /* 1056 * Figure out the reason(s) for the interrupt. Note 1057 * that the hal returns a pseudo-ISR that may include 1058 * bits we haven't explicitly enabled so we mask the 1059 * value to insure we only process bits we requested. 1060 */ 1061 (void) ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */ 1062 1063 status &= sc->sc_imask; /* discard unasked-for bits */ 1064 1065 /* 1066 * If there are no status bits set, then this interrupt was not 1067 * for me (should have been caught above). 
1068 */ 1069 if (!status) { 1070 ARN_UNLOCK(sc); 1071 return (DDI_INTR_UNCLAIMED); 1072 } 1073 1074 sc->sc_intrstatus = status; 1075 1076 if (status & ATH9K_INT_FATAL) { 1077 /* need a chip reset */ 1078 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): " 1079 "ATH9K_INT_FATAL\n")); 1080 goto reset; 1081 } else if (status & ATH9K_INT_RXORN) { 1082 /* need a chip reset */ 1083 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): " 1084 "ATH9K_INT_RXORN\n")); 1085 goto reset; 1086 } else { 1087 if (status & ATH9K_INT_RXEOL) { 1088 /* 1089 * NB: the hardware should re-read the link when 1090 * RXE bit is written, but it doesn't work 1091 * at least on older hardware revs. 1092 */ 1093 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): " 1094 "ATH9K_INT_RXEOL\n")); 1095 sc->sc_rxlink = NULL; 1096 } 1097 if (status & ATH9K_INT_TXURN) { 1098 /* bump tx trigger level */ 1099 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): " 1100 "ATH9K_INT_TXURN\n")); 1101 (void) ath9k_hw_updatetxtriglevel(ah, B_TRUE); 1102 } 1103 /* XXX: optimize this */ 1104 if (status & ATH9K_INT_RX) { 1105 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): " 1106 "ATH9K_INT_RX\n")); 1107 sc->sc_rx_pend = 1; 1108 ddi_trigger_softintr(sc->sc_softint_id); 1109 } 1110 if (status & ATH9K_INT_TX) { 1111 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): " 1112 "ATH9K_INT_TX\n")); 1113 if (ddi_taskq_dispatch(sc->sc_tq, 1114 arn_tx_int_proc, sc, DDI_NOSLEEP) != 1115 DDI_SUCCESS) { 1116 arn_problem("arn: arn_isr(): " 1117 "No memory for tx taskq\n"); 1118 } 1119 } 1120 #ifdef ARN_ATH9K_INT_MIB 1121 if (status & ATH9K_INT_MIB) { 1122 /* 1123 * Disable interrupts until we service the MIB 1124 * interrupt; otherwise it will continue to 1125 * fire. 1126 */ 1127 (void) ath9k_hw_set_interrupts(ah, 0); 1128 /* 1129 * Let the hal handle the event. We assume 1130 * it will clear whatever condition caused 1131 * the interrupt. 
1132 */ 1133 ath9k_hw_procmibevent(ah, &sc->sc_halstats); 1134 (void) ath9k_hw_set_interrupts(ah, sc->sc_imask); 1135 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): " 1136 "ATH9K_INT_MIB\n")); 1137 } 1138 #endif 1139 1140 #ifdef ARN_ATH9K_INT_TIM_TIMER 1141 if (status & ATH9K_INT_TIM_TIMER) { 1142 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): " 1143 "ATH9K_INT_TIM_TIMER\n")); 1144 if (!(ah->ah_caps.hw_caps & 1145 ATH9K_HW_CAP_AUTOSLEEP)) { 1146 /* 1147 * Clear RxAbort bit so that we can 1148 * receive frames 1149 */ 1150 ath9k_hw_setrxabort(ah, 0); 1151 goto reset; 1152 } 1153 } 1154 #endif 1155 1156 if (status & ATH9K_INT_BMISS) { 1157 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): " 1158 "ATH9K_INT_BMISS\n")); 1159 1160 if (ddi_taskq_dispatch(sc->sc_tq, arn_bmiss_proc, 1161 sc, DDI_NOSLEEP) != DDI_SUCCESS) { 1162 arn_problem("arn: arn_isr(): " 1163 "No memory available for bmiss taskq\n"); 1164 } 1165 } 1166 1167 ARN_UNLOCK(sc); 1168 1169 #ifdef ARN_ATH9K_INT_CST 1170 /* carrier sense timeout */ 1171 if (status & ATH9K_INT_CST) { 1172 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): " 1173 "ATH9K_INT_CST\n")); 1174 return (DDI_INTR_CLAIMED); 1175 } 1176 #endif 1177 1178 if (status & ATH9K_INT_SWBA) { 1179 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): " 1180 "ATH9K_INT_SWBA\n")); 1181 /* This will occur only in Host-AP or Ad-Hoc mode */ 1182 return (DDI_INTR_CLAIMED); 1183 } 1184 } 1185 1186 return (DDI_INTR_CLAIMED); 1187 reset: 1188 ARN_DBG((ARN_DBG_INTERRUPT, "Rset for fatal err\n")); 1189 (void) arn_reset(ic); 1190 ARN_UNLOCK(sc); 1191 return (DDI_INTR_CLAIMED); 1192 } 1193 1194 static int 1195 arn_get_channel(struct arn_softc *sc, struct ieee80211_channel *chan) 1196 { 1197 int i; 1198 1199 for (i = 0; i < sc->sc_ah->ah_nchan; i++) { 1200 if (sc->sc_ah->ah_channels[i].channel == chan->ich_freq) 1201 return (i); 1202 } 1203 1204 return (-1); 1205 } 1206 1207 int 1208 arn_reset(ieee80211com_t *ic) 1209 { 1210 struct arn_softc *sc = (struct arn_softc *)ic; 1211 struct 
ath_hal *ah = sc->sc_ah; 1212 int status; 1213 int error = 0; 1214 1215 (void) ath9k_hw_set_interrupts(ah, 0); 1216 arn_draintxq(sc, 0); 1217 (void) arn_stoprecv(sc); 1218 1219 if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan, sc->tx_chan_width, 1220 sc->sc_tx_chainmask, sc->sc_rx_chainmask, 1221 sc->sc_ht_extprotspacing, B_FALSE, &status)) { 1222 ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): " 1223 "unable to reset hardware; hal status %u\n", status)); 1224 error = EIO; 1225 } 1226 1227 if (arn_startrecv(sc) != 0) 1228 ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): " 1229 "unable to start recv logic\n")); 1230 1231 /* 1232 * We may be doing a reset in response to a request 1233 * that changes the channel so update any state that 1234 * might change as a result. 1235 */ 1236 arn_setcurmode(sc, arn_chan2mode(sc->sc_ah->ah_curchan)); 1237 1238 arn_update_txpow(sc); 1239 1240 if (sc->sc_flags & SC_OP_BEACONS) 1241 arn_beacon_config(sc); /* restart beacons */ 1242 1243 (void) ath9k_hw_set_interrupts(ah, sc->sc_imask); 1244 1245 return (error); 1246 } 1247 1248 int 1249 arn_get_hal_qnum(uint16_t queue, struct arn_softc *sc) 1250 { 1251 int qnum; 1252 1253 switch (queue) { 1254 case WME_AC_VO: 1255 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO]; 1256 break; 1257 case WME_AC_VI: 1258 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI]; 1259 break; 1260 case WME_AC_BE: 1261 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE]; 1262 break; 1263 case WME_AC_BK: 1264 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK]; 1265 break; 1266 default: 1267 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE]; 1268 break; 1269 } 1270 1271 return (qnum); 1272 } 1273 1274 static struct { 1275 uint32_t version; 1276 const char *name; 1277 } ath_mac_bb_names[] = { 1278 { AR_SREV_VERSION_5416_PCI, "5416" }, 1279 { AR_SREV_VERSION_5416_PCIE, "5418" }, 1280 { AR_SREV_VERSION_9100, "9100" }, 1281 { AR_SREV_VERSION_9160, "9160" }, 1282 { AR_SREV_VERSION_9280, "9280" }, 1283 { AR_SREV_VERSION_9285, "9285" } 1284 }; 1285 1286 static struct { 1287 uint16_t 
version; 1288 const char *name; 1289 } ath_rf_names[] = { 1290 { 0, "5133" }, 1291 { AR_RAD5133_SREV_MAJOR, "5133" }, 1292 { AR_RAD5122_SREV_MAJOR, "5122" }, 1293 { AR_RAD2133_SREV_MAJOR, "2133" }, 1294 { AR_RAD2122_SREV_MAJOR, "2122" } 1295 }; 1296 1297 /* 1298 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown. 1299 */ 1300 1301 static const char * 1302 arn_mac_bb_name(uint32_t mac_bb_version) 1303 { 1304 int i; 1305 1306 for (i = 0; i < ARRAY_SIZE(ath_mac_bb_names); i++) { 1307 if (ath_mac_bb_names[i].version == mac_bb_version) { 1308 return (ath_mac_bb_names[i].name); 1309 } 1310 } 1311 1312 return ("????"); 1313 } 1314 1315 /* 1316 * Return the RF name. "????" is returned if the RF is unknown. 1317 */ 1318 1319 static const char * 1320 arn_rf_name(uint16_t rf_version) 1321 { 1322 int i; 1323 1324 for (i = 0; i < ARRAY_SIZE(ath_rf_names); i++) { 1325 if (ath_rf_names[i].version == rf_version) { 1326 return (ath_rf_names[i].name); 1327 } 1328 } 1329 1330 return ("????"); 1331 } 1332 1333 static void 1334 arn_next_scan(void *arg) 1335 { 1336 ieee80211com_t *ic = arg; 1337 struct arn_softc *sc = (struct arn_softc *)ic; 1338 1339 sc->sc_scan_timer = 0; 1340 if (ic->ic_state == IEEE80211_S_SCAN) { 1341 sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc, 1342 drv_usectohz(arn_dwelltime * 1000)); 1343 ieee80211_next_scan(ic); 1344 } 1345 } 1346 1347 static void 1348 arn_stop_scantimer(struct arn_softc *sc) 1349 { 1350 timeout_id_t tmp_id = 0; 1351 1352 while ((sc->sc_scan_timer != 0) && (tmp_id != sc->sc_scan_timer)) { 1353 tmp_id = sc->sc_scan_timer; 1354 (void) untimeout(tmp_id); 1355 } 1356 sc->sc_scan_timer = 0; 1357 } 1358 1359 static int32_t 1360 arn_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg) 1361 { 1362 struct arn_softc *sc = (struct arn_softc *)ic; 1363 struct ath_hal *ah = sc->sc_ah; 1364 struct ieee80211_node *in; 1365 int32_t i, error; 1366 uint8_t *bssid; 1367 uint32_t rfilt; 1368 enum ieee80211_state ostate; 
	struct ath9k_channel *channel;
	int pos;

	/* Should set up & init LED here */

	if (sc->sc_flags & SC_OP_INVALID)
		return (0);

	ostate = ic->ic_state;
	ARN_DBG((ARN_DBG_INIT, "arn: arn_newstate(): "
	    "%x -> %x!\n", ostate, nstate));

	ARN_LOCK(sc);

	/* leaving SCAN/RUN stops the corresponding periodic timer */
	if (nstate != IEEE80211_S_SCAN)
		arn_stop_scantimer(sc);
	if (nstate != IEEE80211_S_RUN)
		arn_stop_caltimer(sc);

	/* Should set LED here */

	if (nstate == IEEE80211_S_INIT) {
		sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
		/*
		 * Disable interrupts.
		 */
		(void) ath9k_hw_set_interrupts
		    (ah, sc->sc_imask &~ ATH9K_INT_GLOBAL);

#ifdef ARN_IBSS
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			(void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
			arn_beacon_return(sc);
		}
#endif
		ARN_UNLOCK(sc);
		ieee80211_stop_watchdog(ic);
		goto done;
	}
	in = ic->ic_bss;

	pos = arn_get_channel(sc, ic->ic_curchan);

	if (pos == -1) {
		ARN_DBG((ARN_DBG_FATAL, "arn: "
		    "%s: Invalid channel\n", __func__));
		error = EINVAL;
		ARN_UNLOCK(sc);
		goto bad;
	}
	sc->tx_chan_width = ATH9K_HT_MACMODE_20;
	sc->sc_ah->ah_channels[pos].chanmode =
	    arn_chan2flags(ic, ic->ic_curchan);
	channel = &sc->sc_ah->ah_channels[pos];
	/*
	 * NOTE(review): the address of an array element can never be
	 * NULL, so this check is dead code; if taken, it would also
	 * "goto bad" without assigning error.
	 */
	if (channel == NULL) {
		arn_problem("arn_newstate(): channel == NULL");
		ARN_UNLOCK(sc);
		goto bad;
	}
	error = arn_set_channel(sc, channel);
	if (error != 0) {
		if (nstate != IEEE80211_S_SCAN) {
			ARN_UNLOCK(sc);
			ieee80211_reset_chan(ic);
			goto bad;
		}
	}

	/*
	 * Get the receive filter according to the
	 * operating mode and state
	 */
	rfilt = arn_calcrxfilter(sc);

	if (nstate == IEEE80211_S_SCAN)
		bssid = ic->ic_macaddr;
	else
		bssid = in->in_bssid;

	ath9k_hw_setrxfilter(ah, rfilt);

	if (nstate == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS)
		ath9k_hw_write_associd(ah, bssid, in->in_associd);
	else
		ath9k_hw_write_associd(ah, bssid, 0);

	/* Check for WLAN_CAPABILITY_PRIVACY ? */
	if (ic->ic_flags & IEEE80211_F_PRIVACY) {
		for (i = 0; i < IEEE80211_WEP_NKID; i++) {
			if (ath9k_hw_keyisvalid(ah, (uint16_t)i))
				(void) ath9k_hw_keysetmac(ah, (uint16_t)i,
				    bssid);
		}
	}

	if (nstate == IEEE80211_S_RUN) {
		switch (ic->ic_opmode) {
#ifdef ARN_IBSS
		case IEEE80211_M_IBSS:
			/*
			 * Allocate and setup the beacon frame.
			 * Stop any previous beacon DMA.
			 */
			(void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
			arn_beacon_return(sc);
			error = arn_beacon_alloc(sc, in);
			if (error != 0) {
				ARN_UNLOCK(sc);
				goto bad;
			}
			/*
			 * If joining an adhoc network defer beacon timer
			 * configuration to the next beacon frame so we
			 * have a current TSF to use. Otherwise we're
			 * starting an ibss/bss so there's no need to delay.
			 */
			if (ic->ic_opmode == IEEE80211_M_IBSS &&
			    ic->ic_bss->in_tstamp.tsf != 0) {
				sc->sc_bsync = 1;
			} else {
				arn_beacon_config(sc);
			}
			break;
#endif /* ARN_IBSS */
		case IEEE80211_M_STA:
			if (ostate != IEEE80211_S_RUN) {
				/*
				 * Defer beacon timer configuration to the next
				 * beacon frame so we have a current TSF to use.
				 * Any TSF collected when scanning is likely old
				 */
#ifdef ARN_IBSS
				sc->sc_bsync = 1;
#else
				/* Configure the beacon and sleep timers. */
				arn_beacon_config(sc);
#endif /* ARN_IBSS */
			}
			break;
		default:
			break;
		}
	} else {
		/* not RUN: no beacon/bmiss interrupts wanted */
		sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
		(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}

	/*
	 * Reset the rate control state.
	 */
	arn_rate_ctl_reset(sc, nstate);

	ARN_UNLOCK(sc);
done:
	/*
	 * Invoke the parent method to complete the work.
	 */
	error = sc->sc_newstate(ic, nstate, arg);

	/*
	 * Finally, start any timers.
	 */
	if (nstate == IEEE80211_S_RUN) {
		ieee80211_start_watchdog(ic, 1);
		ASSERT(sc->sc_cal_timer == 0);
		sc->sc_cal_timer = timeout(arn_ani_calibrate, (void *)sc,
		    drv_usectohz(100 * 1000));
	} else if ((nstate == IEEE80211_S_SCAN) && (ostate != nstate)) {
		/* start ap/neighbor scan timer */
		/* ASSERT(sc->sc_scan_timer == 0); */
		if (sc->sc_scan_timer != 0) {
			(void) untimeout(sc->sc_scan_timer);
			sc->sc_scan_timer = 0;
		}
		sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
		    drv_usectohz(arn_dwelltime * 1000));
	}

bad:
	return (error);
}

/*
 * Periodic watchdog: in RUN state, kick the transmit rate control
 * (per-bss for STA mode, per-node otherwise) and rearm the net80211
 * watchdog for another tick.
 */
static void
arn_watchdog(void *arg)
{
	struct arn_softc *sc = arg;
	ieee80211com_t *ic = &sc->sc_isc;
	int ntimer = 0;

	ARN_LOCK(sc);
	ic->ic_watchdog_timer = 0;
	if (sc->sc_flags & SC_OP_INVALID) {
		ARN_UNLOCK(sc);
		return;
	}

	if (ic->ic_state == IEEE80211_S_RUN) {
		/*
		 * Start the background rate control thread if we
		 * are not configured to use a fixed xmit rate.
		 */
		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
			sc->sc_stats.ast_rate_calls++;
			if (ic->ic_opmode == IEEE80211_M_STA)
				arn_rate_ctl(ic, ic->ic_bss);
			else
				ieee80211_iterate_nodes(&ic->ic_sta,
				    arn_rate_ctl, sc);
		}

		ntimer = 1;
	}
	ARN_UNLOCK(sc);

	ieee80211_watchdog(ic);
	if (ntimer != 0)
		ieee80211_start_watchdog(ic, ntimer);
}

/*
 * net80211 node allocation hook: allocate a driver-private ath_node
 * (embedding the ieee80211_node) and seed its rate control state.
 * KM_SLEEP allocation cannot fail, so this never returns NULL.
 */
static struct ieee80211_node *
arn_node_alloc(ieee80211com_t *ic)
{
	struct ath_node *an;
	struct arn_softc *sc = (struct arn_softc *)ic;

	an = kmem_zalloc(sizeof (struct ath_node), KM_SLEEP);
	arn_rate_update(sc, &an->an_node, 0);

	/* NOTE(review): with KM_SLEEP, an != NULL always holds here */
	return ((an != NULL) ? &an->an_node : NULL);
}

/*
 * net80211 node free hook: drop any dangling references to this node
 * from buffers still queued on the h/w TX queues, then release the
 * node via the saved net80211 cleanup and free the containing
 * ath_node.
 */
static void
arn_node_free(struct ieee80211_node *in)
{
	ieee80211com_t *ic = in->in_ic;
	struct arn_softc *sc = (struct arn_softc *)ic;
	struct ath_buf *bf;
	struct ath_txq *txq;
	int32_t i;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ARN_TXQ_SETUP(sc, i)) {
			txq = &sc->sc_txq[i];
			mutex_enter(&txq->axq_lock);
			bf = list_head(&txq->axq_list);
			while (bf != NULL) {
				if (bf->bf_in == in) {
					bf->bf_in = NULL;
				}
				bf = list_next(&txq->axq_list, bf);
			}
			mutex_exit(&txq->axq_lock);
		}
	}

	ic->ic_node_cleanup(in);
	if (in->in_wpa_ie != NULL)
		ieee80211_free(in->in_wpa_ie);
	kmem_free(in, sizeof (struct ath_node));
}

/*
 * Allocate tx/rx key slots for TKIP. We allocate one slot for
 * each key. MIC is right after the decrypt/encrypt key.
 */
static uint16_t
arn_key_alloc_pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
    ieee80211_keyix *rxkeyix)
{
	uint16_t i, keyix;

	ASSERT(!sc->sc_splitmic);
	/* scan the first quarter of the keymap, one byte (8 slots) at a time */
	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
		uint8_t b = sc->sc_keymap[i];
		if (b == 0xff)
			continue;	/* all 8 slots in this byte taken */
		for (keyix = i * NBBY; keyix < (i + 1) * NBBY;
		    keyix++, b >>= 1) {
			if ((b & 1) || is_set(keyix+64, sc->sc_keymap)) {
				/* full pair unavailable */
				continue;
			}
			set_bit(keyix, sc->sc_keymap);
			set_bit(keyix+64, sc->sc_keymap);
			ARN_DBG((ARN_DBG_KEYCACHE,
			    "arn_key_alloc_pair(): key pair %u,%u\n",
			    keyix, keyix+64));
			*txkeyix = *rxkeyix = keyix;
			return (1);
		}
	}
	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_pair():"
	    " out of pair space\n"));

	return (0);
}

/*
 * Allocate tx/rx key slots for TKIP. We allocate two slots for
 * each key, one for decrypt/encrypt and the other for the MIC.
 */
static int
arn_key_alloc_2pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
    ieee80211_keyix *rxkeyix)
{
	uint16_t i, keyix;

	ASSERT(sc->sc_splitmic);
	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
		uint8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i*NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
			if (is_set(keyix+32, sc->sc_keymap) ||
			    is_set(keyix+64, sc->sc_keymap) ||
			    is_set(keyix+32+64, sc->sc_keymap)) {
				/* full pair unavailable */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				goto again;
			}
			/* claim all four related slots */
			set_bit(keyix, sc->sc_keymap);
			set_bit(keyix+64, sc->sc_keymap);
			set_bit(keyix+32, sc->sc_keymap);
			set_bit(keyix+32+64, sc->sc_keymap);
			ARN_DBG((ARN_DBG_KEYCACHE,
			    "arn_key_alloc_2pair(): key pair %u,%u %u,%u\n",
			    keyix, keyix+64,
			    keyix+32, keyix+32+64));
			*txkeyix = *rxkeyix = keyix;
			return (1);
		}
	}
	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_2pair(): "
	    " out of pair space\n"));

	return (0);
}
/*
 * Allocate a single key cache slot.
 */
static int
arn_key_alloc_single(struct arn_softc *sc, ieee80211_keyix *txkeyix,
    ieee80211_keyix *rxkeyix)
{
	uint16_t i, keyix;

	/* try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap); i++) {
		uint8_t b = sc->sc_keymap[i];

		if (b != 0xff) {
			/*
			 * One or more slots are free.
			 */
			keyix = i*NBBY;
			while (b & 1)
				keyix++, b >>= 1;
			set_bit(keyix, sc->sc_keymap);
			ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_single(): "
			    "key %u\n", keyix));
			*txkeyix = *rxkeyix = keyix;
			return (1);
		}
	}
	return (0);
}

/*
 * Allocate one or more key cache slots for a unicast key. The
 * key itself is needed only to identify the cipher. For hardware
 * TKIP with split cipher+MIC keys we allocate two key cache slot
 * pairs so that we can setup separate TX and RX MIC keys.
 Note
 * that the MIC key for a TKIP key at slot i is assumed by the
 * hardware to be at slot i+64. This limits TKIP keys to the first
 * 64 entries.
 */
/* ARGSUSED */
int
arn_key_alloc(ieee80211com_t *ic, const struct ieee80211_key *k,
    ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{
	struct arn_softc *sc = (struct arn_softc *)ic;

	/*
	 * We allocate two pair for TKIP when using the h/w to do
	 * the MIC. For everything else, including software crypto,
	 * we allocate a single entry. Note that s/w crypto requires
	 * a pass-through slot on the 5211 and 5212. The 5210 does
	 * not support pass-through cache entries and we map all
	 * those requests to slot 0.
	 */
	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
		return (arn_key_alloc_single(sc, keyix, rxkeyix));
	} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
		if (sc->sc_splitmic)
			return (arn_key_alloc_2pair(sc, keyix, rxkeyix));
		else
			return (arn_key_alloc_pair(sc, keyix, rxkeyix));
	} else {
		return (arn_key_alloc_single(sc, keyix, rxkeyix));
	}
}

/*
 * Delete an entry in the key cache allocated by ath_key_alloc.
 */
int
arn_key_delete(ieee80211com_t *ic, const struct ieee80211_key *k)
{
	struct arn_softc *sc = (struct arn_softc *)ic;
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	ieee80211_keyix keyix = k->wk_keyix;

	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_delete():"
	    " delete key %u ic_cipher=0x%x\n", keyix, cip->ic_cipher));

	(void) ath9k_hw_keyreset(ah, keyix);
	/*
	 * Handle split tx/rx keying required for TKIP with h/w MIC.
	 */
	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
		(void) ath9k_hw_keyreset(ah, keyix+32);	/* RX key */

	if (keyix >= IEEE80211_WEP_NKID) {
		/*
		 * Don't touch keymap entries for global keys so
		 * they are never considered for dynamic allocation.
		 */
		clr_bit(keyix, sc->sc_keymap);
		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
			/*
			 * If splitmic is true +64 is TX key MIC,
			 * else +64 is RX key + RX key MIC.
			 */
			clr_bit(keyix+64, sc->sc_keymap);
			if (sc->sc_splitmic) {
				/* Rx key */
				clr_bit(keyix+32, sc->sc_keymap);
				/* RX key MIC */
				clr_bit(keyix+32+64, sc->sc_keymap);
			}
		}
	}
	return (1);
}

/*
 * Set a TKIP key into the hardware. This handles the
 * potential distribution of key state to multiple key
 * cache slots for TKIP.
 */
static int
arn_keyset_tkip(struct arn_softc *sc, const struct ieee80211_key *k,
    struct ath9k_keyval *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	uint8_t *key_rxmic = NULL;
	uint8_t *key_txmic = NULL;
	uint8_t *key = (uint8_t *)&(k->wk_key[0]);
	struct ath_hal *ah = sc->sc_ah;

	/* TKIP key layout: 16-byte cipher key, then 8-byte TX and RX MICs */
	key_txmic = key + 16;
	key_rxmic = key + 24;

	if (mac == NULL) {
		/* Group key installation */
		(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
		return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
		    mac, B_FALSE));
	}
	if (!sc->sc_splitmic) {
		/*
		 * data key goes at first index,
		 * the hal handles the MIC keys at index+64.
		 */
		(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
		(void) memcpy(hk->kv_txmic, key_txmic, sizeof (hk->kv_txmic));
		return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
		    mac, B_FALSE));
	}
	/*
	 * TX key goes at first index, RX key at +32.
	 * The hal handles the MIC keys at index+64.
	 */
	(void) memcpy(hk->kv_mic, key_txmic, sizeof (hk->kv_mic));
	if (!(ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, NULL,
	    B_FALSE))) {
		/* Txmic entry failed. No need to proceed further */
		ARN_DBG((ARN_DBG_KEYCACHE,
		    "%s Setting TX MIC Key Failed\n", __func__));
		return (0);
	}

	(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));

	/* XXX delete tx key on failure? */
	return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, mac, B_FALSE));

}

/*
 * Install a key into the hardware key cache, translating the
 * net80211 cipher to the HAL cipher type and dispatching TKIP keys
 * with hardware MIC to arn_keyset_tkip().
 */
int
arn_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct arn_softc *sc = (struct arn_softc *)ic;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	struct ath9k_keyval hk;

	/* cipher table */
	static const uint8_t ciphermap[] = {
		ATH9K_CIPHER_WEP,	/* IEEE80211_CIPHER_WEP */
		ATH9K_CIPHER_TKIP,	/* IEEE80211_CIPHER_TKIP */
		ATH9K_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
		ATH9K_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
		ATH9K_CIPHER_CKIP,	/* IEEE80211_CIPHER_CKIP */
		ATH9K_CIPHER_CLR,	/* IEEE80211_CIPHER_NONE */
	};

	bzero(&hk, sizeof (hk));

	/*
	 * Software crypto uses a "clear key" so non-crypto
	 * state kept in the key cache are maintained so that
	 * rx frames have an entry to match.
1907 */ 1908 if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) { 1909 ASSERT(cip->ic_cipher < 6); 1910 hk.kv_type = ciphermap[cip->ic_cipher]; 1911 hk.kv_len = k->wk_keylen; 1912 bcopy(k->wk_key, hk.kv_val, k->wk_keylen); 1913 } else { 1914 hk.kv_type = ATH9K_CIPHER_CLR; 1915 } 1916 1917 if (hk.kv_type == ATH9K_CIPHER_TKIP && 1918 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) { 1919 return (arn_keyset_tkip(sc, k, &hk, mac)); 1920 } else { 1921 return (ath9k_hw_set_keycache_entry(sc->sc_ah, 1922 k->wk_keyix, &hk, mac, B_FALSE)); 1923 } 1924 } 1925 1926 /* 1927 * Enable/Disable short slot timing 1928 */ 1929 void 1930 arn_set_shortslot(ieee80211com_t *ic, int onoff) 1931 { 1932 struct ath_hal *ah = ((struct arn_softc *)ic)->sc_ah; 1933 1934 if (onoff) 1935 (void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9); 1936 else 1937 (void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_20); 1938 } 1939 1940 static int 1941 arn_open(struct arn_softc *sc) 1942 { 1943 ieee80211com_t *ic = (ieee80211com_t *)sc; 1944 struct ieee80211_channel *curchan = ic->ic_curchan; 1945 struct ath9k_channel *init_channel; 1946 int error = 0, pos, status; 1947 1948 ARN_LOCK_ASSERT(sc); 1949 1950 pos = arn_get_channel(sc, curchan); 1951 if (pos == -1) { 1952 ARN_DBG((ARN_DBG_FATAL, "arn: " 1953 "%s: Invalid channel\n", __func__)); 1954 error = EINVAL; 1955 goto error; 1956 } 1957 1958 sc->tx_chan_width = ATH9K_HT_MACMODE_20; 1959 1960 if (sc->sc_curmode == ATH9K_MODE_11A) { 1961 sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_A; 1962 } else { 1963 sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_G; 1964 } 1965 1966 init_channel = &sc->sc_ah->ah_channels[pos]; 1967 1968 /* Reset SERDES registers */ 1969 ath9k_hw_configpcipowersave(sc->sc_ah, 0); 1970 1971 /* 1972 * The basic interface to setting the hardware in a good 1973 * state is ``reset''. On return the hardware is known to 1974 * be powered up and with interrupts disabled. 
This must 1975 * be followed by initialization of the appropriate bits 1976 * and then setup of the interrupt mask. 1977 */ 1978 if (!ath9k_hw_reset(sc->sc_ah, init_channel, 1979 sc->tx_chan_width, sc->sc_tx_chainmask, 1980 sc->sc_rx_chainmask, sc->sc_ht_extprotspacing, 1981 B_FALSE, &status)) { 1982 ARN_DBG((ARN_DBG_FATAL, "arn: " 1983 "%s: unable to reset hardware; hal status %u " 1984 "(freq %u flags 0x%x)\n", __func__, status, 1985 init_channel->channel, init_channel->channelFlags)); 1986 1987 error = EIO; 1988 goto error; 1989 } 1990 1991 /* 1992 * This is needed only to setup initial state 1993 * but it's best done after a reset. 1994 */ 1995 arn_update_txpow(sc); 1996 1997 /* 1998 * Setup the hardware after reset: 1999 * The receive engine is set going. 2000 * Frame transmit is handled entirely 2001 * in the frame output path; there's nothing to do 2002 * here except setup the interrupt mask. 2003 */ 2004 if (arn_startrecv(sc) != 0) { 2005 ARN_DBG((ARN_DBG_INIT, "arn: " 2006 "%s: unable to start recv logic\n", __func__)); 2007 error = EIO; 2008 goto error; 2009 } 2010 2011 /* Setup our intr mask. */ 2012 sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX | 2013 ATH9K_INT_RXEOL | ATH9K_INT_RXORN | 2014 ATH9K_INT_FATAL | ATH9K_INT_GLOBAL; 2015 #ifdef ARN_ATH9K_HW_CAP_GTT 2016 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT) 2017 sc->sc_imask |= ATH9K_INT_GTT; 2018 #endif 2019 2020 #ifdef ARN_ATH9K_HW_CAP_GTT 2021 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) 2022 sc->sc_imask |= ATH9K_INT_CST; 2023 #endif 2024 2025 /* 2026 * Enable MIB interrupts when there are hardware phy counters. 2027 * Note we only do this (at the moment) for station mode. 
2028 */ 2029 #ifdef ARN_ATH9K_INT_MIB 2030 if (ath9k_hw_phycounters(sc->sc_ah) && 2031 ((sc->sc_ah->ah_opmode == ATH9K_M_STA) || 2032 (sc->sc_ah->ah_opmode == ATH9K_M_IBSS))) 2033 sc->sc_imask |= ATH9K_INT_MIB; 2034 #endif 2035 /* 2036 * Some hardware processes the TIM IE and fires an 2037 * interrupt when the TIM bit is set. For hardware 2038 * that does, if not overridden by configuration, 2039 * enable the TIM interrupt when operating as station. 2040 */ 2041 #ifdef ARN_ATH9K_INT_TIM 2042 if ((sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) && 2043 (sc->sc_ah->ah_opmode == ATH9K_M_STA) && 2044 !sc->sc_config.swBeaconProcess) 2045 sc->sc_imask |= ATH9K_INT_TIM; 2046 #endif 2047 if (arn_chan2mode(init_channel) != sc->sc_curmode) 2048 arn_setcurmode(sc, arn_chan2mode(init_channel)); 2049 ARN_DBG((ARN_DBG_INIT, "arn: " 2050 "%s: current mode after arn_setcurmode is %d\n", 2051 __func__, sc->sc_curmode)); 2052 2053 sc->sc_isrunning = 1; 2054 2055 /* Disable BMISS interrupt when we're not associated */ 2056 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); 2057 (void) ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask); 2058 2059 return (0); 2060 2061 error: 2062 return (error); 2063 } 2064 2065 static void 2066 arn_close(struct arn_softc *sc) 2067 { 2068 ieee80211com_t *ic = (ieee80211com_t *)sc; 2069 struct ath_hal *ah = sc->sc_ah; 2070 2071 ARN_LOCK_ASSERT(sc); 2072 2073 if (!sc->sc_isrunning) 2074 return; 2075 2076 /* 2077 * Shutdown the hardware and driver 2078 * Note that some of this work is not possible if the 2079 * hardware is gone (invalid). 2080 */ 2081 ARN_UNLOCK(sc); 2082 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 2083 ieee80211_stop_watchdog(ic); 2084 ARN_LOCK(sc); 2085 2086 /* 2087 * make sure h/w will not generate any interrupt 2088 * before setting the invalid flag. 
	 */
	(void) ath9k_hw_set_interrupts(ah, 0);

	if (!(sc->sc_flags & SC_OP_INVALID)) {
		arn_draintxq(sc, 0);
		(void) arn_stoprecv(sc);
		(void) ath9k_hw_phy_disable(ah);
	} else {
		sc->sc_rxlink = NULL;
	}

	sc->sc_isrunning = 0;
}

/*
 * MAC callback functions
 */

/*
 * mc_getstat(9E): report driver/net80211 statistics to the MAC layer.
 * Stats not tracked locally are passed through to ieee80211_stat().
 */
static int
arn_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	struct arn_softc *sc = arg;
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ieee80211_node *in;
	struct ieee80211_rateset *rs;

	ARN_LOCK(sc);
	switch (stat) {
	case MAC_STAT_IFSPEED:
		in = ic->ic_bss;
		rs = &in->in_rates;
		/* rate table entries are in 0.5 Mb/s units */
		*val = (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) / 2 *
		    1000000ull;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = sc->sc_stats.ast_tx_nobuf +
		    sc->sc_stats.ast_tx_nobufmgt;
		break;
	case MAC_STAT_IERRORS:
		*val = sc->sc_stats.ast_rx_tooshort;
		break;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
		break;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
		break;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
		break;
	case MAC_STAT_OERRORS:
	case WIFI_STAT_TX_FAILED:
		*val = sc->sc_stats.ast_tx_fifoerr +
		    sc->sc_stats.ast_tx_xretries +
		    sc->sc_stats.ast_tx_discard;
		break;
	case WIFI_STAT_TX_RETRANS:
		*val = sc->sc_stats.ast_tx_xretries;
		break;
	case WIFI_STAT_FCS_ERRORS:
		*val = sc->sc_stats.ast_rx_crcerr;
		break;
	case WIFI_STAT_WEP_ERRORS:
		*val = sc->sc_stats.ast_rx_badcrypt;
		break;
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		ARN_UNLOCK(sc);
		return (ieee80211_stat(ic, stat, val));
	default:
		ARN_UNLOCK(sc);
		return (ENOTSUP);
	}
	ARN_UNLOCK(sc);

	return (0);
}

/*
 * mc_start(9E): (re)start the device.  Restarts from scratch so it
 * is safe whether or not the device was already running.
 */
int
arn_m_start(void *arg)
{
	struct arn_softc *sc = arg;
	int err = 0;

	ARN_LOCK(sc);

	/*
	 * Stop anything previously setup. This is safe
	 * whether this is the first time through or not.
	 */

	arn_close(sc);

	if ((err = arn_open(sc)) != 0) {
		ARN_UNLOCK(sc);
		return (err);
	}

	/* H/W is ready now */
	sc->sc_flags &= ~SC_OP_INVALID;

	ARN_UNLOCK(sc);

	return (0);
}

/*
 * mc_stop(9E): stop the device, disable the HAL and put the chip
 * into PCI power save; mark the softc invalid.
 */
static void
arn_m_stop(void *arg)
{
	struct arn_softc *sc = arg;

	ARN_LOCK(sc);
	arn_close(sc);

	/* disable HAL and put h/w to sleep */
	(void) ath9k_hw_disable(sc->sc_ah);
	ath9k_hw_configpcipowersave(sc->sc_ah, 1);

	/* XXX: hardware will not be ready in suspend state */
	sc->sc_flags |= SC_OP_INVALID;
	ARN_UNLOCK(sc);
}

/*
 * mc_setpromisc(9E): toggle the hardware promiscuous RX filter bit.
 */
static int
arn_m_promisc(void *arg, boolean_t on)
{
	struct arn_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	uint32_t rfilt;

	ARN_LOCK(sc);

	rfilt = ath9k_hw_getrxfilter(ah);
	if (on)
		rfilt |= ATH9K_RX_FILTER_PROM;
	else
		rfilt &= ~ATH9K_RX_FILTER_PROM;
	sc->sc_promisc = on;
	ath9k_hw_setrxfilter(ah, rfilt);

	ARN_UNLOCK(sc);

	return (0);
}

/*
 * mc_multicst(9E): add/remove a multicast address.  The address is
 * hashed to one of 64 bits (refcounted in sc_mcast_refs) in the two
 * 32-bit hardware multicast filter registers.
 */
static int
arn_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	struct arn_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	uint32_t val, index, bit;
	uint8_t pos;
	uint32_t *mfilt = sc->sc_mcast_hash;

	ARN_LOCK(sc);

	/* calculate XOR of eight 6bit values */
	val = ARN_LE_READ_32(mca + 0);
	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	val = ARN_LE_READ_32(mca + 3);
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	pos &= 0x3f;
	index = pos / 32;
	bit = 1 << (pos % 32);

	if (add) {	/* enable multicast */
		sc->sc_mcast_refs[pos]++;
		mfilt[index] |= bit;
	} else {	/* disable multicast */
		if (--sc->sc_mcast_refs[pos] == 0)
			mfilt[index] &= ~bit;
	}
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);

	ARN_UNLOCK(sc);
	return (0);
}

/*
 * mc_unicst(9E): change the MAC address; pushes the new address to
 * the hardware and does a full chip reset.
 */
static int
arn_m_unicst(void *arg, const uint8_t *macaddr)
{
	struct arn_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	ieee80211com_t *ic = (ieee80211com_t *)sc;

	ARN_DBG((ARN_DBG_XMIT, "ath: ath_gld_saddr(): "
	    "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
	    macaddr[0], macaddr[1], macaddr[2],
	    macaddr[3], macaddr[4], macaddr[5]));

	ARN_LOCK(sc);
	IEEE80211_ADDR_COPY(sc->sc_isc.ic_macaddr, macaddr);
	(void) ath9k_hw_setmac(ah, sc->sc_isc.ic_macaddr);
	(void) arn_reset(ic);
	ARN_UNLOCK(sc);
	return (0);
}

/*
 * mc_tx(9E): transmit a chain of mblks.  Returns the unsent remainder
 * on ENOMEM (MAC will retry later); frees and returns NULL when not
 * in RUN state or on any other error.
 */
static mblk_t *
arn_m_tx(void *arg, mblk_t *mp)
{
	struct arn_softc *sc = arg;
	int error = 0;
	mblk_t *next;
	ieee80211com_t *ic = (ieee80211com_t *)sc;

	/*
	 * No data frames go out unless we're associated; this
	 * should not happen as the 802.11 layer does not enable
	 * the xmit queue until we enter the RUN state.
	 */
	if (ic->ic_state != IEEE80211_S_RUN) {
		ARN_DBG((ARN_DBG_XMIT, "arn: arn_m_tx(): "
		    "discard, state %u\n", ic->ic_state));
		sc->sc_stats.ast_tx_discard++;
		freemsgchain(mp);
		return (NULL);
	}

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;
		error = arn_tx(ic, mp, IEEE80211_FC0_TYPE_DATA);
		if (error != 0) {
			mp->b_next = next;
			if (error == ENOMEM) {
				break;
			} else {
				freemsgchain(mp);
				return (NULL);
			}
		}
		mp = next;
	}

	return (mp);
}

/*
 * mc_ioctl(9E): pass WiFi ioctls to net80211; on ENETRESET, restart
 * the device and kick off a new scan.
 */
static void
arn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	struct arn_softc *sc = arg;
	int32_t err;

	err = ieee80211_ioctl(&sc->sc_isc, wq, mp);

	ARN_LOCK(sc);
	if (err == ENETRESET) {
		if (!(sc->sc_flags & SC_OP_INVALID)) {
			ARN_UNLOCK(sc);

			(void) arn_m_start(sc);

			(void) ieee80211_new_state(&sc->sc_isc,
			    IEEE80211_S_SCAN, -1);
			ARN_LOCK(sc);
		}
	}
	ARN_UNLOCK(sc);
}

/*
 * mc_setprop(9E): set a WiFi link property via net80211; ENETRESET
 * means the change requires a restart + rescan, done here.
 */
static int
arn_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
    uint_t wldp_length, const void *wldp_buf)
{
	struct arn_softc *sc = arg;
	int err;

	err = ieee80211_setprop(&sc->sc_isc, pr_name, wldp_pr_num,
	    wldp_length, wldp_buf);

	ARN_LOCK(sc);

	if (err == ENETRESET) {
		if (!(sc->sc_flags & SC_OP_INVALID)) {
			ARN_UNLOCK(sc);
			(void) arn_m_start(sc);
			(void) ieee80211_new_state(&sc->sc_isc,
			    IEEE80211_S_SCAN, -1);
			ARN_LOCK(sc);
		}
		err = 0;
	}

	ARN_UNLOCK(sc);

	return (err);
}

/* mc_getprop(9E): read a WiFi link property via net80211. */
/* ARGSUSED */
static int
arn_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
    uint_t pr_flags, uint_t wldp_length, void *wldp_buf, uint_t *perm)
{
	struct arn_softc *sc = arg;
	int err = 0;

	err = ieee80211_getprop(&sc->sc_isc, pr_name, wldp_pr_num,
2395 pr_flags, wldp_length, wldp_buf, perm); 2396 2397 return (err); 2398 } 2399 2400 /* return bus cachesize in 4B word units */ 2401 static void 2402 arn_pci_config_cachesize(struct arn_softc *sc) 2403 { 2404 uint8_t csz; 2405 2406 /* 2407 * Cache line size is used to size and align various 2408 * structures used to communicate with the hardware. 2409 */ 2410 csz = pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ); 2411 if (csz == 0) { 2412 /* 2413 * We must have this setup properly for rx buffer 2414 * DMA to work so force a reasonable value here if it 2415 * comes up zero. 2416 */ 2417 csz = ATH_DEF_CACHE_BYTES / sizeof (uint32_t); 2418 pci_config_put8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ, 2419 csz); 2420 } 2421 sc->sc_cachelsz = csz << 2; 2422 } 2423 2424 static int 2425 arn_pci_setup(struct arn_softc *sc) 2426 { 2427 uint16_t command; 2428 2429 /* 2430 * Enable memory mapping and bus mastering 2431 */ 2432 ASSERT(sc != NULL); 2433 command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM); 2434 command |= PCI_COMM_MAE | PCI_COMM_ME; 2435 pci_config_put16(sc->sc_cfg_handle, PCI_CONF_COMM, command); 2436 command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM); 2437 if ((command & PCI_COMM_MAE) == 0) { 2438 arn_problem("arn: arn_pci_setup(): " 2439 "failed to enable memory mapping\n"); 2440 return (EIO); 2441 } 2442 if ((command & PCI_COMM_ME) == 0) { 2443 arn_problem("arn: arn_pci_setup(): " 2444 "failed to enable bus mastering\n"); 2445 return (EIO); 2446 } 2447 ARN_DBG((ARN_DBG_INIT, "arn: arn_pci_setup(): " 2448 "set command reg to 0x%x \n", command)); 2449 2450 return (0); 2451 } 2452 2453 static void 2454 arn_get_hw_encap(struct arn_softc *sc) 2455 { 2456 ieee80211com_t *ic; 2457 struct ath_hal *ah; 2458 2459 ic = (ieee80211com_t *)sc; 2460 ah = sc->sc_ah; 2461 2462 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER, 2463 ATH9K_CIPHER_AES_CCM, NULL)) 2464 ic->ic_caps |= IEEE80211_C_AES_CCM; 2465 if (ath9k_hw_getcapability(ah, 
	    ATH9K_CAP_CIPHER,
	    ATH9K_CIPHER_AES_OCB, NULL))
		ic->ic_caps |= IEEE80211_C_AES;
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
	    ATH9K_CIPHER_TKIP, NULL))
		ic->ic_caps |= IEEE80211_C_TKIP;
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
	    ATH9K_CIPHER_WEP, NULL))
		ic->ic_caps |= IEEE80211_C_WEP;
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
	    ATH9K_CIPHER_MIC, NULL))
		ic->ic_caps |= IEEE80211_C_TKIPMIC;
}

/*
 * DDI_RESUME handler: re-enable PCI config space access and reopen
 * the device if it was running before the suspend.  Returns
 * DDI_SUCCESS or DDI_FAILURE.
 */
static int
arn_resume(dev_info_t *devinfo)
{
	struct arn_softc *sc;
	int ret = DDI_SUCCESS;

	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
	if (sc == NULL) {
		ARN_DBG((ARN_DBG_INIT, "ath: ath_resume(): "
		    "failed to get soft state\n"));
		return (DDI_FAILURE);
	}

	ARN_LOCK(sc);
	/*
	 * Set up config space command register(s). Refuse
	 * to resume on failure.
	 */
	if (arn_pci_setup(sc) != 0) {
		ARN_DBG((ARN_DBG_INIT, "ath: ath_resume(): "
		    "ath_pci_setup() failed\n"));
		ARN_UNLOCK(sc);
		return (DDI_FAILURE);
	}

	if (!(sc->sc_flags & SC_OP_INVALID))
		ret = arn_open(sc);
	ARN_UNLOCK(sc);

	return (ret);
}

/*
 * attach(9E) entry point.  Order matters throughout: PCI config
 * access, register mapping, HAL attach, key cache init, channel and
 * rate setup, tx queue setup, net80211 attach, interrupt wiring and
 * finally mac_register().  Each failure point unwinds through the
 * attach_failN labels below, which tear down in reverse order.
 */
static int
arn_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	struct arn_softc *sc;
	int instance;
	int status;
	int32_t err;
	uint16_t vendor_id;
	uint16_t device_id;
	uint32_t i;
	uint32_t val;
	char strbuf[32];
	ieee80211com_t *ic;
	struct ath_hal *ah;
	wifi_data_t wd = { 0 };
	mac_register_t *macp;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (arn_resume(devinfo));
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(devinfo);
	if (ddi_soft_state_zalloc(arn_soft_state_p, instance) != DDI_SUCCESS) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: "
		    "%s: Unable to alloc softstate\n", __func__));
		return (DDI_FAILURE);
	}

	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
	ic = (ieee80211com_t *)sc;
	sc->sc_dev = devinfo;

	mutex_init(&sc->sc_genlock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sc->sc_serial_rw, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sc->sc_txbuflock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sc->sc_rxbuflock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sc->sc_resched_lock, NULL, MUTEX_DRIVER, NULL);
#ifdef ARN_IBSS
	mutex_init(&sc->sc_bcbuflock, NULL, MUTEX_DRIVER, NULL);
#endif

	/* not usable until arn_open() succeeds */
	sc->sc_flags |= SC_OP_INVALID;

	err = pci_config_setup(devinfo, &sc->sc_cfg_handle);
	if (err != DDI_SUCCESS) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "pci_config_setup() failed"));
		goto attach_fail0;
	}

	if (arn_pci_setup(sc) != 0)
		goto attach_fail1;

	/* Cache line size set up */
	arn_pci_config_cachesize(sc);

	vendor_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_VENID);
	device_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_DEVID);
	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): vendor 0x%x, "
	    "device id 0x%x, cache size %d\n",
	    vendor_id, device_id,
	    pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ)));

	pci_config_put8(sc->sc_cfg_handle, PCI_CONF_LATENCY_TIMER, 0xa8);
	/*
	 * NOTE(review): register 0x40 tweak appears to be a chip
	 * workaround clearing bits 15:8 — presumably disabling a PCI
	 * retry/power feature; confirm against Atheros errata.
	 */
	val = pci_config_get32(sc->sc_cfg_handle, 0x40);
	if ((val & 0x0000ff00) != 0)
		pci_config_put32(sc->sc_cfg_handle, 0x40, val & 0xffff00ff);

	err = ddi_regs_map_setup(devinfo, 1,
	    &sc->mem, 0, 0, &arn_reg_accattr, &sc->sc_io_handle);
	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
	    "regs map1 = %x err=%d\n", sc->mem, err));
	if (err != DDI_SUCCESS) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "ddi_regs_map_setup() failed"));
		goto attach_fail1;
	}

	ah = ath9k_hw_attach(device_id, sc, sc->mem, &status);
	if (ah == NULL) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "unable to attach hw: H/W status %u\n",
		    status));
		goto attach_fail2;
	}
	sc->sc_ah = ah;

	ath9k_hw_getmac(ah, ic->ic_macaddr);

	/* Get the hardware key cache size. */
	sc->sc_keymax = ah->ah_caps.keycache_size;
	if (sc->sc_keymax > ATH_KEYMAX) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "Warning, using only %u entries in %u key cache\n",
		    ATH_KEYMAX, sc->sc_keymax));
		sc->sc_keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		(void) ath9k_hw_keyreset(ah, (uint16_t)i);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use. If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 * XXX only for splitmic.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		set_bit(i, sc->sc_keymap);
		set_bit(i + 32, sc->sc_keymap);
		set_bit(i + 64, sc->sc_keymap);
		set_bit(i + 32 + 64, sc->sc_keymap);
	}

	/* Collect the channel list using the default country code */
	err = arn_setup_channels(sc);
	if (err == EINVAL) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "ERR:arn_setup_channels\n"));
		goto attach_fail3;
	}

	/* default to STA mode */
	sc->sc_ah->ah_opmode = ATH9K_M_STA;

	/* Setup rate tables */
	arn_rate_attach(sc);
	arn_setup_rates(sc, IEEE80211_MODE_11A);
	arn_setup_rates(sc, IEEE80211_MODE_11B);
	arn_setup_rates(sc, IEEE80211_MODE_11G);

	/* Setup current mode here */
	arn_setcurmode(sc, ATH9K_MODE_11G);

	/* 802.11g features */
	if (sc->sc_have11g)
		ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
		    IEEE80211_C_SHSLOT;	/* short slot time */

	/* temp workaround */
	sc->sc_mrretry = 1;

	/* Setup tx/rx descriptors */
	err = arn_desc_alloc(devinfo, sc);
	if (err != DDI_SUCCESS) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "failed to allocate descriptors: %d\n", err));
		goto attach_fail3;
	}

	if ((sc->sc_tq = ddi_taskq_create(devinfo, "ath_taskq", 1,
	    TASKQ_DEFAULTPRI, 0)) == NULL) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "ERR:ddi_taskq_create\n"));
		goto attach_fail4;
	}

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority. Note that the hal handles resetting
	 * these queues at the needed time.
	 */
#ifdef ARN_IBSS
	sc->sc_beaconq = arn_beaconq_setup(ah);
	if (sc->sc_beaconq == (-1)) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "unable to setup a beacon xmit queue\n"));
		goto attach_fail4;
	}
#endif
#ifdef ARN_HOSTAP
	sc->sc_cabq = arn_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "unable to setup CAB xmit queue\n"));
		goto attach_fail4;
	}

	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);
#endif

	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
		sc->sc_haltype2q[i] = -1;

	/* Setup data queues */
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!arn_tx_setup(sc, ATH9K_WME_AC_BK)) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "unable to setup xmit queue for BK traffic\n"));
		goto attach_fail4;
	}
	if (!arn_tx_setup(sc, ATH9K_WME_AC_BE)) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "unable to setup xmit queue for BE traffic\n"));
		goto attach_fail4;
	}
	if (!arn_tx_setup(sc, ATH9K_WME_AC_VI)) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "unable to setup xmit queue for VI traffic\n"));
		goto attach_fail4;
	}
	if (!arn_tx_setup(sc, ATH9K_WME_AC_VO)) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "unable to setup xmit queue for VO traffic\n"));
		goto attach_fail4;
	}

	/*
	 * Initializes the noise floor to a reasonable default value.
	 * Later on this will be updated during ANI processing.
	 */
	sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;

	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
	    ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		(void) ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
		    0, 1, NULL);
	}

	/* Get cipher related capability information */
	arn_get_hw_encap(sc);

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
	    ATH9K_CIPHER_TKIP, NULL) &&
	    ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
	    ATH9K_CIPHER_MIC, NULL) &&
	    ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
	    0, NULL))
		sc->sc_splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void) ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
		    1, NULL);

	sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
	sc->sc_config.txpowlimit_override = 0;

#ifdef ARN_11N
	/* 11n Capabilities */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}
#endif

#ifdef ARN_11N
	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
#else
	sc->sc_tx_chainmask = 1;
	sc->sc_rx_chainmask = 1;
#endif
	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
	    "tx_chainmask = %d, rx_chainmask = %d\n",
	    sc->sc_tx_chainmask, sc->sc_rx_chainmask));

	(void) ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, B_TRUE, NULL);
	sc->sc_defant = ath9k_hw_getdefantenna(ah);

	ath9k_hw_getmac(ah, sc->sc_myaddr);
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
		ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
		(void) ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
	}

	/* set default value to short slot time */
	sc->sc_slottime = ATH9K_SLOT_TIME_9;
	(void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);

	/* initialize beacon slots */
	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
		sc->sc_bslot[i] = ATH_IF_ID_ANY;

	/* save MISC configurations */
	sc->sc_config.swBeaconProcess = 1;

	ic->ic_caps |= IEEE80211_C_WPA;	/* Support WPA/WPA2 */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_state = IEEE80211_S_INIT;
	ic->ic_maxrssi = ARN_MAX_RSSI;
	ic->ic_set_shortslot = arn_set_shortslot;
	ic->ic_xmit = arn_tx;
	ieee80211_attach(ic);

	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
	    "ic->ic_curchan->ich_freq: %d\n", ic->ic_curchan->ich_freq));

	/* different instance has different WPA door */
	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
	    ddi_driver_name(devinfo),
	    ddi_get_instance(devinfo));

	/* Override 80211 default routines */
	ic->ic_reset = arn_reset;
	sc->sc_newstate = ic->ic_newstate;	/* save for chaining */
	ic->ic_newstate = arn_newstate;
#ifdef ARN_IBSS
	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
	ic->ic_recv_mgmt = arn_recv_mgmt;
#endif
	ic->ic_watchdog = arn_watchdog;
	ic->ic_node_alloc = arn_node_alloc;
	ic->ic_node_free = arn_node_free;
	ic->ic_crypto.cs_key_alloc = arn_key_alloc;
	ic->ic_crypto.cs_key_delete = arn_key_delete;
	ic->ic_crypto.cs_key_set = arn_key_set;

	ieee80211_media_init(ic);

	/*
	 * initialize default tx key
	 */
	ic->ic_def_txkey = 0;

	sc->sc_rx_pend = 0;
	(void) ath9k_hw_set_interrupts(sc->sc_ah, 0);
	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW,
	    &sc->sc_softint_id, NULL, 0, arn_softint_handler, (caddr_t)sc);
	if (err != DDI_SUCCESS) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "ddi_add_softintr() failed....\n"));
		goto attach_fail5;
	}

	if (ddi_get_iblock_cookie(devinfo, 0, &sc->sc_iblock)
	    != DDI_SUCCESS) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "Can not get iblock cookie for INT\n"));
		goto attach_fail6;
	}

	if (ddi_add_intr(devinfo, 0, NULL, NULL, arn_isr,
	    (caddr_t)sc) != DDI_SUCCESS) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "Can not set intr for ARN driver\n"));
		goto attach_fail6;
	}

	/*
	 * Provide initial settings for the WiFi plugin; whenever this
	 * information changes, we need to call mac_plugindata_update()
	 */
	wd.wd_opmode = ic->ic_opmode;
	wd.wd_secalloc = WIFI_SEC_NONE;
	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid);

	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
	    "IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid)"
	    "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
	    wd.wd_bssid[0], wd.wd_bssid[1], wd.wd_bssid[2],
	    wd.wd_bssid[3], wd.wd_bssid[4], wd.wd_bssid[5]));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "MAC version mismatch\n"));
		goto attach_fail7;
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
	macp->m_driver = sc;
	macp->m_dip = devinfo;
	macp->m_src_addr = ic->ic_macaddr;
	macp->m_callbacks = &arn_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = IEEE80211_MTU;
	macp->m_pdata = &wd;
	macp->m_pdata_size = sizeof (wd);

	err = mac_register(macp, &ic->ic_mach);
	mac_free(macp);
	if (err != 0) {
		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
		    "mac_register err %x\n", err));
		goto attach_fail7;
	}

	/* Create minor node of type DDI_NT_NET_WIFI */
	(void) snprintf(strbuf, sizeof (strbuf), "%s%d",
	    ARN_NODENAME, instance);
	err = ddi_create_minor_node(devinfo, strbuf, S_IFCHR,
	    instance + 1, DDI_NT_NET_WIFI, 0);
	if (err != DDI_SUCCESS)
		ARN_DBG((ARN_DBG_ATTACH, "WARN: arn: arn_attach(): "
		    "Create minor node failed - %d\n", err));

	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);

	sc->sc_promisc = B_FALSE;
	bzero(sc->sc_mcast_refs, sizeof (sc->sc_mcast_refs));
	bzero(sc->sc_mcast_hash, sizeof (sc->sc_mcast_hash));

	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
	    "Atheros AR%s MAC/BB Rev:%x "
	    "AR%s RF Rev:%x: mem=0x%lx\n",
	    arn_mac_bb_name(ah->ah_macVersion),
	    ah->ah_macRev,
	    arn_rf_name((ah->ah_analog5GhzRev & AR_RADIO_SREV_MAJOR)),
	    ah->ah_phyRev,
	    (unsigned long)sc->mem));

	/* XXX: hardware will not be ready until arn_open() being called */
	sc->sc_flags |= SC_OP_INVALID;
	sc->sc_isrunning = 0;

	return (DDI_SUCCESS);

	/* unwind in reverse order of acquisition; labels fall through */
attach_fail7:
	ddi_remove_intr(devinfo, 0, sc->sc_iblock);
attach_fail6:
	ddi_remove_softintr(sc->sc_softint_id);
attach_fail5:
	(void) ieee80211_detach(ic);
attach_fail4:
	arn_desc_free(sc);
	if (sc->sc_tq)
		ddi_taskq_destroy(sc->sc_tq);
attach_fail3:
	ath9k_hw_detach(ah);
attach_fail2:
	ddi_regs_map_free(&sc->sc_io_handle);
attach_fail1:
	pci_config_teardown(&sc->sc_cfg_handle);
attach_fail0:
	sc->sc_flags |= SC_OP_INVALID;
	/* cleanup tx queues */
	mutex_destroy(&sc->sc_txbuflock);
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ARN_TXQ_SETUP(sc, i)) {
			/* arn_tx_cleanupq(asc, &asc->sc_txq[i]); */
			mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
		}
	}
	mutex_destroy(&sc->sc_rxbuflock);
	mutex_destroy(&sc->sc_serial_rw);
	mutex_destroy(&sc->sc_genlock);
	mutex_destroy(&sc->sc_resched_lock);
#ifdef ARN_IBSS
	mutex_destroy(&sc->sc_bcbuflock);
#endif

	ddi_soft_state_free(arn_soft_state_p, instance);

	return (DDI_FAILURE);

}

/*
 * Suspend transmit/receive for powerdown
 */
static int
arn_suspend(struct arn_softc *sc)
{
	ARN_LOCK(sc);
	arn_close(sc);
	ARN_UNLOCK(sc);

	return (DDI_SUCCESS);
}

/*
 * detach(9E) entry point.  DDI_SUSPEND is handled via arn_suspend();
 * DDI_DETACH tears down everything arn_attach() built, in reverse
 * order (continues past this fragment).
 */
static int32_t
arn_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	struct arn_softc *sc;
	int i;

	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
	ASSERT(sc != NULL);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case
DDI_SUSPEND: 3012 return (arn_suspend(sc)); 3013 3014 default: 3015 return (DDI_FAILURE); 3016 } 3017 3018 if (mac_disable(sc->sc_isc.ic_mach) != 0) 3019 return (DDI_FAILURE); 3020 3021 arn_stop_scantimer(sc); 3022 arn_stop_caltimer(sc); 3023 3024 /* disable interrupts */ 3025 (void) ath9k_hw_set_interrupts(sc->sc_ah, 0); 3026 3027 /* 3028 * Unregister from the MAC layer subsystem 3029 */ 3030 (void) mac_unregister(sc->sc_isc.ic_mach); 3031 3032 /* free intterrupt resources */ 3033 ddi_remove_intr(devinfo, 0, sc->sc_iblock); 3034 ddi_remove_softintr(sc->sc_softint_id); 3035 3036 /* 3037 * NB: the order of these is important: 3038 * o call the 802.11 layer before detaching the hal to 3039 * insure callbacks into the driver to delete global 3040 * key cache entries can be handled 3041 * o reclaim the tx queue data structures after calling 3042 * the 802.11 layer as we'll get called back to reclaim 3043 * node state and potentially want to use them 3044 * o to cleanup the tx queues the hal is called, so detach 3045 * it last 3046 */ 3047 ieee80211_detach(&sc->sc_isc); 3048 3049 arn_desc_free(sc); 3050 3051 ddi_taskq_destroy(sc->sc_tq); 3052 3053 if (!(sc->sc_flags & SC_OP_INVALID)) 3054 (void) ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 3055 3056 /* cleanup tx queues */ 3057 mutex_destroy(&sc->sc_txbuflock); 3058 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 3059 if (ARN_TXQ_SETUP(sc, i)) { 3060 arn_tx_cleanupq(sc, &sc->sc_txq[i]); 3061 mutex_destroy(&((&sc->sc_txq[i])->axq_lock)); 3062 } 3063 } 3064 3065 ath9k_hw_detach(sc->sc_ah); 3066 3067 /* free io handle */ 3068 ddi_regs_map_free(&sc->sc_io_handle); 3069 pci_config_teardown(&sc->sc_cfg_handle); 3070 3071 /* destroy locks */ 3072 mutex_destroy(&sc->sc_genlock); 3073 mutex_destroy(&sc->sc_serial_rw); 3074 mutex_destroy(&sc->sc_rxbuflock); 3075 mutex_destroy(&sc->sc_resched_lock); 3076 #ifdef ARN_IBSS 3077 mutex_destroy(&sc->sc_bcbuflock); 3078 #endif 3079 3080 ddi_remove_minor_node(devinfo, NULL); 3081 
ddi_soft_state_free(arn_soft_state_p, ddi_get_instance(devinfo)); 3082 3083 return (DDI_SUCCESS); 3084 } 3085 3086 /* 3087 * quiesce(9E) entry point. 3088 * 3089 * This function is called when the system is single-threaded at high 3090 * PIL with preemption disabled. Therefore, this function must not be 3091 * blocked. 3092 * 3093 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 3094 * DDI_FAILURE indicates an error condition and should almost never happen. 3095 */ 3096 static int32_t 3097 arn_quiesce(dev_info_t *devinfo) 3098 { 3099 struct arn_softc *sc; 3100 int i; 3101 struct ath_hal *ah; 3102 3103 sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo)); 3104 3105 if (sc == NULL || (ah = sc->sc_ah) == NULL) 3106 return (DDI_FAILURE); 3107 3108 /* 3109 * Disable interrupts 3110 */ 3111 (void) ath9k_hw_set_interrupts(ah, 0); 3112 3113 /* 3114 * Disable TX HW 3115 */ 3116 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 3117 if (ARN_TXQ_SETUP(sc, i)) 3118 (void) ath9k_hw_stoptxdma(ah, sc->sc_txq[i].axq_qnum); 3119 } 3120 3121 /* 3122 * Disable RX HW 3123 */ 3124 ath9k_hw_stoppcurecv(ah); 3125 ath9k_hw_setrxfilter(ah, 0); 3126 (void) ath9k_hw_stopdmarecv(ah); 3127 drv_usecwait(3000); 3128 3129 /* 3130 * Power down HW 3131 */ 3132 (void) ath9k_hw_phy_disable(ah); 3133 3134 return (DDI_SUCCESS); 3135 } 3136 3137 DDI_DEFINE_STREAM_OPS(arn_dev_ops, nulldev, nulldev, arn_attach, arn_detach, 3138 nodev, NULL, D_MP, NULL, arn_quiesce); 3139 3140 static struct modldrv arn_modldrv = { 3141 &mod_driverops, /* Type of module. 
This one is a driver */ 3142 "arn-Atheros 9000 series driver:vertion 1.1", /* short description */ 3143 &arn_dev_ops /* driver specific ops */ 3144 }; 3145 3146 static struct modlinkage modlinkage = { 3147 MODREV_1, (void *)&arn_modldrv, NULL 3148 }; 3149 3150 int 3151 _info(struct modinfo *modinfop) 3152 { 3153 return (mod_info(&modlinkage, modinfop)); 3154 } 3155 3156 int 3157 _init(void) 3158 { 3159 int status; 3160 3161 status = ddi_soft_state_init 3162 (&arn_soft_state_p, sizeof (struct arn_softc), 1); 3163 if (status != 0) 3164 return (status); 3165 3166 mutex_init(&arn_loglock, NULL, MUTEX_DRIVER, NULL); 3167 mac_init_ops(&arn_dev_ops, "arn"); 3168 status = mod_install(&modlinkage); 3169 if (status != 0) { 3170 mac_fini_ops(&arn_dev_ops); 3171 mutex_destroy(&arn_loglock); 3172 ddi_soft_state_fini(&arn_soft_state_p); 3173 } 3174 3175 return (status); 3176 } 3177 3178 int 3179 _fini(void) 3180 { 3181 int status; 3182 3183 status = mod_remove(&modlinkage); 3184 if (status == 0) { 3185 mac_fini_ops(&arn_dev_ops); 3186 mutex_destroy(&arn_loglock); 3187 ddi_soft_state_fini(&arn_soft_state_p); 3188 } 3189 return (status); 3190 } 3191