/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */


/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * The Atheros driver calls into the net80211 module for IEEE80211
 * protocol management functionality. The driver includes an LLD
 * (Low Level Driver) part that implements the H/W related operations.
 * The following is the high level structure of the ath driver.
 * (The arrows between modules indicate function call direction.)
 *
 *
 *                                           |
 *                                           | GLD thread
 *                                           V
 *     ==================          =========================================
 *     |                |          |[1]                                    |
 *     |                |          |  GLDv3 Callback functions registered  |
 *     |   Net80211     |  ========================= by                    |
 *     |    module      |  |                       | driver                |
 *     |                |  V                       |                       |
 *     |                |========================  |                       |
 *     |   Functions exported by net80211       |  |                       |
 *     |                                        |  |                       |
 *     ==========================================  =================
 *                      |                                  |
 *                      V                                  |
 *     +----------------------------------+                |
 *     |[2]                               |                |
 *     |    Net80211 Callback functions   |                |
 *     |      registered by LLD           |                |
 *     +----------------------------------+                |
 *                      |                                  |
 *                      V                                  v
 *     +-----------------------------------------------------------+
 *     |[3]                                                        |
 *     |                  LLD Internal functions                   |
 *     |                                                           |
 *     +-----------------------------------------------------------+
 *                                    ^
 *                                    | Software interrupt thread
 *                                    |
 *
 * The short description of each module is as below:
 *     Module 1: GLD callback functions, which intercept the calls from
 *               the GLD and dispatch them to the LLD.
 *     Module 2: Net80211 callback functions registered by the LLD, which
 *               call into the LLD for the H/W related functions needed
 *               by net80211.
 *     Module 3: LLD internal functions, which are responsible for
 *               allocating descriptors/buffers, handling interrupts and
 *               other H/W operations.
 *
 * All functions run in 3 types of thread:
 * 1. GLD callback threads, such as ioctl, intr, etc.
 * 2. The clock interrupt thread, which is responsible for scan, rate
 *    control and calibration.
 * 3. The software interrupt thread originated in the LLD.
 *
 * The lock strategy is as below:
 * There are 4 tx queues; each queue has an asc_txqlock[i] that prevents
 * conflicting access to the queue's resources from different threads.
 *
 * All the transmit buffers are contained in asc_txbuf, which is
 * protected by asc_txbuflock.
 *
 * All the receive buffers are contained in asc_rxbuf, which is
 * protected by asc_rxbuflock.
 *
 * In the ath struct, asc_genlock is a general lock, protecting most
 * other operational data in the ath_softc struct and HAL accesses.
 * It is acquired by the interrupt handler and most "mode-ctrl" routines.
 *
 * Any of the locks can be acquired singly, but where multiple
 * locks are acquired, they *must* be in the order:
 * asc_genlock >> asc_txqlock[i] >> asc_txbuflock >> asc_rxbuflock
 */
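/*
 * An illustrative sketch (documentation only, not code used by the
 * driver): how a code path that needed several of the locks named above
 * would take and drop them while honoring the documented ordering.
 * "qnum" is a hypothetical queue index, and the per-queue lock appears
 * in the code below as txq->axq_lock rather than asc_txqlock[i].
 *
 *	ATH_LOCK(asc);					asc_genlock
 *	mutex_enter(&asc->asc_txq[qnum].axq_lock);	asc_txqlock[qnum]
 *	mutex_enter(&asc->asc_txbuflock);		asc_txbuflock
 *	mutex_enter(&asc->asc_rxbuflock);		asc_rxbuflock
 *	...						touch shared state
 *	mutex_exit(&asc->asc_rxbuflock);		release in reverse order
 *	mutex_exit(&asc->asc_txbuflock);
 *	mutex_exit(&asc->asc_txq[qnum].axq_lock);
 *	ATH_UNLOCK(asc);
 */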
#include <sys/param.h>
#include <sys/types.h>
#include <sys/signal.h>
#include <sys/stream.h>
#include <sys/termio.h>
#include <sys/errno.h>
#include <sys/file.h>
#include <sys/cmn_err.h>
#include <sys/stropts.h>
#include <sys/strsubr.h>
#include <sys/strtty.h>
#include <sys/kbio.h>
#include <sys/cred.h>
#include <sys/stat.h>
#include <sys/consdev.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/pci.h>
#include <sys/mac.h>
#include <sys/dlpi.h>
#include <sys/ethernet.h>
#include <sys/list.h>
#include <sys/byteorder.h>
#include <sys/strsun.h>
#include <sys/policy.h>
#include <inet/common.h>
#include <inet/nd.h>
#include <inet/mi.h>
#include <inet/wifi_ioctl.h>
#include <sys/mac_wifi.h>
#include "ath_hal.h"
#include "ath_impl.h"
#include "ath_aux.h"
#include "ath_rate.h"

#define	ATH_MAX_RSSI	63	/* max rssi */

extern void ath_halfix_init(void);
extern void ath_halfix_finit(void);
extern int32_t ath_getset(ath_t *asc, mblk_t *mp, uint32_t cmd);

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t ath_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
170 */ 171 static ddi_device_acc_attr_t ath_desc_accattr = { 172 DDI_DEVICE_ATTR_V0, 173 DDI_STRUCTURE_LE_ACC, 174 DDI_STRICTORDER_ACC 175 }; 176 177 /* 178 * Describes the chip's DMA engine 179 */ 180 static ddi_dma_attr_t ath_dma_attr = { 181 DMA_ATTR_V0, /* version number */ 182 0, /* low address */ 183 0xffffffffU, /* high address */ 184 0x3ffffU, /* counter register max */ 185 1, /* alignment */ 186 0xFFF, /* burst sizes */ 187 1, /* minimum transfer size */ 188 0x3ffffU, /* max transfer size */ 189 0xffffffffU, /* address register max */ 190 1, /* no scatter-gather */ 191 1, /* granularity of device */ 192 0, /* DMA flags */ 193 }; 194 195 static ddi_dma_attr_t ath_desc_dma_attr = { 196 DMA_ATTR_V0, /* version number */ 197 0, /* low address */ 198 0xffffffffU, /* high address */ 199 0xffffffffU, /* counter register max */ 200 0x1000, /* alignment */ 201 0xFFF, /* burst sizes */ 202 1, /* minimum transfer size */ 203 0xffffffffU, /* max transfer size */ 204 0xffffffffU, /* address register max */ 205 1, /* no scatter-gather */ 206 1, /* granularity of device */ 207 0, /* DMA flags */ 208 }; 209 210 static kmutex_t ath_loglock; 211 static void *ath_soft_state_p = NULL; 212 static int ath_dwelltime = 150; /* scan interval, ms */ 213 214 static int ath_m_stat(void *, uint_t, uint64_t *); 215 static int ath_m_start(void *); 216 static void ath_m_stop(void *); 217 static int ath_m_promisc(void *, boolean_t); 218 static int ath_m_multicst(void *, boolean_t, const uint8_t *); 219 static int ath_m_unicst(void *, const uint8_t *); 220 static mblk_t *ath_m_tx(void *, mblk_t *); 221 static void ath_m_ioctl(void *, queue_t *, mblk_t *); 222 static int ath_m_setprop(void *, const char *, mac_prop_id_t, 223 uint_t, const void *); 224 static int ath_m_getprop(void *, const char *, mac_prop_id_t, 225 uint_t, uint_t, void *); 226 227 static mac_callbacks_t ath_m_callbacks = { 228 MC_IOCTL | MC_SETPROP | MC_GETPROP, 229 ath_m_stat, 230 ath_m_start, 231 ath_m_stop, 232 ath_m_promisc, 233 ath_m_multicst, 234 ath_m_unicst, 235 ath_m_tx, 236 NULL, /* mc_resources; */ 237 ath_m_ioctl, 238 NULL, /* mc_getcapab */ 239 NULL, 240 NULL, 241 ath_m_setprop, 242 ath_m_getprop 243 }; 244 245 /* 246 * Available debug flags: 247 * ATH_DBG_INIT, ATH_DBG_GLD, ATH_DBG_HAL, ATH_DBG_INT, ATH_DBG_ATTACH, 248 * ATH_DBG_DETACH, ATH_DBG_AUX, ATH_DBG_WIFICFG, ATH_DBG_OSDEP 249 */ 250 uint32_t ath_dbg_flags = 0; 251 252 /* 253 * Exception/warning cases not leading to panic. 254 */ 255 void 256 ath_problem(const int8_t *fmt, ...) 257 { 258 va_list args; 259 260 mutex_enter(&ath_loglock); 261 262 va_start(args, fmt); 263 vcmn_err(CE_WARN, fmt, args); 264 va_end(args); 265 266 mutex_exit(&ath_loglock); 267 } 268 269 /* 270 * Normal log information independent of debug. 271 */ 272 void 273 ath_log(const int8_t *fmt, ...) 274 { 275 va_list args; 276 277 mutex_enter(&ath_loglock); 278 279 va_start(args, fmt); 280 vcmn_err(CE_CONT, fmt, args); 281 va_end(args); 282 283 mutex_exit(&ath_loglock); 284 } 285 286 void 287 ath_dbg(uint32_t dbg_flags, const int8_t *fmt, ...) 
288 { 289 va_list args; 290 291 if (dbg_flags & ath_dbg_flags) { 292 mutex_enter(&ath_loglock); 293 va_start(args, fmt); 294 vcmn_err(CE_CONT, fmt, args); 295 va_end(args); 296 mutex_exit(&ath_loglock); 297 } 298 } 299 300 void 301 ath_setup_desc(ath_t *asc, struct ath_buf *bf) 302 { 303 struct ath_desc *ds; 304 305 ds = bf->bf_desc; 306 ds->ds_link = bf->bf_daddr; 307 ds->ds_data = bf->bf_dma.cookie.dmac_address; 308 ds->ds_vdata = bf->bf_dma.mem_va; 309 ATH_HAL_SETUPRXDESC(asc->asc_ah, ds, 310 bf->bf_dma.alength, /* buffer size */ 311 0); 312 313 if (asc->asc_rxlink != NULL) 314 *asc->asc_rxlink = bf->bf_daddr; 315 asc->asc_rxlink = &ds->ds_link; 316 } 317 318 319 /* 320 * Allocate an area of memory and a DMA handle for accessing it 321 */ 322 static int 323 ath_alloc_dma_mem(dev_info_t *devinfo, ddi_dma_attr_t *dma_attr, size_t memsize, 324 ddi_device_acc_attr_t *attr_p, uint_t alloc_flags, 325 uint_t bind_flags, dma_area_t *dma_p) 326 { 327 int err; 328 329 /* 330 * Allocate handle 331 */ 332 err = ddi_dma_alloc_handle(devinfo, dma_attr, 333 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl); 334 if (err != DDI_SUCCESS) 335 return (DDI_FAILURE); 336 337 /* 338 * Allocate memory 339 */ 340 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p, 341 alloc_flags, DDI_DMA_SLEEP, NULL, &dma_p->mem_va, 342 &dma_p->alength, &dma_p->acc_hdl); 343 if (err != DDI_SUCCESS) 344 return (DDI_FAILURE); 345 346 /* 347 * Bind the two together 348 */ 349 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL, 350 dma_p->mem_va, dma_p->alength, bind_flags, 351 DDI_DMA_SLEEP, NULL, &dma_p->cookie, &dma_p->ncookies); 352 if (err != DDI_DMA_MAPPED) 353 return (DDI_FAILURE); 354 355 dma_p->nslots = ~0U; 356 dma_p->size = ~0U; 357 dma_p->token = ~0U; 358 dma_p->offset = 0; 359 return (DDI_SUCCESS); 360 } 361 362 /* 363 * Free one allocated area of DMAable memory 364 */ 365 static void 366 ath_free_dma_mem(dma_area_t *dma_p) 367 { 368 if (dma_p->dma_hdl != NULL) { 369 (void) ddi_dma_unbind_handle(dma_p->dma_hdl); 370 if (dma_p->acc_hdl != NULL) { 371 ddi_dma_mem_free(&dma_p->acc_hdl); 372 dma_p->acc_hdl = NULL; 373 } 374 ddi_dma_free_handle(&dma_p->dma_hdl); 375 dma_p->ncookies = 0; 376 dma_p->dma_hdl = NULL; 377 } 378 } 379 380 381 static int 382 ath_desc_alloc(dev_info_t *devinfo, ath_t *asc) 383 { 384 int i, err; 385 size_t size; 386 struct ath_desc *ds; 387 struct ath_buf *bf; 388 389 size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF); 390 391 err = ath_alloc_dma_mem(devinfo, &ath_desc_dma_attr, size, 392 &ath_desc_accattr, DDI_DMA_CONSISTENT, 393 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &asc->asc_desc_dma); 394 395 /* virtual address of the first descriptor */ 396 asc->asc_desc = (struct ath_desc *)asc->asc_desc_dma.mem_va; 397 398 ds = asc->asc_desc; 399 ATH_DEBUG((ATH_DBG_INIT, "ath: ath_desc_alloc(): DMA map: " 400 "%p (%d) -> %p\n", 401 asc->asc_desc, asc->asc_desc_dma.alength, 402 asc->asc_desc_dma.cookie.dmac_address)); 403 404 /* allocate data structures to describe TX/RX DMA buffers */ 405 asc->asc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF); 406 bf = (struct ath_buf *)kmem_zalloc(asc->asc_vbuflen, KM_SLEEP); 407 asc->asc_vbufptr = bf; 408 409 /* DMA buffer size for each TX/RX packet */ 410 asc->asc_dmabuf_size = roundup(1000 + sizeof (struct ieee80211_frame) + 411 IEEE80211_MTU + IEEE80211_CRC_LEN + 412 (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + 413 IEEE80211_WEP_CRCLEN), asc->asc_cachelsz); 414 415 /* create RX buffer list and allocate DMA memory */ 416 list_create(&asc->asc_rxbuf_list, 
sizeof (struct ath_buf), 417 offsetof(struct ath_buf, bf_node)); 418 for (i = 0; i < ATH_RXBUF; i++, bf++, ds++) { 419 bf->bf_desc = ds; 420 bf->bf_daddr = asc->asc_desc_dma.cookie.dmac_address + 421 ((uintptr_t)ds - (uintptr_t)asc->asc_desc); 422 list_insert_tail(&asc->asc_rxbuf_list, bf); 423 424 /* alloc DMA memory */ 425 err = ath_alloc_dma_mem(devinfo, &ath_dma_attr, 426 asc->asc_dmabuf_size, &ath_desc_accattr, 427 DDI_DMA_STREAMING, DDI_DMA_READ | DDI_DMA_STREAMING, 428 &bf->bf_dma); 429 if (err != DDI_SUCCESS) 430 return (err); 431 } 432 433 /* create TX buffer list and allocate DMA memory */ 434 list_create(&asc->asc_txbuf_list, sizeof (struct ath_buf), 435 offsetof(struct ath_buf, bf_node)); 436 for (i = 0; i < ATH_TXBUF; i++, bf++, ds++) { 437 bf->bf_desc = ds; 438 bf->bf_daddr = asc->asc_desc_dma.cookie.dmac_address + 439 ((uintptr_t)ds - (uintptr_t)asc->asc_desc); 440 list_insert_tail(&asc->asc_txbuf_list, bf); 441 442 /* alloc DMA memory */ 443 err = ath_alloc_dma_mem(devinfo, &ath_dma_attr, 444 asc->asc_dmabuf_size, &ath_desc_accattr, 445 DDI_DMA_STREAMING, DDI_DMA_STREAMING, &bf->bf_dma); 446 if (err != DDI_SUCCESS) 447 return (err); 448 } 449 450 return (DDI_SUCCESS); 451 } 452 453 static void 454 ath_desc_free(ath_t *asc) 455 { 456 struct ath_buf *bf; 457 458 /* Free TX DMA buffer */ 459 bf = list_head(&asc->asc_txbuf_list); 460 while (bf != NULL) { 461 ath_free_dma_mem(&bf->bf_dma); 462 list_remove(&asc->asc_txbuf_list, bf); 463 bf = list_head(&asc->asc_txbuf_list); 464 } 465 list_destroy(&asc->asc_txbuf_list); 466 467 /* Free RX DMA uffer */ 468 bf = list_head(&asc->asc_rxbuf_list); 469 while (bf != NULL) { 470 ath_free_dma_mem(&bf->bf_dma); 471 list_remove(&asc->asc_rxbuf_list, bf); 472 bf = list_head(&asc->asc_rxbuf_list); 473 } 474 list_destroy(&asc->asc_rxbuf_list); 475 476 /* Free descriptor DMA buffer */ 477 ath_free_dma_mem(&asc->asc_desc_dma); 478 479 kmem_free((void *)asc->asc_vbufptr, asc->asc_vbuflen); 480 asc->asc_vbufptr = NULL; 481 } 482 483 static void 484 ath_printrxbuf(struct ath_buf *bf, int32_t done) 485 { 486 struct ath_desc *ds = bf->bf_desc; 487 488 ATH_DEBUG((ATH_DBG_RECV, "ath: R (%p %p) %08x %08x %08x " 489 "%08x %08x %08x %c\n", 490 ds, bf->bf_daddr, 491 ds->ds_link, ds->ds_data, 492 ds->ds_ctl0, ds->ds_ctl1, 493 ds->ds_hw[0], ds->ds_hw[1], 494 !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!')); 495 } 496 497 static void 498 ath_rx_handler(ath_t *asc) 499 { 500 ieee80211com_t *ic = (ieee80211com_t *)asc; 501 struct ath_buf *bf; 502 struct ath_hal *ah = asc->asc_ah; 503 struct ath_desc *ds; 504 mblk_t *rx_mp; 505 struct ieee80211_frame *wh; 506 int32_t len, loop = 1; 507 uint8_t phyerr; 508 HAL_STATUS status; 509 HAL_NODE_STATS hal_node_stats; 510 struct ieee80211_node *in; 511 512 do { 513 mutex_enter(&asc->asc_rxbuflock); 514 bf = list_head(&asc->asc_rxbuf_list); 515 if (bf == NULL) { 516 ATH_DEBUG((ATH_DBG_RECV, "ath: ath_rx_handler(): " 517 "no buffer\n")); 518 mutex_exit(&asc->asc_rxbuflock); 519 break; 520 } 521 ASSERT(bf->bf_dma.cookie.dmac_address != NULL); 522 ds = bf->bf_desc; 523 if (ds->ds_link == bf->bf_daddr) { 524 /* 525 * Never process the self-linked entry at the end, 526 * this may be met at heavy load. 
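 *
 * For illustration, based on ath_setup_desc() earlier in this file:
 * every rx descriptor is created self-linked,
 *
 *	ds->ds_link = bf->bf_daddr;
 *
 * and is only re-pointed at a new buffer when one is appended through
 * asc_rxlink.  The last descriptor in the chain therefore points at
 * itself; under heavy load the handler can catch up to it while the
 * hardware may still own it, so it is left in place and picked up on
 * a later pass.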
527 */ 528 mutex_exit(&asc->asc_rxbuflock); 529 break; 530 } 531 532 status = ATH_HAL_RXPROCDESC(ah, ds, 533 bf->bf_daddr, 534 ATH_PA2DESC(asc, ds->ds_link)); 535 if (status == HAL_EINPROGRESS) { 536 mutex_exit(&asc->asc_rxbuflock); 537 break; 538 } 539 list_remove(&asc->asc_rxbuf_list, bf); 540 mutex_exit(&asc->asc_rxbuflock); 541 542 if (ds->ds_rxstat.rs_status != 0) { 543 if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC) 544 asc->asc_stats.ast_rx_crcerr++; 545 if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO) 546 asc->asc_stats.ast_rx_fifoerr++; 547 if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) 548 asc->asc_stats.ast_rx_badcrypt++; 549 if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) { 550 asc->asc_stats.ast_rx_phyerr++; 551 phyerr = ds->ds_rxstat.rs_phyerr & 0x1f; 552 asc->asc_stats.ast_rx_phy[phyerr]++; 553 } 554 goto rx_next; 555 } 556 len = ds->ds_rxstat.rs_datalen; 557 558 /* less than sizeof(struct ieee80211_frame) */ 559 if (len < 20) { 560 asc->asc_stats.ast_rx_tooshort++; 561 goto rx_next; 562 } 563 564 if ((rx_mp = allocb(asc->asc_dmabuf_size, BPRI_MED)) == NULL) { 565 ath_problem("ath: ath_rx_handler(): " 566 "allocing mblk buffer failed.\n"); 567 return; 568 } 569 570 ATH_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORCPU); 571 bcopy(bf->bf_dma.mem_va, rx_mp->b_rptr, len); 572 573 rx_mp->b_wptr += len; 574 wh = (struct ieee80211_frame *)rx_mp->b_rptr; 575 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == 576 IEEE80211_FC0_TYPE_CTL) { 577 /* 578 * Ignore control frame received in promisc mode. 579 */ 580 freemsg(rx_mp); 581 goto rx_next; 582 } 583 /* Remove the CRC at the end of IEEE80211 frame */ 584 rx_mp->b_wptr -= IEEE80211_CRC_LEN; 585 #ifdef DEBUG 586 ath_printrxbuf(bf, status == HAL_OK); 587 #endif /* DEBUG */ 588 /* 589 * Locate the node for sender, track state, and then 590 * pass the (referenced) node up to the 802.11 layer 591 * for its use. 592 */ 593 in = ieee80211_find_rxnode(ic, wh); 594 595 /* 596 * Send frame up for processing. 597 */ 598 (void) ieee80211_input(ic, rx_mp, in, 599 ds->ds_rxstat.rs_rssi, 600 ds->ds_rxstat.rs_tstamp); 601 602 ieee80211_free_node(in); 603 604 rx_next: 605 mutex_enter(&asc->asc_rxbuflock); 606 list_insert_tail(&asc->asc_rxbuf_list, bf); 607 mutex_exit(&asc->asc_rxbuflock); 608 ath_setup_desc(asc, bf); 609 } while (loop); 610 611 /* rx signal state monitoring */ 612 ATH_HAL_RXMONITOR(ah, &hal_node_stats, &asc->asc_curchan); 613 } 614 615 static void 616 ath_printtxbuf(struct ath_buf *bf, int done) 617 { 618 struct ath_desc *ds = bf->bf_desc; 619 620 ATH_DEBUG((ATH_DBG_SEND, "ath: T(%p %p) %08x %08x %08x %08x %08x" 621 " %08x %08x %08x %c\n", 622 ds, bf->bf_daddr, 623 ds->ds_link, ds->ds_data, 624 ds->ds_ctl0, ds->ds_ctl1, 625 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3], 626 !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!')); 627 } 628 629 /* 630 * The input parameter mp has following assumption: 631 * For data packets, GLDv3 mac_wifi plugin allocates and fills the 632 * ieee80211 header. For management packets, net80211 allocates and 633 * fills the ieee80211 header. In both cases, enough spaces in the 634 * header are left for encryption option. 
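 *
 * A rough sketch of the expected layout (for illustration only; the
 * exact amount of reserved space depends on the negotiated cipher):
 *
 *	b_rptr                                                  b_wptr
 *	 | 802.11 header | room for cipher header | payload ... |
 *
 * ieee80211_crypto_encap() below fills the reserved space for encrypted
 * frames, and the FCS is appended by hardware, which is why pktlen
 * below starts at IEEE80211_CRC_LEN rather than 0.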
635 */ 636 static int32_t 637 ath_tx_start(ath_t *asc, struct ieee80211_node *in, struct ath_buf *bf, 638 mblk_t *mp) 639 { 640 ieee80211com_t *ic = (ieee80211com_t *)asc; 641 struct ieee80211_frame *wh; 642 struct ath_hal *ah = asc->asc_ah; 643 uint32_t subtype, flags, ctsduration; 644 int32_t keyix, iswep, hdrlen, pktlen, mblen, mbslen, try0; 645 uint8_t rix, cix, txrate, ctsrate; 646 struct ath_desc *ds; 647 struct ath_txq *txq; 648 HAL_PKT_TYPE atype; 649 const HAL_RATE_TABLE *rt; 650 HAL_BOOL shortPreamble; 651 struct ath_node *an; 652 caddr_t dest; 653 654 /* 655 * CRC are added by H/W, not encaped by driver, 656 * but we must count it in pkt length. 657 */ 658 pktlen = IEEE80211_CRC_LEN; 659 660 wh = (struct ieee80211_frame *)mp->b_rptr; 661 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP; 662 keyix = HAL_TXKEYIX_INVALID; 663 hdrlen = sizeof (struct ieee80211_frame); 664 if (iswep != 0) { 665 const struct ieee80211_cipher *cip; 666 struct ieee80211_key *k; 667 668 /* 669 * Construct the 802.11 header+trailer for an encrypted 670 * frame. The only reason this can fail is because of an 671 * unknown or unsupported cipher/key type. 672 */ 673 k = ieee80211_crypto_encap(ic, mp); 674 if (k == NULL) { 675 ATH_DEBUG((ATH_DBG_AUX, "crypto_encap failed\n")); 676 /* 677 * This can happen when the key is yanked after the 678 * frame was queued. Just discard the frame; the 679 * 802.11 layer counts failures and provides 680 * debugging/diagnostics. 681 */ 682 return (EIO); 683 } 684 cip = k->wk_cipher; 685 /* 686 * Adjust the packet + header lengths for the crypto 687 * additions and calculate the h/w key index. When 688 * a s/w mic is done the frame will have had any mic 689 * added to it prior to entry so m0->m_pkthdr.len above will 690 * account for it. Otherwise we need to add it to the 691 * packet length. 692 */ 693 hdrlen += cip->ic_header; 694 pktlen += cip->ic_trailer; 695 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0) 696 pktlen += cip->ic_miclen; 697 keyix = k->wk_keyix; 698 699 /* packet header may have moved, reset our local pointer */ 700 wh = (struct ieee80211_frame *)mp->b_rptr; 701 } 702 703 dest = bf->bf_dma.mem_va; 704 for (; mp != NULL; mp = mp->b_cont) { 705 mblen = MBLKL(mp); 706 bcopy(mp->b_rptr, dest, mblen); 707 dest += mblen; 708 } 709 mbslen = (uintptr_t)dest - (uintptr_t)bf->bf_dma.mem_va; 710 pktlen += mbslen; 711 712 bf->bf_in = in; 713 714 /* setup descriptors */ 715 ds = bf->bf_desc; 716 rt = asc->asc_currates; 717 ASSERT(rt != NULL); 718 719 /* 720 * The 802.11 layer marks whether or not we should 721 * use short preamble based on the current mode and 722 * negotiated parameters. 723 */ 724 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 725 (in->in_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 726 shortPreamble = AH_TRUE; 727 asc->asc_stats.ast_tx_shortpre++; 728 } else { 729 shortPreamble = AH_FALSE; 730 } 731 732 an = ATH_NODE(in); 733 734 /* 735 * Calculate Atheros packet type from IEEE80211 packet header 736 * and setup for rate calculations. 
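 *
 * Summary of the switch below (added for readability):
 *
 *	802.11 type	HAL packet type			rate		tx queue
 *	management	BEACON, PROBE_RESP,		lowest (rix 0)	WME_AC_VO
 *			ATIM or NORMAL
 *	control		PSPOLL				lowest (rix 0)	WME_AC_VO
 *	data		NORMAL				rate control	WME_AC_BK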
737 */ 738 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 739 case IEEE80211_FC0_TYPE_MGT: 740 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 741 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) 742 atype = HAL_PKT_TYPE_BEACON; 743 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 744 atype = HAL_PKT_TYPE_PROBE_RESP; 745 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) 746 atype = HAL_PKT_TYPE_ATIM; 747 else 748 atype = HAL_PKT_TYPE_NORMAL; 749 rix = 0; /* lowest rate */ 750 try0 = ATH_TXMAXTRY; 751 if (shortPreamble) 752 txrate = an->an_tx_mgtratesp; 753 else 754 txrate = an->an_tx_mgtrate; 755 /* force all ctl frames to highest queue */ 756 txq = asc->asc_ac2q[WME_AC_VO]; 757 break; 758 case IEEE80211_FC0_TYPE_CTL: 759 atype = HAL_PKT_TYPE_PSPOLL; 760 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 761 rix = 0; /* lowest rate */ 762 try0 = ATH_TXMAXTRY; 763 if (shortPreamble) 764 txrate = an->an_tx_mgtratesp; 765 else 766 txrate = an->an_tx_mgtrate; 767 /* force all ctl frames to highest queue */ 768 txq = asc->asc_ac2q[WME_AC_VO]; 769 break; 770 case IEEE80211_FC0_TYPE_DATA: 771 atype = HAL_PKT_TYPE_NORMAL; 772 rix = an->an_tx_rix0; 773 try0 = an->an_tx_try0; 774 if (shortPreamble) 775 txrate = an->an_tx_rate0sp; 776 else 777 txrate = an->an_tx_rate0; 778 /* Always use background queue */ 779 txq = asc->asc_ac2q[WME_AC_BK]; 780 break; 781 default: 782 /* Unknown 802.11 frame */ 783 asc->asc_stats.ast_tx_invalid++; 784 return (1); 785 } 786 /* 787 * Calculate miscellaneous flags. 788 */ 789 flags = HAL_TXDESC_CLRDMASK; 790 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 791 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 792 asc->asc_stats.ast_tx_noack++; 793 } else if (pktlen > ic->ic_rtsthreshold) { 794 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 795 asc->asc_stats.ast_tx_rts++; 796 } 797 798 /* 799 * Calculate duration. This logically belongs in the 802.11 800 * layer but it lacks sufficient information to calculate it. 801 */ 802 if ((flags & HAL_TXDESC_NOACK) == 0 && 803 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 804 IEEE80211_FC0_TYPE_CTL) { 805 uint16_t dur; 806 dur = ath_hal_computetxtime(ah, rt, IEEE80211_ACK_SIZE, 807 rix, shortPreamble); 808 /* LINTED E_BAD_PTR_CAST_ALIGN */ 809 *(uint16_t *)wh->i_dur = LE_16(dur); 810 } 811 812 /* 813 * Calculate RTS/CTS rate and duration if needed. 814 */ 815 ctsduration = 0; 816 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { 817 /* 818 * CTS transmit rate is derived from the transmit rate 819 * by looking in the h/w rate table. We must also factor 820 * in whether or not a short preamble is to be used. 821 */ 822 cix = rt->info[rix].controlRate; 823 ctsrate = rt->info[cix].rateCode; 824 if (shortPreamble) 825 ctsrate |= rt->info[cix].shortPreamble; 826 /* 827 * Compute the transmit duration based on the size 828 * of an ACK frame. We call into the HAL to do the 829 * computation since it depends on the characteristics 830 * of the actual PHY being used. 
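 *
 * In outline (a restatement of the code below; every term is obtained
 * from ath_hal_computetxtime() for the PHY in use, with the control
 * rate (cix) for CTS/ACK and the data rate (rix) for the frame itself):
 *
 *	ctsduration =  SIFS + CTS time		(only when RTS is enabled)
 *	            +  SIFS + data frame time
 *	            +  SIFS + ACK time		(unless HAL_TXDESC_NOACK)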
831 */ 832 if (flags & HAL_TXDESC_RTSENA) { /* SIFS + CTS */ 833 ctsduration += ath_hal_computetxtime(ah, 834 rt, IEEE80211_ACK_SIZE, cix, shortPreamble); 835 } 836 /* SIFS + data */ 837 ctsduration += ath_hal_computetxtime(ah, 838 rt, pktlen, rix, shortPreamble); 839 if ((flags & HAL_TXDESC_NOACK) == 0) { /* SIFS + ACK */ 840 ctsduration += ath_hal_computetxtime(ah, 841 rt, IEEE80211_ACK_SIZE, cix, shortPreamble); 842 } 843 } else 844 ctsrate = 0; 845 846 if (++txq->axq_intrcnt >= ATH_TXINTR_PERIOD) { 847 flags |= HAL_TXDESC_INTREQ; 848 txq->axq_intrcnt = 0; 849 } 850 851 /* 852 * Formulate first tx descriptor with tx controls. 853 */ 854 ATH_HAL_SETUPTXDESC(ah, ds, 855 pktlen, /* packet length */ 856 hdrlen, /* header length */ 857 atype, /* Atheros packet type */ 858 MIN(in->in_txpower, 60), /* txpower */ 859 txrate, try0, /* series 0 rate/tries */ 860 keyix, /* key cache index */ 861 an->an_tx_antenna, /* antenna mode */ 862 flags, /* flags */ 863 ctsrate, /* rts/cts rate */ 864 ctsduration); /* rts/cts duration */ 865 bf->bf_flags = flags; 866 867 /* LINTED E_BAD_PTR_CAST_ALIGN */ 868 ATH_DEBUG((ATH_DBG_SEND, "ath: ath_xmit(): to %s totlen=%d " 869 "an->an_tx_rate1sp=%d tx_rate2sp=%d tx_rate3sp=%d " 870 "qnum=%d rix=%d sht=%d dur = %d\n", 871 ieee80211_macaddr_sprintf(wh->i_addr1), mbslen, an->an_tx_rate1sp, 872 an->an_tx_rate2sp, an->an_tx_rate3sp, 873 txq->axq_qnum, rix, shortPreamble, *(uint16_t *)wh->i_dur)); 874 875 /* 876 * Setup the multi-rate retry state only when we're 877 * going to use it. This assumes ath_hal_setuptxdesc 878 * initializes the descriptors (so we don't have to) 879 * when the hardware supports multi-rate retry and 880 * we don't use it. 881 */ 882 if (try0 != ATH_TXMAXTRY) 883 ATH_HAL_SETUPXTXDESC(ah, ds, 884 an->an_tx_rate1sp, 2, /* series 1 */ 885 an->an_tx_rate2sp, 2, /* series 2 */ 886 an->an_tx_rate3sp, 2); /* series 3 */ 887 888 ds->ds_link = 0; 889 ds->ds_data = bf->bf_dma.cookie.dmac_address; 890 ATH_HAL_FILLTXDESC(ah, ds, 891 mbslen, /* segment length */ 892 AH_TRUE, /* first segment */ 893 AH_TRUE, /* last segment */ 894 ds); /* first descriptor */ 895 896 ATH_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORDEV); 897 898 mutex_enter(&txq->axq_lock); 899 list_insert_tail(&txq->axq_list, bf); 900 if (txq->axq_link == NULL) { 901 ATH_HAL_PUTTXBUF(ah, txq->axq_qnum, bf->bf_daddr); 902 } else { 903 *txq->axq_link = bf->bf_daddr; 904 } 905 txq->axq_link = &ds->ds_link; 906 mutex_exit(&txq->axq_lock); 907 908 ATH_HAL_TXSTART(ah, txq->axq_qnum); 909 910 ic->ic_stats.is_tx_frags++; 911 ic->ic_stats.is_tx_bytes += pktlen; 912 913 return (0); 914 } 915 916 /* 917 * Transmit a management frame. On failure we reclaim the skbuff. 918 * Note that management frames come directly from the 802.11 layer 919 * and do not honor the send queue flow control. Need to investigate 920 * using priority queueing so management frames can bypass data. 
921 */ 922 static int 923 ath_xmit(ieee80211com_t *ic, mblk_t *mp, uint8_t type) 924 { 925 ath_t *asc = (ath_t *)ic; 926 struct ath_hal *ah = asc->asc_ah; 927 struct ieee80211_node *in = NULL; 928 struct ath_buf *bf = NULL; 929 struct ieee80211_frame *wh; 930 int error = 0; 931 932 ASSERT(mp->b_next == NULL); 933 934 if (!ATH_IS_RUNNING(asc)) { 935 if ((type & IEEE80211_FC0_TYPE_MASK) != 936 IEEE80211_FC0_TYPE_DATA) { 937 freemsg(mp); 938 } 939 return (ENXIO); 940 } 941 942 /* Grab a TX buffer */ 943 mutex_enter(&asc->asc_txbuflock); 944 bf = list_head(&asc->asc_txbuf_list); 945 if (bf != NULL) 946 list_remove(&asc->asc_txbuf_list, bf); 947 if (list_empty(&asc->asc_txbuf_list)) { 948 ATH_DEBUG((ATH_DBG_SEND, "ath: ath_mgmt_send(): " 949 "stop queue\n")); 950 asc->asc_stats.ast_tx_qstop++; 951 } 952 mutex_exit(&asc->asc_txbuflock); 953 if (bf == NULL) { 954 ATH_DEBUG((ATH_DBG_SEND, "ath: ath_mgmt_send(): discard, " 955 "no xmit buf\n")); 956 ic->ic_stats.is_tx_nobuf++; 957 if ((type & IEEE80211_FC0_TYPE_MASK) == 958 IEEE80211_FC0_TYPE_DATA) { 959 asc->asc_stats.ast_tx_nobuf++; 960 mutex_enter(&asc->asc_resched_lock); 961 asc->asc_resched_needed = B_TRUE; 962 mutex_exit(&asc->asc_resched_lock); 963 } else { 964 asc->asc_stats.ast_tx_nobufmgt++; 965 freemsg(mp); 966 } 967 return (ENOMEM); 968 } 969 970 wh = (struct ieee80211_frame *)mp->b_rptr; 971 972 /* Locate node */ 973 in = ieee80211_find_txnode(ic, wh->i_addr1); 974 if (in == NULL) { 975 error = EIO; 976 goto bad; 977 } 978 979 in->in_inact = 0; 980 switch (type & IEEE80211_FC0_TYPE_MASK) { 981 case IEEE80211_FC0_TYPE_DATA: 982 (void) ieee80211_encap(ic, mp, in); 983 break; 984 default: 985 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 986 IEEE80211_FC0_SUBTYPE_PROBE_RESP) { 987 /* fill time stamp */ 988 uint64_t tsf; 989 uint32_t *tstamp; 990 991 tsf = ATH_HAL_GETTSF64(ah); 992 /* adjust 100us delay to xmit */ 993 tsf += 100; 994 /* LINTED E_BAD_PTR_CAST_ALIGN */ 995 tstamp = (uint32_t *)&wh[1]; 996 tstamp[0] = LE_32(tsf & 0xffffffff); 997 tstamp[1] = LE_32(tsf >> 32); 998 } 999 asc->asc_stats.ast_tx_mgmt++; 1000 break; 1001 } 1002 1003 error = ath_tx_start(asc, in, bf, mp); 1004 if (error != 0) { 1005 bad: 1006 ic->ic_stats.is_tx_failed++; 1007 if (bf != NULL) { 1008 mutex_enter(&asc->asc_txbuflock); 1009 list_insert_tail(&asc->asc_txbuf_list, bf); 1010 mutex_exit(&asc->asc_txbuflock); 1011 } 1012 } 1013 if (in != NULL) 1014 ieee80211_free_node(in); 1015 if ((type & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_DATA || 1016 error == 0) { 1017 freemsg(mp); 1018 } 1019 1020 return (error); 1021 } 1022 1023 static mblk_t * 1024 ath_m_tx(void *arg, mblk_t *mp) 1025 { 1026 ath_t *asc = arg; 1027 ieee80211com_t *ic = (ieee80211com_t *)asc; 1028 mblk_t *next; 1029 int error = 0; 1030 1031 /* 1032 * No data frames go out unless we're associated; this 1033 * should not happen as the 802.11 layer does not enable 1034 * the xmit queue until we enter the RUN state. 
1035 */ 1036 if (ic->ic_state != IEEE80211_S_RUN) { 1037 ATH_DEBUG((ATH_DBG_SEND, "ath: ath_m_tx(): " 1038 "discard, state %u\n", ic->ic_state)); 1039 asc->asc_stats.ast_tx_discard++; 1040 freemsgchain(mp); 1041 return (NULL); 1042 } 1043 1044 while (mp != NULL) { 1045 next = mp->b_next; 1046 mp->b_next = NULL; 1047 error = ath_xmit(ic, mp, IEEE80211_FC0_TYPE_DATA); 1048 if (error != 0) { 1049 mp->b_next = next; 1050 if (error == ENOMEM) { 1051 break; 1052 } else { 1053 freemsgchain(mp); /* CR6501759 issues */ 1054 return (NULL); 1055 } 1056 } 1057 mp = next; 1058 } 1059 1060 return (mp); 1061 1062 } 1063 1064 static int 1065 ath_tx_processq(ath_t *asc, struct ath_txq *txq) 1066 { 1067 ieee80211com_t *ic = (ieee80211com_t *)asc; 1068 struct ath_hal *ah = asc->asc_ah; 1069 struct ath_buf *bf; 1070 struct ath_desc *ds; 1071 struct ieee80211_node *in; 1072 int32_t sr, lr, nacked = 0; 1073 HAL_STATUS status; 1074 struct ath_node *an; 1075 1076 for (;;) { 1077 mutex_enter(&txq->axq_lock); 1078 bf = list_head(&txq->axq_list); 1079 if (bf == NULL) { 1080 txq->axq_link = NULL; 1081 mutex_exit(&txq->axq_lock); 1082 break; 1083 } 1084 ds = bf->bf_desc; /* last decriptor */ 1085 status = ATH_HAL_TXPROCDESC(ah, ds); 1086 #ifdef DEBUG 1087 ath_printtxbuf(bf, status == HAL_OK); 1088 #endif 1089 if (status == HAL_EINPROGRESS) { 1090 mutex_exit(&txq->axq_lock); 1091 break; 1092 } 1093 list_remove(&txq->axq_list, bf); 1094 mutex_exit(&txq->axq_lock); 1095 in = bf->bf_in; 1096 if (in != NULL) { 1097 an = ATH_NODE(in); 1098 /* Successful transmition */ 1099 if (ds->ds_txstat.ts_status == 0) { 1100 an->an_tx_ok++; 1101 an->an_tx_antenna = 1102 ds->ds_txstat.ts_antenna; 1103 if (ds->ds_txstat.ts_rate & 1104 HAL_TXSTAT_ALTRATE) 1105 asc->asc_stats.ast_tx_altrate++; 1106 asc->asc_stats.ast_tx_rssidelta = 1107 ds->ds_txstat.ts_rssi - 1108 asc->asc_stats.ast_tx_rssi; 1109 asc->asc_stats.ast_tx_rssi = 1110 ds->ds_txstat.ts_rssi; 1111 } else { 1112 an->an_tx_err++; 1113 if (ds->ds_txstat.ts_status & 1114 HAL_TXERR_XRETRY) 1115 asc->asc_stats. 1116 ast_tx_xretries++; 1117 if (ds->ds_txstat.ts_status & 1118 HAL_TXERR_FIFO) 1119 asc->asc_stats.ast_tx_fifoerr++; 1120 if (ds->ds_txstat.ts_status & 1121 HAL_TXERR_FILT) 1122 asc->asc_stats. 1123 ast_tx_filtered++; 1124 an->an_tx_antenna = 0; /* invalidate */ 1125 } 1126 sr = ds->ds_txstat.ts_shortretry; 1127 lr = ds->ds_txstat.ts_longretry; 1128 asc->asc_stats.ast_tx_shortretry += sr; 1129 asc->asc_stats.ast_tx_longretry += lr; 1130 /* 1131 * Hand the descriptor to the rate control algorithm. 1132 */ 1133 if ((ds->ds_txstat.ts_status & HAL_TXERR_FILT) == 0 && 1134 (bf->bf_flags & HAL_TXDESC_NOACK) == 0) { 1135 /* 1136 * If frame was ack'd update the last rx time 1137 * used to workaround phantom bmiss interrupts. 1138 */ 1139 if (ds->ds_txstat.ts_status == 0) { 1140 nacked++; 1141 an->an_tx_ok++; 1142 } else { 1143 an->an_tx_err++; 1144 } 1145 an->an_tx_retr += sr + lr; 1146 } 1147 } 1148 bf->bf_in = NULL; 1149 mutex_enter(&asc->asc_txbuflock); 1150 list_insert_tail(&asc->asc_txbuf_list, bf); 1151 mutex_exit(&asc->asc_txbuflock); 1152 /* 1153 * Reschedule stalled outbound packets 1154 */ 1155 mutex_enter(&asc->asc_resched_lock); 1156 if (asc->asc_resched_needed) { 1157 asc->asc_resched_needed = B_FALSE; 1158 mac_tx_update(ic->ic_mach); 1159 } 1160 mutex_exit(&asc->asc_resched_lock); 1161 } 1162 return (nacked); 1163 } 1164 1165 1166 static void 1167 ath_tx_handler(ath_t *asc) 1168 { 1169 int i; 1170 1171 /* 1172 * Process each active queue. 
1173 */ 1174 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 1175 if (ATH_TXQ_SETUP(asc, i)) { 1176 (void) ath_tx_processq(asc, &asc->asc_txq[i]); 1177 } 1178 } 1179 } 1180 1181 static struct ieee80211_node * 1182 ath_node_alloc(ieee80211com_t *ic) 1183 { 1184 struct ath_node *an; 1185 ath_t *asc = (ath_t *)ic; 1186 1187 an = kmem_zalloc(sizeof (struct ath_node), KM_SLEEP); 1188 ath_rate_update(asc, &an->an_node, 0); 1189 return (&an->an_node); 1190 } 1191 1192 static void 1193 ath_node_free(struct ieee80211_node *in) 1194 { 1195 ieee80211com_t *ic = in->in_ic; 1196 ath_t *asc = (ath_t *)ic; 1197 struct ath_buf *bf; 1198 struct ath_txq *txq; 1199 int32_t i; 1200 1201 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 1202 if (ATH_TXQ_SETUP(asc, i)) { 1203 txq = &asc->asc_txq[i]; 1204 mutex_enter(&txq->axq_lock); 1205 bf = list_head(&txq->axq_list); 1206 while (bf != NULL) { 1207 if (bf->bf_in == in) { 1208 bf->bf_in = NULL; 1209 } 1210 bf = list_next(&txq->axq_list, bf); 1211 } 1212 mutex_exit(&txq->axq_lock); 1213 } 1214 } 1215 ic->ic_node_cleanup(in); 1216 if (in->in_wpa_ie != NULL) 1217 ieee80211_free(in->in_wpa_ie); 1218 kmem_free(in, sizeof (struct ath_node)); 1219 } 1220 1221 static void 1222 ath_next_scan(void *arg) 1223 { 1224 ieee80211com_t *ic = arg; 1225 ath_t *asc = (ath_t *)ic; 1226 1227 asc->asc_scan_timer = 0; 1228 if (ic->ic_state == IEEE80211_S_SCAN) { 1229 asc->asc_scan_timer = timeout(ath_next_scan, (void *)asc, 1230 drv_usectohz(ath_dwelltime * 1000)); 1231 ieee80211_next_scan(ic); 1232 } 1233 } 1234 1235 static void 1236 ath_stop_scantimer(ath_t *asc) 1237 { 1238 timeout_id_t tmp_id = 0; 1239 1240 while ((asc->asc_scan_timer != 0) && (tmp_id != asc->asc_scan_timer)) { 1241 tmp_id = asc->asc_scan_timer; 1242 (void) untimeout(tmp_id); 1243 } 1244 asc->asc_scan_timer = 0; 1245 } 1246 1247 static int32_t 1248 ath_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg) 1249 { 1250 ath_t *asc = (ath_t *)ic; 1251 struct ath_hal *ah = asc->asc_ah; 1252 struct ieee80211_node *in; 1253 int32_t i, error; 1254 uint8_t *bssid; 1255 uint32_t rfilt; 1256 enum ieee80211_state ostate; 1257 1258 static const HAL_LED_STATE leds[] = { 1259 HAL_LED_INIT, /* IEEE80211_S_INIT */ 1260 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 1261 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 1262 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 1263 HAL_LED_RUN, /* IEEE80211_S_RUN */ 1264 }; 1265 if (!ATH_IS_RUNNING(asc)) 1266 return (0); 1267 1268 ostate = ic->ic_state; 1269 if (nstate != IEEE80211_S_SCAN) 1270 ath_stop_scantimer(asc); 1271 1272 ATH_LOCK(asc); 1273 ATH_HAL_SETLEDSTATE(ah, leds[nstate]); /* set LED */ 1274 1275 if (nstate == IEEE80211_S_INIT) { 1276 asc->asc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 1277 ATH_HAL_INTRSET(ah, asc->asc_imask &~ HAL_INT_GLOBAL); 1278 ATH_UNLOCK(asc); 1279 goto done; 1280 } 1281 in = ic->ic_bss; 1282 error = ath_chan_set(asc, ic->ic_curchan); 1283 if (error != 0) { 1284 if (nstate != IEEE80211_S_SCAN) { 1285 ATH_UNLOCK(asc); 1286 ieee80211_reset_chan(ic); 1287 goto bad; 1288 } 1289 } 1290 1291 rfilt = ath_calcrxfilter(asc); 1292 if (nstate == IEEE80211_S_SCAN) 1293 bssid = ic->ic_macaddr; 1294 else 1295 bssid = in->in_bssid; 1296 ATH_HAL_SETRXFILTER(ah, rfilt); 1297 1298 if (nstate == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS) 1299 ATH_HAL_SETASSOCID(ah, bssid, in->in_associd); 1300 else 1301 ATH_HAL_SETASSOCID(ah, bssid, 0); 1302 if (ic->ic_flags & IEEE80211_F_PRIVACY) { 1303 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 1304 if (ATH_HAL_KEYISVALID(ah, i)) 1305 
ATH_HAL_KEYSETMAC(ah, i, bssid); 1306 } 1307 } 1308 1309 if ((nstate == IEEE80211_S_RUN) && 1310 (ostate != IEEE80211_S_RUN)) { 1311 /* Configure the beacon and sleep timers. */ 1312 ath_beacon_config(asc); 1313 } else { 1314 asc->asc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 1315 ATH_HAL_INTRSET(ah, asc->asc_imask); 1316 } 1317 /* 1318 * Reset the rate control state. 1319 */ 1320 ath_rate_ctl_reset(asc, nstate); 1321 1322 if (nstate == IEEE80211_S_RUN && (ostate != IEEE80211_S_RUN)) { 1323 nvlist_t *attr_list = NULL; 1324 sysevent_id_t eid; 1325 int32_t err = 0; 1326 char *str_name = "ATH"; 1327 char str_value[256] = {0}; 1328 1329 ATH_DEBUG((ATH_DBG_80211, "ath: ath new state(RUN): " 1330 "ic_flags=0x%08x iv=%d" 1331 " bssid=%s capinfo=0x%04x chan=%d\n", 1332 ic->ic_flags, 1333 in->in_intval, 1334 ieee80211_macaddr_sprintf(in->in_bssid), 1335 in->in_capinfo, 1336 ieee80211_chan2ieee(ic, in->in_chan))); 1337 1338 (void) sprintf(str_value, "%s%s%d", "-i ", 1339 ddi_driver_name(asc->asc_dev), 1340 ddi_get_instance(asc->asc_dev)); 1341 if (nvlist_alloc(&attr_list, 1342 NV_UNIQUE_NAME_TYPE, KM_SLEEP) == 0) { 1343 err = nvlist_add_string(attr_list, 1344 str_name, str_value); 1345 if (err != DDI_SUCCESS) 1346 ATH_DEBUG((ATH_DBG_80211, "ath: " 1347 "ath_new_state: error log event\n")); 1348 err = ddi_log_sysevent(asc->asc_dev, 1349 DDI_VENDOR_SUNW, "class", 1350 "subclass", attr_list, 1351 &eid, DDI_NOSLEEP); 1352 if (err != DDI_SUCCESS) 1353 ATH_DEBUG((ATH_DBG_80211, "ath: " 1354 "ath_new_state(): error log event\n")); 1355 nvlist_free(attr_list); 1356 } 1357 } 1358 1359 ATH_UNLOCK(asc); 1360 done: 1361 /* 1362 * Invoke the parent method to complete the work. 1363 */ 1364 error = asc->asc_newstate(ic, nstate, arg); 1365 /* 1366 * Finally, start any timers. 1367 */ 1368 if (nstate == IEEE80211_S_RUN) { 1369 ieee80211_start_watchdog(ic, 1); 1370 } else if ((nstate == IEEE80211_S_SCAN) && (ostate != nstate)) { 1371 /* start ap/neighbor scan timer */ 1372 ASSERT(asc->asc_scan_timer == 0); 1373 asc->asc_scan_timer = timeout(ath_next_scan, (void *)asc, 1374 drv_usectohz(ath_dwelltime * 1000)); 1375 } 1376 bad: 1377 return (error); 1378 } 1379 1380 /* 1381 * Periodically recalibrate the PHY to account 1382 * for temperature/environment changes. 1383 */ 1384 static void 1385 ath_calibrate(ath_t *asc) 1386 { 1387 struct ath_hal *ah = asc->asc_ah; 1388 HAL_BOOL iqcaldone; 1389 1390 asc->asc_stats.ast_per_cal++; 1391 1392 if (ATH_HAL_GETRFGAIN(ah) == HAL_RFGAIN_NEED_CHANGE) { 1393 /* 1394 * Rfgain is out of bounds, reset the chip 1395 * to load new gain values. 1396 */ 1397 ATH_DEBUG((ATH_DBG_HAL, "ath: ath_calibrate(): " 1398 "Need change RFgain\n")); 1399 asc->asc_stats.ast_per_rfgain++; 1400 (void) ath_reset(&asc->asc_isc); 1401 } 1402 if (!ATH_HAL_CALIBRATE(ah, &asc->asc_curchan, &iqcaldone)) { 1403 ATH_DEBUG((ATH_DBG_HAL, "ath: ath_calibrate(): " 1404 "calibration of channel %u failed\n", 1405 asc->asc_curchan.channel)); 1406 asc->asc_stats.ast_per_calfail++; 1407 } 1408 } 1409 1410 static void 1411 ath_watchdog(void *arg) 1412 { 1413 ath_t *asc = arg; 1414 ieee80211com_t *ic = &asc->asc_isc; 1415 int ntimer = 0; 1416 1417 ATH_LOCK(asc); 1418 ic->ic_watchdog_timer = 0; 1419 if (!ATH_IS_RUNNING(asc)) { 1420 ATH_UNLOCK(asc); 1421 return; 1422 } 1423 1424 if (ic->ic_state == IEEE80211_S_RUN) { 1425 /* periodic recalibration */ 1426 ath_calibrate(asc); 1427 1428 /* 1429 * Start the background rate control thread if we 1430 * are not configured to use a fixed xmit rate. 
1431 */ 1432 if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) { 1433 asc->asc_stats.ast_rate_calls ++; 1434 if (ic->ic_opmode == IEEE80211_M_STA) 1435 ath_rate_ctl(ic, ic->ic_bss); 1436 else 1437 ieee80211_iterate_nodes(&ic->ic_sta, 1438 ath_rate_cb, asc); 1439 } 1440 1441 ntimer = 1; 1442 } 1443 ATH_UNLOCK(asc); 1444 1445 ieee80211_watchdog(ic); 1446 if (ntimer != 0) 1447 ieee80211_start_watchdog(ic, ntimer); 1448 } 1449 1450 static uint_t 1451 ath_intr(caddr_t arg) 1452 { 1453 /* LINTED E_BAD_PTR_CAST_ALIGN */ 1454 ath_t *asc = (ath_t *)arg; 1455 struct ath_hal *ah = asc->asc_ah; 1456 HAL_INT status; 1457 ieee80211com_t *ic = (ieee80211com_t *)asc; 1458 1459 ATH_LOCK(asc); 1460 1461 if (!ATH_IS_RUNNING(asc)) { 1462 /* 1463 * The hardware is not ready/present, don't touch anything. 1464 * Note this can happen early on if the IRQ is shared. 1465 */ 1466 ATH_UNLOCK(asc); 1467 return (DDI_INTR_UNCLAIMED); 1468 } 1469 1470 if (!ATH_HAL_INTRPEND(ah)) { /* shared irq, not for us */ 1471 ATH_UNLOCK(asc); 1472 return (DDI_INTR_UNCLAIMED); 1473 } 1474 1475 ATH_HAL_GETISR(ah, &status); 1476 status &= asc->asc_imask; 1477 if (status & HAL_INT_FATAL) { 1478 asc->asc_stats.ast_hardware++; 1479 goto reset; 1480 } else if (status & HAL_INT_RXORN) { 1481 asc->asc_stats.ast_rxorn++; 1482 goto reset; 1483 } else { 1484 if (status & HAL_INT_RXEOL) { 1485 asc->asc_stats.ast_rxeol++; 1486 asc->asc_rxlink = NULL; 1487 } 1488 if (status & HAL_INT_TXURN) { 1489 asc->asc_stats.ast_txurn++; 1490 ATH_HAL_UPDATETXTRIGLEVEL(ah, AH_TRUE); 1491 } 1492 1493 if (status & HAL_INT_RX) { 1494 asc->asc_rx_pend = 1; 1495 ddi_trigger_softintr(asc->asc_softint_id); 1496 } 1497 if (status & HAL_INT_TX) { 1498 ath_tx_handler(asc); 1499 } 1500 ATH_UNLOCK(asc); 1501 1502 if (status & HAL_INT_SWBA) { 1503 /* This will occur only in Host-AP or Ad-Hoc mode */ 1504 return (DDI_INTR_CLAIMED); 1505 } 1506 if (status & HAL_INT_BMISS) { 1507 if (ic->ic_state == IEEE80211_S_RUN) { 1508 (void) ieee80211_new_state(ic, 1509 IEEE80211_S_ASSOC, -1); 1510 } 1511 } 1512 } 1513 1514 return (DDI_INTR_CLAIMED); 1515 reset: 1516 (void) ath_reset(ic); 1517 ATH_UNLOCK(asc); 1518 return (DDI_INTR_CLAIMED); 1519 } 1520 1521 static uint_t 1522 ath_softint_handler(caddr_t data) 1523 { 1524 /* LINTED E_BAD_PTR_CAST_ALIGN */ 1525 ath_t *asc = (ath_t *)data; 1526 1527 /* 1528 * Check if the soft interrupt is triggered by another 1529 * driver at the same level. 1530 */ 1531 ATH_LOCK(asc); 1532 if (asc->asc_rx_pend) { /* Soft interrupt for this driver */ 1533 asc->asc_rx_pend = 0; 1534 ATH_UNLOCK(asc); 1535 ath_rx_handler(asc); 1536 return (DDI_INTR_CLAIMED); 1537 } 1538 ATH_UNLOCK(asc); 1539 return (DDI_INTR_UNCLAIMED); 1540 } 1541 1542 /* 1543 * following are gld callback routine 1544 * ath_gld_send, ath_gld_ioctl, ath_gld_gstat 1545 * are listed in other corresponding sections. 1546 * reset the hardware w/o losing operational state. this is 1547 * basically a more efficient way of doing ath_gld_stop, ath_gld_start, 1548 * followed by state transitions to the current 802.11 1549 * operational state. used to recover from errors rx overrun 1550 * and to reset the hardware when rf gain settings must be reset. 
1551 */ 1552 1553 static void 1554 ath_stop_locked(ath_t *asc) 1555 { 1556 ieee80211com_t *ic = (ieee80211com_t *)asc; 1557 struct ath_hal *ah = asc->asc_ah; 1558 1559 ATH_LOCK_ASSERT(asc); 1560 if (!asc->asc_isrunning) 1561 return; 1562 1563 /* 1564 * Shutdown the hardware and driver: 1565 * reset 802.11 state machine 1566 * turn off timers 1567 * disable interrupts 1568 * turn off the radio 1569 * clear transmit machinery 1570 * clear receive machinery 1571 * drain and release tx queues 1572 * reclaim beacon resources 1573 * power down hardware 1574 * 1575 * Note that some of this work is not possible if the 1576 * hardware is gone (invalid). 1577 */ 1578 ATH_UNLOCK(asc); 1579 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 1580 ieee80211_stop_watchdog(ic); 1581 ATH_LOCK(asc); 1582 ATH_HAL_INTRSET(ah, 0); 1583 ath_draintxq(asc); 1584 if (!asc->asc_invalid) { 1585 ath_stoprecv(asc); 1586 ATH_HAL_PHYDISABLE(ah); 1587 } else { 1588 asc->asc_rxlink = NULL; 1589 } 1590 asc->asc_isrunning = 0; 1591 } 1592 1593 static void 1594 ath_m_stop(void *arg) 1595 { 1596 ath_t *asc = arg; 1597 struct ath_hal *ah = asc->asc_ah; 1598 1599 ATH_LOCK(asc); 1600 ath_stop_locked(asc); 1601 ATH_HAL_SETPOWER(ah, HAL_PM_AWAKE); 1602 asc->asc_invalid = 1; 1603 ATH_UNLOCK(asc); 1604 } 1605 1606 static int 1607 ath_start_locked(ath_t *asc) 1608 { 1609 ieee80211com_t *ic = (ieee80211com_t *)asc; 1610 struct ath_hal *ah = asc->asc_ah; 1611 HAL_STATUS status; 1612 1613 ATH_LOCK_ASSERT(asc); 1614 1615 /* 1616 * The basic interface to setting the hardware in a good 1617 * state is ``reset''. On return the hardware is known to 1618 * be powered up and with interrupts disabled. This must 1619 * be followed by initialization of the appropriate bits 1620 * and then setup of the interrupt mask. 1621 */ 1622 asc->asc_curchan.channel = ic->ic_curchan->ich_freq; 1623 asc->asc_curchan.channelFlags = ath_chan2flags(ic, ic->ic_curchan); 1624 if (!ATH_HAL_RESET(ah, (HAL_OPMODE)ic->ic_opmode, 1625 &asc->asc_curchan, AH_FALSE, &status)) { 1626 ATH_DEBUG((ATH_DBG_HAL, "ath: ath_m_start(): " 1627 "reset hardware failed: '%s' (HAL status %u)\n", 1628 ath_get_hal_status_desc(status), status)); 1629 return (ENOTACTIVE); 1630 } 1631 1632 (void) ath_startrecv(asc); 1633 1634 /* 1635 * Enable interrupts. 1636 */ 1637 asc->asc_imask = HAL_INT_RX | HAL_INT_TX 1638 | HAL_INT_RXEOL | HAL_INT_RXORN 1639 | HAL_INT_FATAL | HAL_INT_GLOBAL; 1640 ATH_HAL_INTRSET(ah, asc->asc_imask); 1641 1642 /* 1643 * The hardware should be ready to go now so it's safe 1644 * to kick the 802.11 state machine as it's likely to 1645 * immediately call back to us to send mgmt frames. 1646 */ 1647 ath_chan_change(asc, ic->ic_curchan); 1648 1649 asc->asc_isrunning = 1; 1650 1651 return (0); 1652 } 1653 1654 int 1655 ath_m_start(void *arg) 1656 { 1657 ath_t *asc = arg; 1658 int err; 1659 1660 ATH_LOCK(asc); 1661 /* 1662 * Stop anything previously setup. This is safe 1663 * whether this is the first time through or not. 
1664 */ 1665 ath_stop_locked(asc); 1666 1667 if ((err = ath_start_locked(asc)) != 0) { 1668 ATH_UNLOCK(asc); 1669 return (err); 1670 } 1671 1672 asc->asc_invalid = 0; 1673 ATH_UNLOCK(asc); 1674 1675 return (0); 1676 } 1677 1678 1679 static int 1680 ath_m_unicst(void *arg, const uint8_t *macaddr) 1681 { 1682 ath_t *asc = arg; 1683 struct ath_hal *ah = asc->asc_ah; 1684 1685 ATH_DEBUG((ATH_DBG_GLD, "ath: ath_gld_saddr(): " 1686 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", 1687 macaddr[0], macaddr[1], macaddr[2], 1688 macaddr[3], macaddr[4], macaddr[5])); 1689 1690 ATH_LOCK(asc); 1691 IEEE80211_ADDR_COPY(asc->asc_isc.ic_macaddr, macaddr); 1692 ATH_HAL_SETMAC(ah, asc->asc_isc.ic_macaddr); 1693 1694 (void) ath_reset(&asc->asc_isc); 1695 ATH_UNLOCK(asc); 1696 return (0); 1697 } 1698 1699 static int 1700 ath_m_promisc(void *arg, boolean_t on) 1701 { 1702 ath_t *asc = arg; 1703 struct ath_hal *ah = asc->asc_ah; 1704 uint32_t rfilt; 1705 1706 ATH_LOCK(asc); 1707 rfilt = ATH_HAL_GETRXFILTER(ah); 1708 if (on) 1709 rfilt |= HAL_RX_FILTER_PROM; 1710 else 1711 rfilt &= ~HAL_RX_FILTER_PROM; 1712 asc->asc_promisc = on; 1713 ATH_HAL_SETRXFILTER(ah, rfilt); 1714 ATH_UNLOCK(asc); 1715 1716 return (0); 1717 } 1718 1719 static int 1720 ath_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 1721 { 1722 ath_t *asc = arg; 1723 struct ath_hal *ah = asc->asc_ah; 1724 uint32_t val, index, bit; 1725 uint8_t pos; 1726 uint32_t *mfilt = asc->asc_mcast_hash; 1727 1728 ATH_LOCK(asc); 1729 /* calculate XOR of eight 6bit values */ 1730 val = ATH_LE_READ_4(mca + 0); 1731 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 1732 val = ATH_LE_READ_4(mca + 3); 1733 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 1734 pos &= 0x3f; 1735 index = pos / 32; 1736 bit = 1 << (pos % 32); 1737 1738 if (add) { /* enable multicast */ 1739 asc->asc_mcast_refs[pos]++; 1740 mfilt[index] |= bit; 1741 } else { /* disable multicast */ 1742 if (--asc->asc_mcast_refs[pos] == 0) 1743 mfilt[index] &= ~bit; 1744 } 1745 ATH_HAL_SETMCASTFILTER(ah, mfilt[0], mfilt[1]); 1746 1747 ATH_UNLOCK(asc); 1748 return (0); 1749 } 1750 1751 /* 1752 * callback functions for /get/set properties 1753 */ 1754 static int 1755 ath_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num, 1756 uint_t wldp_length, const void *wldp_buf) 1757 { 1758 ath_t *asc = arg; 1759 int err; 1760 1761 err = ieee80211_setprop(&asc->asc_isc, pr_name, wldp_pr_num, 1762 wldp_length, wldp_buf); 1763 1764 ATH_LOCK(asc); 1765 1766 if (err == ENETRESET) { 1767 if (ATH_IS_RUNNING(asc)) { 1768 ATH_UNLOCK(asc); 1769 (void) ath_m_start(asc); 1770 (void) ieee80211_new_state(&asc->asc_isc, 1771 IEEE80211_S_SCAN, -1); 1772 ATH_LOCK(asc); 1773 } 1774 err = 0; 1775 } 1776 1777 ATH_UNLOCK(asc); 1778 1779 return (err); 1780 } 1781 static int 1782 ath_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num, 1783 uint_t pr_flags, uint_t wldp_length, void *wldp_buf) 1784 { 1785 ath_t *asc = arg; 1786 int err = 0; 1787 1788 err = ieee80211_getprop(&asc->asc_isc, pr_name, wldp_pr_num, 1789 pr_flags, wldp_length, wldp_buf); 1790 1791 return (err); 1792 } 1793 1794 static void 1795 ath_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 1796 { 1797 ath_t *asc = arg; 1798 int32_t err; 1799 1800 err = ieee80211_ioctl(&asc->asc_isc, wq, mp); 1801 ATH_LOCK(asc); 1802 if (err == ENETRESET) { 1803 if (ATH_IS_RUNNING(asc)) { 1804 ATH_UNLOCK(asc); 1805 (void) ath_m_start(asc); 1806 (void) ieee80211_new_state(&asc->asc_isc, 1807 IEEE80211_S_SCAN, -1); 1808 ATH_LOCK(asc); 1809 } 1810 } 1811 
ATH_UNLOCK(asc); 1812 } 1813 1814 static int 1815 ath_m_stat(void *arg, uint_t stat, uint64_t *val) 1816 { 1817 ath_t *asc = arg; 1818 ieee80211com_t *ic = (ieee80211com_t *)asc; 1819 struct ieee80211_node *in = ic->ic_bss; 1820 struct ieee80211_rateset *rs = &in->in_rates; 1821 1822 ATH_LOCK(asc); 1823 switch (stat) { 1824 case MAC_STAT_IFSPEED: 1825 *val = (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) / 2 * 1826 1000000ull; 1827 break; 1828 case MAC_STAT_NOXMTBUF: 1829 *val = asc->asc_stats.ast_tx_nobuf + 1830 asc->asc_stats.ast_tx_nobufmgt; 1831 break; 1832 case MAC_STAT_IERRORS: 1833 *val = asc->asc_stats.ast_rx_tooshort; 1834 break; 1835 case MAC_STAT_RBYTES: 1836 *val = ic->ic_stats.is_rx_bytes; 1837 break; 1838 case MAC_STAT_IPACKETS: 1839 *val = ic->ic_stats.is_rx_frags; 1840 break; 1841 case MAC_STAT_OBYTES: 1842 *val = ic->ic_stats.is_tx_bytes; 1843 break; 1844 case MAC_STAT_OPACKETS: 1845 *val = ic->ic_stats.is_tx_frags; 1846 break; 1847 case MAC_STAT_OERRORS: 1848 case WIFI_STAT_TX_FAILED: 1849 *val = asc->asc_stats.ast_tx_fifoerr + 1850 asc->asc_stats.ast_tx_xretries + 1851 asc->asc_stats.ast_tx_discard; 1852 break; 1853 case WIFI_STAT_TX_RETRANS: 1854 *val = asc->asc_stats.ast_tx_xretries; 1855 break; 1856 case WIFI_STAT_FCS_ERRORS: 1857 *val = asc->asc_stats.ast_rx_crcerr; 1858 break; 1859 case WIFI_STAT_WEP_ERRORS: 1860 *val = asc->asc_stats.ast_rx_badcrypt; 1861 break; 1862 case WIFI_STAT_TX_FRAGS: 1863 case WIFI_STAT_MCAST_TX: 1864 case WIFI_STAT_RTS_SUCCESS: 1865 case WIFI_STAT_RTS_FAILURE: 1866 case WIFI_STAT_ACK_FAILURE: 1867 case WIFI_STAT_RX_FRAGS: 1868 case WIFI_STAT_MCAST_RX: 1869 case WIFI_STAT_RX_DUPS: 1870 ATH_UNLOCK(asc); 1871 return (ieee80211_stat(ic, stat, val)); 1872 default: 1873 ATH_UNLOCK(asc); 1874 return (ENOTSUP); 1875 } 1876 ATH_UNLOCK(asc); 1877 1878 return (0); 1879 } 1880 1881 static int 1882 ath_pci_setup(ath_t *asc) 1883 { 1884 uint16_t command; 1885 1886 /* 1887 * Enable memory mapping and bus mastering 1888 */ 1889 ASSERT(asc != NULL); 1890 command = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_COMM); 1891 command |= PCI_COMM_MAE | PCI_COMM_ME; 1892 pci_config_put16(asc->asc_cfg_handle, PCI_CONF_COMM, command); 1893 command = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_COMM); 1894 if ((command & PCI_COMM_MAE) == 0) { 1895 ath_problem("ath: ath_pci_setup(): " 1896 "failed to enable memory mapping\n"); 1897 return (EIO); 1898 } 1899 if ((command & PCI_COMM_ME) == 0) { 1900 ath_problem("ath: ath_pci_setup(): " 1901 "failed to enable bus mastering\n"); 1902 return (EIO); 1903 } 1904 ATH_DEBUG((ATH_DBG_INIT, "ath: ath_pci_setup(): " 1905 "set command reg to 0x%x \n", command)); 1906 1907 return (0); 1908 } 1909 1910 static int 1911 ath_resume(dev_info_t *devinfo) 1912 { 1913 ath_t *asc; 1914 int ret = DDI_SUCCESS; 1915 1916 asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo)); 1917 if (asc == NULL) { 1918 ATH_DEBUG((ATH_DBG_SUSPEND, "ath: ath_resume(): " 1919 "failed to get soft state\n")); 1920 return (DDI_FAILURE); 1921 } 1922 1923 ATH_LOCK(asc); 1924 /* 1925 * Set up config space command register(s). Refuse 1926 * to resume on failure. 
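 *
 * (The bits restored by ath_pci_setup() are the same ones set at attach
 * time, e.g.:
 *
 *	command |= PCI_COMM_MAE | PCI_COMM_ME;
 *	pci_config_put16(asc->asc_cfg_handle, PCI_CONF_COMM, command);
 *
 * the assumption being that PCI config space may have been reset while
 * the device was powered down.)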
1927 */ 1928 if (ath_pci_setup(asc) != 0) { 1929 ATH_DEBUG((ATH_DBG_SUSPEND, "ath: ath_resume(): " 1930 "ath_pci_setup() failed\n")); 1931 ATH_UNLOCK(asc); 1932 return (DDI_FAILURE); 1933 } 1934 1935 if (!asc->asc_invalid) 1936 ret = ath_start_locked(asc); 1937 ATH_UNLOCK(asc); 1938 1939 return (ret); 1940 } 1941 1942 static int 1943 ath_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 1944 { 1945 ath_t *asc; 1946 ieee80211com_t *ic; 1947 struct ath_hal *ah; 1948 uint8_t csz; 1949 HAL_STATUS status; 1950 caddr_t regs; 1951 uint32_t i, val; 1952 uint16_t vendor_id, device_id; 1953 const char *athname; 1954 int32_t ath_countrycode = CTRY_DEFAULT; /* country code */ 1955 int32_t err, ath_regdomain = 0; /* regulatory domain */ 1956 char strbuf[32]; 1957 int instance; 1958 wifi_data_t wd = { 0 }; 1959 mac_register_t *macp; 1960 1961 switch (cmd) { 1962 case DDI_ATTACH: 1963 break; 1964 1965 case DDI_RESUME: 1966 return (ath_resume(devinfo)); 1967 1968 default: 1969 return (DDI_FAILURE); 1970 } 1971 1972 instance = ddi_get_instance(devinfo); 1973 if (ddi_soft_state_zalloc(ath_soft_state_p, instance) != DDI_SUCCESS) { 1974 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): " 1975 "Unable to alloc softstate\n")); 1976 return (DDI_FAILURE); 1977 } 1978 1979 asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo)); 1980 ic = (ieee80211com_t *)asc; 1981 asc->asc_dev = devinfo; 1982 1983 mutex_init(&asc->asc_genlock, NULL, MUTEX_DRIVER, NULL); 1984 mutex_init(&asc->asc_txbuflock, NULL, MUTEX_DRIVER, NULL); 1985 mutex_init(&asc->asc_rxbuflock, NULL, MUTEX_DRIVER, NULL); 1986 mutex_init(&asc->asc_resched_lock, NULL, MUTEX_DRIVER, NULL); 1987 1988 err = pci_config_setup(devinfo, &asc->asc_cfg_handle); 1989 if (err != DDI_SUCCESS) { 1990 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): " 1991 "pci_config_setup() failed")); 1992 goto attach_fail0; 1993 } 1994 1995 if (ath_pci_setup(asc) != 0) 1996 goto attach_fail1; 1997 1998 /* 1999 * Cache line size is used to size and align various 2000 * structures used to communicate with the hardware. 2001 */ 2002 csz = pci_config_get8(asc->asc_cfg_handle, PCI_CONF_CACHE_LINESZ); 2003 if (csz == 0) { 2004 /* 2005 * We must have this setup properly for rx buffer 2006 * DMA to work so force a reasonable value here if it 2007 * comes up zero. 2008 */ 2009 csz = ATH_DEF_CACHE_BYTES / sizeof (uint32_t); 2010 pci_config_put8(asc->asc_cfg_handle, PCI_CONF_CACHE_LINESZ, 2011 csz); 2012 } 2013 asc->asc_cachelsz = csz << 2; 2014 vendor_id = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_VENID); 2015 device_id = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_DEVID); 2016 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): vendor 0x%x, " 2017 "device id 0x%x, cache size %d\n", vendor_id, device_id, csz)); 2018 2019 athname = ath_hal_probe(vendor_id, device_id); 2020 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): athname: %s\n", 2021 athname ? 
	    athname ? athname : "Atheros ???"));

	/* Set a generous PCI latency timer. */
	pci_config_put8(asc->asc_cfg_handle, PCI_CONF_LATENCY_TIMER, 0xa8);

	/* Clear the PCI retry-timeout byte (config offset 0x41) if set. */
	val = pci_config_get32(asc->asc_cfg_handle, 0x40);
	if ((val & 0x0000ff00) != 0)
		pci_config_put32(asc->asc_cfg_handle, 0x40, val & 0xffff00ff);

	err = ddi_regs_map_setup(devinfo, 1,
	    &regs, 0, 0, &ath_reg_accattr, &asc->asc_io_handle);
	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
	    "regs map1 = %x err=%d\n", regs, err));
	if (err != DDI_SUCCESS) {
		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
		    "ddi_regs_map_setup() failed"));
		goto attach_fail1;
	}

	ah = ath_hal_attach(device_id, asc, 0, regs, &status);
	if (ah == NULL) {
		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
		    "unable to attach hw: '%s' (HAL status %u)\n",
		    ath_get_hal_status_desc(status), status));
		goto attach_fail2;
	}
	ATH_HAL_INTRSET(ah, 0);
	asc->asc_ah = ah;

	if (ah->ah_abi != HAL_ABI_VERSION) {
		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
		    "HAL ABI mismatch detected (0x%x != 0x%x)\n",
		    ah->ah_abi, HAL_ABI_VERSION));
		goto attach_fail3;
	}

	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
	    "HAL ABI version 0x%x\n", ah->ah_abi));
	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
	    "HAL mac version %d.%d, phy version %d.%d\n",
	    ah->ah_macVersion, ah->ah_macRev,
	    ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf));
	if (ah->ah_analog5GhzRev)
		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
		    "HAL 5ghz radio version %d.%d\n",
		    ah->ah_analog5GhzRev >> 4,
		    ah->ah_analog5GhzRev & 0xf));
	if (ah->ah_analog2GhzRev)
		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
		    "HAL 2ghz radio version %d.%d\n",
		    ah->ah_analog2GhzRev >> 4,
		    ah->ah_analog2GhzRev & 0xf));

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MACs that don't have support will
	 * return false w/o doing anything.  MACs that do
	 * support it will return true w/o doing anything.
	 */
	asc->asc_mrretry = ATH_HAL_SETUPXTXDESC(ah, NULL, 0, 0, 0, 0, 0, 0);
	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
	    "multi rate retry support=%x\n",
	    asc->asc_mrretry));

	/*
	 * Get the hardware key cache size.
	 */
	asc->asc_keymax = ATH_HAL_KEYCACHESIZE(ah);
	if (asc->asc_keymax > sizeof (asc->asc_keymap) * NBBY) {
		ATH_DEBUG((ATH_DBG_ATTACH, "ath_attach:"
		    " Warning, using only %u entries in %u key cache\n",
		    sizeof (asc->asc_keymap) * NBBY, asc->asc_keymax));
		asc->asc_keymax = sizeof (asc->asc_keymap) * NBBY;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < asc->asc_keymax; i++)
		ATH_HAL_KEYRESET(ah, i);

	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(asc->asc_keymap, i);
		setbit(asc->asc_keymap, i+32);
		setbit(asc->asc_keymap, i+64);
		setbit(asc->asc_keymap, i+32+64);
	}

	ATH_HAL_GETREGDOMAIN(ah, (uint32_t *)&ath_regdomain);
	ATH_HAL_GETCOUNTRYCODE(ah, &ath_countrycode);
	/*
	 * Collect the channel list using the default country
	 * code and including outdoor channels.  The 802.11 layer
	 * is responsible for filtering this list to a set of
	 * channels that it considers ok to use.
	 */
	asc->asc_have11g = 0;

	/* enable outdoor use, enable extended channels */
	err = ath_getchannels(asc, ath_countrycode, AH_FALSE, AH_TRUE);
	if (err != 0)
		goto attach_fail3;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(asc, IEEE80211_MODE_11A);
	ath_rate_setup(asc, IEEE80211_MODE_11B);
	ath_rate_setup(asc, IEEE80211_MODE_11G);
	ath_rate_setup(asc, IEEE80211_MODE_TURBO_A);

	/* Setup here so ath_rate_update is happy */
	ath_setcurmode(asc, IEEE80211_MODE_11A);

	err = ath_desc_alloc(devinfo, asc);
	if (err != DDI_SUCCESS) {
		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
		    "failed to allocate descriptors: %d\n", err));
		goto attach_fail3;
	}

	/* Setup transmit queues in the HAL */
	if (ath_txq_setup(asc))
		goto attach_fail4;

	ATH_HAL_GETMAC(ah, ic->ic_macaddr);

	/*
	 * Initialize pointers to device specific functions which
	 * will be used by the generic layer.
	 */
	/* 11g support is identified when we fetch the channel set */
	if (asc->asc_have11g)
		ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
		    IEEE80211_C_SHSLOT;		/* short slot time */
	/*
	 * Query the HAL to figure out h/w crypto support.
	 */
	if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_WEP))
		ic->ic_caps |= IEEE80211_C_WEP;
	if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_AES_OCB))
		ic->ic_caps |= IEEE80211_C_AES;
	if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_AES_CCM)) {
		ATH_DEBUG((ATH_DBG_ATTACH, "Atheros supports H/W CCMP\n"));
		ic->ic_caps |= IEEE80211_C_AES_CCM;
	}
	if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_CKIP))
		ic->ic_caps |= IEEE80211_C_CKIP;
	if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_TKIP)) {
		ATH_DEBUG((ATH_DBG_ATTACH, "Atheros supports H/W TKIP\n"));
		ic->ic_caps |= IEEE80211_C_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_MIC)) {
			ATH_DEBUG((ATH_DBG_ATTACH, "Support H/W TKIP MIC\n"));
			ic->ic_caps |= IEEE80211_C_TKIPMIC;
		}
		if (ATH_HAL_TKIPSPLIT(ah))
			asc->asc_splitmic = 1;
	}
	ic->ic_caps |= IEEE80211_C_WPA;	/* Support WPA/WPA2 */

	asc->asc_hasclrkey = ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_CLR);
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_state = IEEE80211_S_INIT;
	ic->ic_maxrssi = ATH_MAX_RSSI;
	ic->ic_set_shortslot = ath_set_shortslot;
	ic->ic_xmit = ath_xmit;
	ieee80211_attach(ic);

	/* Each instance has its own WPA door. */
	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
	    ddi_driver_name(devinfo),
	    ddi_get_instance(devinfo));

	/* Override 80211 default routines */
	ic->ic_reset = ath_reset;
	asc->asc_newstate = ic->ic_newstate;
	ic->ic_newstate = ath_newstate;
	ic->ic_watchdog = ath_watchdog;
	ic->ic_node_alloc = ath_node_alloc;
	ic->ic_node_free = ath_node_free;
	ic->ic_crypto.cs_key_alloc = ath_key_alloc;
	ic->ic_crypto.cs_key_delete = ath_key_delete;
	ic->ic_crypto.cs_key_set = ath_key_set;
	ieee80211_media_init(ic);
	/*
	 * Initialize the default tx key.
	 */
	ic->ic_def_txkey = 0;

	asc->asc_rx_pend = 0;
	ATH_HAL_INTRSET(ah, 0);
	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW,
	    &asc->asc_softint_id, NULL, 0, ath_softint_handler, (caddr_t)asc);
	if (err != DDI_SUCCESS) {
		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
		    "ddi_add_softintr() failed\n"));
		goto attach_fail5;
	}

	if (ddi_get_iblock_cookie(devinfo, 0, &asc->asc_iblock)
	    != DDI_SUCCESS) {
		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
		    "Can not get iblock cookie for INT\n"));
		goto attach_fail6;
	}

	if (ddi_add_intr(devinfo, 0, NULL, NULL, ath_intr,
	    (caddr_t)asc) != DDI_SUCCESS) {
		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
		    "Can not set intr for ATH driver\n"));
		goto attach_fail6;
	}

	/*
	 * Provide initial settings for the WiFi plugin; whenever this
	 * information changes, we need to call mac_plugindata_update()
	 */
	wd.wd_opmode = ic->ic_opmode;
	wd.wd_secalloc = WIFI_SEC_NONE;
	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid);

	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
		    "MAC version mismatch\n"));
		goto attach_fail7;
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
	macp->m_driver = asc;
	macp->m_dip = devinfo;
	macp->m_src_addr = ic->ic_macaddr;
	macp->m_callbacks = &ath_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = IEEE80211_MTU;
	macp->m_pdata = &wd;
	macp->m_pdata_size = sizeof (wd);

	err = mac_register(macp, &ic->ic_mach);
	mac_free(macp);
	if (err != 0) {
		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
		    "mac_register err %x\n", err));
		goto attach_fail7;
	}

	/* Create minor node of type DDI_NT_NET_WIFI */
	(void) snprintf(strbuf, sizeof (strbuf), "%s%d",
	    ATH_NODENAME, instance);
	err = ddi_create_minor_node(devinfo, strbuf, S_IFCHR,
	    instance + 1, DDI_NT_NET_WIFI, 0);
	if (err != DDI_SUCCESS)
		ATH_DEBUG((ATH_DBG_ATTACH, "WARN: ath: ath_attach(): "
		    "Create minor node failed - %d\n", err));

	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
	asc->asc_invalid = 1;
	asc->asc_isrunning = 0;
	asc->asc_promisc = B_FALSE;
	bzero(asc->asc_mcast_refs, sizeof (asc->asc_mcast_refs));
	bzero(asc->asc_mcast_hash, sizeof (asc->asc_mcast_hash));
	return (DDI_SUCCESS);
attach_fail7:
	ddi_remove_intr(devinfo, 0, asc->asc_iblock);
attach_fail6:
	ddi_remove_softintr(asc->asc_softint_id);
attach_fail5:
	(void) ieee80211_detach(ic);
attach_fail4:
	ath_desc_free(asc);
attach_fail3:
	ah->ah_detach(asc->asc_ah);
attach_fail2:
	ddi_regs_map_free(&asc->asc_io_handle);
attach_fail1:
	pci_config_teardown(&asc->asc_cfg_handle);
attach_fail0:
	asc->asc_invalid = 1;
	mutex_destroy(&asc->asc_txbuflock);
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(asc, i)) {
			struct ath_txq *txq = &asc->asc_txq[i];
			mutex_destroy(&txq->axq_lock);
		}
	}
	mutex_destroy(&asc->asc_rxbuflock);
	mutex_destroy(&asc->asc_genlock);
	mutex_destroy(&asc->asc_resched_lock);
	ddi_soft_state_free(ath_soft_state_p, instance);

	return (DDI_FAILURE);
}

/*
 * Suspend transmit/receive for powerdown
 */
static int
ath_suspend(ath_t *asc)
{
	ATH_LOCK(asc);
	ath_stop_locked(asc);
	ATH_UNLOCK(asc);
	ATH_DEBUG((ATH_DBG_SUSPEND, "ath: suspended.\n"));

	return (DDI_SUCCESS);
}

static int32_t
ath_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ath_t *asc;

	asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
	ASSERT(asc != NULL);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (ath_suspend(asc));

	default:
		return (DDI_FAILURE);
	}

	if (mac_disable(asc->asc_isc.ic_mach) != 0)
		return (DDI_FAILURE);

	ath_stop_scantimer(asc);

	/* disable interrupts */
	ATH_HAL_INTRSET(asc->asc_ah, 0);

	/*
	 * Unregister from the MAC layer subsystem
	 */
	(void) mac_unregister(asc->asc_isc.ic_mach);

	/* free interrupt resources */
	ddi_remove_intr(devinfo, 0, asc->asc_iblock);
	ddi_remove_softintr(asc->asc_softint_id);

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the HAL to
	 *   ensure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the HAL is called, so detach
	 *   it last
	 */
	ieee80211_detach(&asc->asc_isc);
	ath_desc_free(asc);
	ath_txq_cleanup(asc);
	asc->asc_ah->ah_detach(asc->asc_ah);

	/* free io handle */
	ddi_regs_map_free(&asc->asc_io_handle);
	pci_config_teardown(&asc->asc_cfg_handle);

	/* destroy locks */
	mutex_destroy(&asc->asc_rxbuflock);
	mutex_destroy(&asc->asc_genlock);
	mutex_destroy(&asc->asc_resched_lock);

	ddi_remove_minor_node(devinfo, NULL);
	ddi_soft_state_free(ath_soft_state_p, ddi_get_instance(devinfo));

	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled.  Therefore, this function must not
Therefore, this function must not be 2404 * blocked. 2405 * 2406 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 2407 * DDI_FAILURE indicates an error condition and should almost never happen. 2408 */ 2409 static int32_t 2410 ath_quiesce(dev_info_t *devinfo) 2411 { 2412 ath_t *asc; 2413 struct ath_hal *ah; 2414 int i; 2415 2416 asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo)); 2417 2418 if (asc == NULL || (ah = asc->asc_ah) == NULL) 2419 return (DDI_FAILURE); 2420 2421 /* 2422 * Disable interrupts 2423 */ 2424 ATH_HAL_INTRSET(ah, 0); 2425 2426 /* 2427 * Disable TX HW 2428 */ 2429 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 2430 if (ATH_TXQ_SETUP(asc, i)) { 2431 ATH_HAL_STOPTXDMA(ah, asc->asc_txq[i].axq_qnum); 2432 } 2433 } 2434 2435 /* 2436 * Disable RX HW 2437 */ 2438 ATH_HAL_STOPPCURECV(ah); 2439 ATH_HAL_SETRXFILTER(ah, 0); 2440 ATH_HAL_STOPDMARECV(ah); 2441 drv_usecwait(3000); 2442 2443 /* 2444 * Power down HW 2445 */ 2446 ATH_HAL_PHYDISABLE(ah); 2447 2448 return (DDI_SUCCESS); 2449 } 2450 2451 DDI_DEFINE_STREAM_OPS(ath_dev_ops, nulldev, nulldev, ath_attach, ath_detach, 2452 nodev, NULL, D_MP, NULL, ath_quiesce); 2453 2454 static struct modldrv ath_modldrv = { 2455 &mod_driverops, /* Type of module. This one is a driver */ 2456 "ath driver", /* short description */ 2457 &ath_dev_ops /* driver specific ops */ 2458 }; 2459 2460 static struct modlinkage modlinkage = { 2461 MODREV_1, (void *)&ath_modldrv, NULL 2462 }; 2463 2464 2465 int 2466 _info(struct modinfo *modinfop) 2467 { 2468 return (mod_info(&modlinkage, modinfop)); 2469 } 2470 2471 int 2472 _init(void) 2473 { 2474 int status; 2475 2476 status = ddi_soft_state_init(&ath_soft_state_p, sizeof (ath_t), 1); 2477 if (status != 0) 2478 return (status); 2479 2480 mutex_init(&ath_loglock, NULL, MUTEX_DRIVER, NULL); 2481 ath_halfix_init(); 2482 mac_init_ops(&ath_dev_ops, "ath"); 2483 status = mod_install(&modlinkage); 2484 if (status != 0) { 2485 mac_fini_ops(&ath_dev_ops); 2486 ath_halfix_finit(); 2487 mutex_destroy(&ath_loglock); 2488 ddi_soft_state_fini(&ath_soft_state_p); 2489 } 2490 2491 return (status); 2492 } 2493 2494 int 2495 _fini(void) 2496 { 2497 int status; 2498 2499 status = mod_remove(&modlinkage); 2500 if (status == 0) { 2501 mac_fini_ops(&ath_dev_ops); 2502 ath_halfix_finit(); 2503 mutex_destroy(&ath_loglock); 2504 ddi_soft_state_fini(&ath_soft_state_p); 2505 } 2506 return (status); 2507 } 2508