xref: /titanic_41/usr/src/uts/common/io/ath/ath_main.c (revision 450396635f70344c58b6b1e4db38cf17ff34445c)
1 /*
2  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  * notice, this list of conditions and the following disclaimer,
15  * without modification.
16  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
17  * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
18  * redistribution must be conditioned upon including a substantially
19  * similar Disclaimer requirement for further binary redistribution.
20  * 3. Neither the names of the above-listed copyright holders nor the names
21  * of any contributors may be used to endorse or promote products derived
22  * from this software without specific prior written permission.
23  *
24  * NO WARRANTY
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
28  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
29  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
30  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
33  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
35  * THE POSSIBILITY OF SUCH DAMAGES.
36  *
37  */
38 
39 /*
40  * Driver for the Atheros Wireless LAN controller.
41  *
42  * The Atheros driver calls into net80211 module for IEEE80211 protocol
43  * management functionalities. The driver includes a LLD(Low Level Driver)
44  * part to implement H/W related operations.
45  * The following is the high level structure of ath driver.
46  * (The arrows between modules indicate function call direction.)
47  *
48  *
49  *                                                  |
50  *                                                  | GLD thread
51  *                                                  V
52  *         ==================  =========================================
53  *         |                |  |[1]                                    |
54  *         |                |  |  GLDv3 Callback functions registered  |
55  *         |   Net80211     |  =========================       by      |
56  *         |    module      |          |               |     driver    |
57  *         |                |          V               |               |
58  *         |                |========================  |               |
59  *         |   Functions exported by net80211       |  |               |
60  *         |                                        |  |               |
61  *         ==========================================  =================
62  *                         |                                  |
63  *                         V                                  |
64  *         +----------------------------------+               |
65  *         |[2]                               |               |
66  *         |    Net80211 Callback functions   |               |
67  *         |      registered by LLD           |               |
68  *         +----------------------------------+               |
69  *                         |                                  |
70  *                         V                                  v
71  *         +-----------------------------------------------------------+
72  *         |[3]                                                        |
73  *         |                LLD Internal functions                     |
74  *         |                                                           |
75  *         +-----------------------------------------------------------+
76  *                                    ^
77  *                                    | Software interrupt thread
78  *                                    |
79  *
80  * The short description of each module is as below:
81  *      Module 1: GLD callback functions, which are intercepting the calls from
82  *                GLD to LLD.
83  *      Module 2: Net80211 callback functions registered by LLD, which
84  *                calls into LLD for H/W related functions needed by net80211.
85  *      Module 3: LLD Internal functions, which are responsible for allocating
86  *                descriptor/buffer, handling interrupt and other H/W
87  *                operations.
88  *
89  * All functions are running in 3 types of thread:
90  * 1. GLD callbacks threads, such as ioctl, intr, etc.
91  * 2. Clock interrupt thread which is responsible for scan, rate control and
92  *    calibration.
93  * 3. Software Interrupt thread originated in LLD.
94  *
95  * The lock strategy is as below:
96  * There are 4 queues for tx, each queue has one asc_txqlock[i] to
97  *      prevent conflicting access to queue resources from different threads.
98  *
99  * All the transmit buffers are contained in asc_txbuf which are
100  *      protected by asc_txbuflock.
101  *
102  * Each receive buffers are contained in asc_rxbuf which are protected
103  *      by asc_rxbuflock.
104  *
105  * In ath struct, asc_genlock is a general lock, protecting most other
106  *      operational data in ath_softc struct and HAL accesses.
107  *      It is acquired by the interrupt handler and most "mode-ctrl" routines.
108  *
109  * Any of the locks can be acquired singly, but where multiple
110  * locks are acquired, they *must* be in the order:
111  *    asc_genlock >> asc_txqlock[i] >> asc_txbuflock >> asc_rxbuflock
112  */
113 
114 #include <sys/param.h>
115 #include <sys/types.h>
116 #include <sys/signal.h>
117 #include <sys/stream.h>
118 #include <sys/termio.h>
119 #include <sys/errno.h>
120 #include <sys/file.h>
121 #include <sys/cmn_err.h>
122 #include <sys/stropts.h>
123 #include <sys/strsubr.h>
124 #include <sys/strtty.h>
125 #include <sys/kbio.h>
126 #include <sys/cred.h>
127 #include <sys/stat.h>
128 #include <sys/consdev.h>
129 #include <sys/kmem.h>
130 #include <sys/modctl.h>
131 #include <sys/ddi.h>
132 #include <sys/sunddi.h>
133 #include <sys/pci.h>
134 #include <sys/errno.h>
135 #include <sys/mac_provider.h>
136 #include <sys/dlpi.h>
137 #include <sys/ethernet.h>
138 #include <sys/list.h>
139 #include <sys/byteorder.h>
140 #include <sys/strsun.h>
141 #include <sys/policy.h>
142 #include <inet/common.h>
143 #include <inet/nd.h>
144 #include <inet/mi.h>
145 #include <inet/wifi_ioctl.h>
146 #include <sys/mac_wifi.h>
147 #include "ath_hal.h"
148 #include "ath_impl.h"
149 #include "ath_aux.h"
150 #include "ath_rate.h"
151 
152 #define	ATH_MAX_RSSI	63	/* max rssi */
153 
154 extern void ath_halfix_init(void);
155 extern void ath_halfix_finit(void);
156 extern int32_t ath_getset(ath_t *asc, mblk_t *mp, uint32_t cmd);
157 
158 /*
159  * PIO access attributes for registers
160  */
static ddi_device_acc_attr_t ath_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,	/* device registers are little-endian */
	DDI_STRICTORDER_ACC	/* no reordering of register accesses */
};
166 
167 /*
168  * DMA access attributes for descriptors: NOT to be byte swapped.
169  */
static ddi_device_acc_attr_t ath_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,	/* descriptors are little-endian structures */
	DDI_STRICTORDER_ACC	/* strict ordering for descriptor accesses */
};
175 
176 /*
177  * DMA attributes for rx/tx buffers
178  */
/* Used for each per-packet tx/rx data buffer (see ath_buflist_setup()). */
static ddi_dma_attr_t ath_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffU,		/* high address */
	0x3ffffU,		/* counter register max */
	1,			/* alignment */
	0xFFF,			/* burst sizes */
	1,			/* minimum transfer size */
	0x3ffffU,		/* max transfer size */
	0xffffffffU,		/* address register max */
	1,			/* no scatter-gather */
	1,			/* granularity of device */
	0,			/* DMA flags */
};
193 
/* Used for the single contiguous descriptor area (see ath_desc_alloc()). */
static ddi_dma_attr_t ath_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffU,		/* high address */
	0xffffffffU,		/* counter register max */
	0x1000,			/* alignment: 4KB */
	0xFFF,			/* burst sizes */
	1,			/* minimum transfer size */
	0xffffffffU,		/* max transfer size */
	0xffffffffU,		/* address register max */
	1,			/* no scatter-gather */
	1,			/* granularity of device */
	0,			/* DMA flags */
};
208 
static kmutex_t ath_loglock;	/* serializes ath_problem/ath_log/ath_dbg */
/* NOTE(review): presumably the soft-state handle for ath_t instances — confirm at attach */
static void *ath_soft_state_p = NULL;
static int ath_dwelltime = 150;		/* scan interval, ms */
212 
213 static int	ath_m_stat(void *,  uint_t, uint64_t *);
214 static int	ath_m_start(void *);
215 static void	ath_m_stop(void *);
216 static int	ath_m_promisc(void *, boolean_t);
217 static int	ath_m_multicst(void *, boolean_t, const uint8_t *);
218 static int	ath_m_unicst(void *, const uint8_t *);
219 static mblk_t	*ath_m_tx(void *, mblk_t *);
220 static void	ath_m_ioctl(void *, queue_t *, mblk_t *);
221 static int	ath_m_setprop(void *, const char *, mac_prop_id_t,
222     uint_t, const void *);
223 static int	ath_m_getprop(void *, const char *, mac_prop_id_t,
224     uint_t, uint_t, void *, uint_t *);
225 
/*
 * GLDv3 entry points (Module 1 in the block comment above); the first
 * field flags which optional callbacks are provided.
 */
static mac_callbacks_t ath_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP,
	ath_m_stat,
	ath_m_start,
	ath_m_stop,
	ath_m_promisc,
	ath_m_multicst,
	ath_m_unicst,
	ath_m_tx,
	ath_m_ioctl,
	NULL,		/* mc_getcapab */
	NULL,
	NULL,
	ath_m_setprop,
	ath_m_getprop
};
242 
243 /*
244  * Available debug flags:
245  * ATH_DBG_INIT, ATH_DBG_GLD, ATH_DBG_HAL, ATH_DBG_INT, ATH_DBG_ATTACH,
246  * ATH_DBG_DETACH, ATH_DBG_AUX, ATH_DBG_WIFICFG, ATH_DBG_OSDEP
247  */
248 uint32_t ath_dbg_flags = 0;
249 
250 /*
251  * Exception/warning cases not leading to panic.
252  */
253 void
254 ath_problem(const int8_t *fmt, ...)
255 {
256 	va_list args;
257 
258 	mutex_enter(&ath_loglock);
259 
260 	va_start(args, fmt);
261 	vcmn_err(CE_WARN, fmt, args);
262 	va_end(args);
263 
264 	mutex_exit(&ath_loglock);
265 }
266 
267 /*
268  * Normal log information independent of debug.
269  */
270 void
271 ath_log(const int8_t *fmt, ...)
272 {
273 	va_list args;
274 
275 	mutex_enter(&ath_loglock);
276 
277 	va_start(args, fmt);
278 	vcmn_err(CE_CONT, fmt, args);
279 	va_end(args);
280 
281 	mutex_exit(&ath_loglock);
282 }
283 
284 void
285 ath_dbg(uint32_t dbg_flags, const int8_t *fmt, ...)
286 {
287 	va_list args;
288 
289 	if (dbg_flags & ath_dbg_flags) {
290 		mutex_enter(&ath_loglock);
291 		va_start(args, fmt);
292 		vcmn_err(CE_CONT, fmt, args);
293 		va_end(args);
294 		mutex_exit(&ath_loglock);
295 	}
296 }
297 
/*
 * Prepare a buffer for receive and append it to the hardware rx chain.
 * The descriptor's link field is pointed at its own physical address
 * (self-linked); ath_rx_handler() uses ds_link == bf_daddr as the
 * end-of-list marker.
 */
void
ath_setup_desc(ath_t *asc, struct ath_buf *bf)
{
	struct ath_desc *ds;

	ds = bf->bf_desc;
	ds->ds_link = bf->bf_daddr;
	ds->ds_data = bf->bf_dma.cookie.dmac_address;
	ATH_HAL_SETUPRXDESC(asc->asc_ah, ds,
	    bf->bf_dma.alength,		/* buffer size */
	    0);

	/* chain onto the previous rx descriptor, if any */
	if (asc->asc_rxlink != NULL)
		*asc->asc_rxlink = bf->bf_daddr;
	asc->asc_rxlink = &ds->ds_link;
}
314 
315 
316 /*
317  * Allocate an area of memory and a DMA handle for accessing it
318  */
319 static int
320 ath_alloc_dma_mem(dev_info_t *devinfo, ddi_dma_attr_t *dma_attr, size_t memsize,
321     ddi_device_acc_attr_t *attr_p, uint_t alloc_flags,
322     uint_t bind_flags, dma_area_t *dma_p)
323 {
324 	int err;
325 
326 	/*
327 	 * Allocate handle
328 	 */
329 	err = ddi_dma_alloc_handle(devinfo, dma_attr,
330 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
331 	if (err != DDI_SUCCESS)
332 		return (DDI_FAILURE);
333 
334 	/*
335 	 * Allocate memory
336 	 */
337 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
338 	    alloc_flags, DDI_DMA_SLEEP, NULL, &dma_p->mem_va,
339 	    &dma_p->alength, &dma_p->acc_hdl);
340 	if (err != DDI_SUCCESS)
341 		return (DDI_FAILURE);
342 
343 	/*
344 	 * Bind the two together
345 	 */
346 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
347 	    dma_p->mem_va, dma_p->alength, bind_flags,
348 	    DDI_DMA_SLEEP, NULL, &dma_p->cookie, &dma_p->ncookies);
349 	if (err != DDI_DMA_MAPPED)
350 		return (DDI_FAILURE);
351 
352 	dma_p->nslots = ~0U;
353 	dma_p->size = ~0U;
354 	dma_p->token = ~0U;
355 	dma_p->offset = 0;
356 	return (DDI_SUCCESS);
357 }
358 
359 /*
360  * Free one allocated area of DMAable memory
361  */
362 static void
363 ath_free_dma_mem(dma_area_t *dma_p)
364 {
365 	if (dma_p->dma_hdl != NULL) {
366 		(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
367 		if (dma_p->acc_hdl != NULL) {
368 			ddi_dma_mem_free(&dma_p->acc_hdl);
369 			dma_p->acc_hdl = NULL;
370 		}
371 		ddi_dma_free_handle(&dma_p->dma_hdl);
372 		dma_p->ncookies = 0;
373 		dma_p->dma_hdl = NULL;
374 	}
375 }
376 
377 
378 /*
379  * Initialize tx/rx buffer list. Allocate DMA memory for
380  * each buffer.
381  */
/*
 * Populate one tx/rx buffer list with nbuf entries.  *pbf and *pds are
 * cursors into the ath_buf array and the descriptor area; on success
 * they are advanced past the entries consumed here so the caller can
 * set up the next list.  On failure they are left untouched and the
 * caller is expected to clean up via ath_desc_free().
 */
static int
ath_buflist_setup(dev_info_t *devinfo, ath_t *asc, list_t *bflist,
    struct ath_buf **pbf, struct ath_desc **pds, int nbuf, uint_t dmabflags)
{
	int i, err;
	struct ath_buf *bf = *pbf;
	struct ath_desc *ds = *pds;

	list_create(bflist, sizeof (struct ath_buf),
	    offsetof(struct ath_buf, bf_node));
	for (i = 0; i < nbuf; i++, bf++, ds++) {
		bf->bf_desc = ds;
		/* physical addr of descriptor = area base + byte offset */
		bf->bf_daddr = asc->asc_desc_dma.cookie.dmac_address +
		    ((uintptr_t)ds - (uintptr_t)asc->asc_desc);
		list_insert_tail(bflist, bf);

		/* alloc DMA memory */
		err = ath_alloc_dma_mem(devinfo, &ath_dma_attr,
		    asc->asc_dmabuf_size, &ath_desc_accattr, DDI_DMA_STREAMING,
		    dmabflags, &bf->bf_dma);
		if (err != DDI_SUCCESS)
			return (err);
	}
	*pbf = bf;
	*pds = ds;

	return (DDI_SUCCESS);
}
410 
411 /*
412  * Destroy tx/rx buffer list. Free DMA memory.
413  */
414 static void
415 ath_buflist_cleanup(list_t *buflist)
416 {
417 	struct ath_buf *bf;
418 
419 	if (!buflist)
420 		return;
421 
422 	bf = list_head(buflist);
423 	while (bf != NULL) {
424 		if (bf->bf_m != NULL) {
425 			freemsg(bf->bf_m);
426 			bf->bf_m = NULL;
427 		}
428 		/* Free DMA buffer */
429 		ath_free_dma_mem(&bf->bf_dma);
430 		if (bf->bf_in != NULL) {
431 			ieee80211_free_node(bf->bf_in);
432 			bf->bf_in = NULL;
433 		}
434 		list_remove(buflist, bf);
435 		bf = list_head(buflist);
436 	}
437 	list_destroy(buflist);
438 }
439 
440 
/*
 * Release everything set up by ath_desc_alloc(): both buffer lists,
 * the shared descriptor DMA area, and the ath_buf array.
 */
static void
ath_desc_free(ath_t *asc)
{
	ath_buflist_cleanup(&asc->asc_txbuf_list);
	ath_buflist_cleanup(&asc->asc_rxbuf_list);

	/* Free descriptor DMA buffer */
	ath_free_dma_mem(&asc->asc_desc_dma);

	/* release the ath_buf array allocated in ath_desc_alloc() */
	kmem_free((void *)asc->asc_vbufptr, asc->asc_vbuflen);
	asc->asc_vbufptr = NULL;
}
453 
454 static int
455 ath_desc_alloc(dev_info_t *devinfo, ath_t *asc)
456 {
457 	int err;
458 	size_t size;
459 	struct ath_desc *ds;
460 	struct ath_buf *bf;
461 
462 	size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF);
463 
464 	err = ath_alloc_dma_mem(devinfo, &ath_desc_dma_attr, size,
465 	    &ath_desc_accattr, DDI_DMA_CONSISTENT,
466 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &asc->asc_desc_dma);
467 
468 	/* virtual address of the first descriptor */
469 	asc->asc_desc = (struct ath_desc *)asc->asc_desc_dma.mem_va;
470 
471 	ds = asc->asc_desc;
472 	ATH_DEBUG((ATH_DBG_INIT, "ath: ath_desc_alloc(): DMA map: "
473 	    "%p (%d) -> %p\n",
474 	    asc->asc_desc, asc->asc_desc_dma.alength,
475 	    asc->asc_desc_dma.cookie.dmac_address));
476 
477 	/* allocate data structures to describe TX/RX DMA buffers */
478 	asc->asc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF);
479 	bf = (struct ath_buf *)kmem_zalloc(asc->asc_vbuflen, KM_SLEEP);
480 	asc->asc_vbufptr = bf;
481 
482 	/* DMA buffer size for each TX/RX packet */
483 	asc->asc_dmabuf_size = roundup(1000 + sizeof (struct ieee80211_frame) +
484 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
485 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
486 	    IEEE80211_WEP_CRCLEN), asc->asc_cachelsz);
487 
488 	/* create RX buffer list */
489 	err = ath_buflist_setup(devinfo, asc, &asc->asc_rxbuf_list, &bf, &ds,
490 	    ATH_RXBUF, DDI_DMA_READ | DDI_DMA_STREAMING);
491 	if (err != DDI_SUCCESS) {
492 		ath_desc_free(asc);
493 		return (err);
494 	}
495 
496 	/* create TX buffer list */
497 	err = ath_buflist_setup(devinfo, asc, &asc->asc_txbuf_list, &bf, &ds,
498 	    ATH_TXBUF, DDI_DMA_STREAMING);
499 	if (err != DDI_SUCCESS) {
500 		ath_desc_free(asc);
501 		return (err);
502 	}
503 
504 
505 	return (DDI_SUCCESS);
506 }
507 
/*
 * Dump one rx descriptor via ATH_DEBUG.  The trailing character is
 * ' ' while the descriptor is not yet done, '*' on clean completion
 * and '!' when the HAL reported a non-zero rx status.
 */
static void
ath_printrxbuf(struct ath_buf *bf, int32_t done)
{
	struct ath_desc *ds = bf->bf_desc;
	const struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;

	ATH_DEBUG((ATH_DBG_RECV, "ath: R (%p %p) %08x %08x %08x "
	    "%08x %08x %08x %c\n",
	    ds, bf->bf_daddr,
	    ds->ds_link, ds->ds_data,
	    ds->ds_ctl0, ds->ds_ctl1,
	    ds->ds_hw[0], ds->ds_hw[1],
	    !done ? ' ' : (rs->rs_status == 0) ? '*' : '!'));
}
522 
523 static void
524 ath_rx_handler(ath_t *asc)
525 {
526 	ieee80211com_t *ic = (ieee80211com_t *)asc;
527 	struct ath_buf *bf;
528 	struct ath_hal *ah = asc->asc_ah;
529 	struct ath_desc *ds;
530 	struct ath_rx_status *rs;
531 	mblk_t *rx_mp;
532 	struct ieee80211_frame *wh;
533 	int32_t len, loop = 1;
534 	uint8_t phyerr;
535 	HAL_STATUS status;
536 	HAL_NODE_STATS hal_node_stats;
537 	struct ieee80211_node *in;
538 
539 	do {
540 		mutex_enter(&asc->asc_rxbuflock);
541 		bf = list_head(&asc->asc_rxbuf_list);
542 		if (bf == NULL) {
543 			ATH_DEBUG((ATH_DBG_RECV, "ath: ath_rx_handler(): "
544 			    "no buffer\n"));
545 			mutex_exit(&asc->asc_rxbuflock);
546 			break;
547 		}
548 		ASSERT(bf->bf_dma.cookie.dmac_address != NULL);
549 		ds = bf->bf_desc;
550 		if (ds->ds_link == bf->bf_daddr) {
551 			/*
552 			 * Never process the self-linked entry at the end,
553 			 * this may be met at heavy load.
554 			 */
555 			mutex_exit(&asc->asc_rxbuflock);
556 			break;
557 		}
558 
559 		rs = &bf->bf_status.ds_rxstat;
560 		status = ATH_HAL_RXPROCDESC(ah, ds,
561 		    bf->bf_daddr,
562 		    ATH_PA2DESC(asc, ds->ds_link), rs);
563 		if (status == HAL_EINPROGRESS) {
564 			mutex_exit(&asc->asc_rxbuflock);
565 			break;
566 		}
567 		list_remove(&asc->asc_rxbuf_list, bf);
568 		mutex_exit(&asc->asc_rxbuflock);
569 
570 		if (rs->rs_status != 0) {
571 			if (rs->rs_status & HAL_RXERR_CRC)
572 				asc->asc_stats.ast_rx_crcerr++;
573 			if (rs->rs_status & HAL_RXERR_FIFO)
574 				asc->asc_stats.ast_rx_fifoerr++;
575 			if (rs->rs_status & HAL_RXERR_DECRYPT)
576 				asc->asc_stats.ast_rx_badcrypt++;
577 			if (rs->rs_status & HAL_RXERR_PHY) {
578 				asc->asc_stats.ast_rx_phyerr++;
579 				phyerr = rs->rs_phyerr & 0x1f;
580 				asc->asc_stats.ast_rx_phy[phyerr]++;
581 			}
582 			goto rx_next;
583 		}
584 		len = rs->rs_datalen;
585 
586 		/* less than sizeof(struct ieee80211_frame) */
587 		if (len < 20) {
588 			asc->asc_stats.ast_rx_tooshort++;
589 			goto rx_next;
590 		}
591 
592 		if ((rx_mp = allocb(asc->asc_dmabuf_size, BPRI_MED)) == NULL) {
593 			ath_problem("ath: ath_rx_handler(): "
594 			    "allocing mblk buffer failed.\n");
595 			return;
596 		}
597 
598 		ATH_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORCPU);
599 		bcopy(bf->bf_dma.mem_va, rx_mp->b_rptr, len);
600 
601 		rx_mp->b_wptr += len;
602 		wh = (struct ieee80211_frame *)rx_mp->b_rptr;
603 		if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
604 		    IEEE80211_FC0_TYPE_CTL) {
605 			/*
606 			 * Ignore control frame received in promisc mode.
607 			 */
608 			freemsg(rx_mp);
609 			goto rx_next;
610 		}
611 		/* Remove the CRC at the end of IEEE80211 frame */
612 		rx_mp->b_wptr -= IEEE80211_CRC_LEN;
613 #ifdef DEBUG
614 		ath_printrxbuf(bf, status == HAL_OK);
615 #endif /* DEBUG */
616 		/*
617 		 * Locate the node for sender, track state, and then
618 		 * pass the (referenced) node up to the 802.11 layer
619 		 * for its use.
620 		 */
621 		in = ieee80211_find_rxnode(ic, wh);
622 
623 		/*
624 		 * Send frame up for processing.
625 		 */
626 		(void) ieee80211_input(ic, rx_mp, in,
627 		    rs->rs_rssi, rs->rs_tstamp);
628 
629 		ieee80211_free_node(in);
630 
631 rx_next:
632 		mutex_enter(&asc->asc_rxbuflock);
633 		list_insert_tail(&asc->asc_rxbuf_list, bf);
634 		mutex_exit(&asc->asc_rxbuflock);
635 		ath_setup_desc(asc, bf);
636 	} while (loop);
637 
638 	/* rx signal state monitoring */
639 	ATH_HAL_RXMONITOR(ah, &hal_node_stats, &asc->asc_curchan);
640 }
641 
/*
 * Dump one tx descriptor via ATH_DEBUG.  The trailing character is
 * ' ' while the descriptor is not yet done, '*' on clean completion
 * and '!' when the HAL reported a non-zero tx status.
 */
static void
ath_printtxbuf(struct ath_buf *bf, int done)
{
	struct ath_desc *ds = bf->bf_desc;
	const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;

	ATH_DEBUG((ATH_DBG_SEND, "ath: T(%p %p) %08x %08x %08x %08x %08x"
	    " %08x %08x %08x %c\n",
	    ds, bf->bf_daddr,
	    ds->ds_link, ds->ds_data,
	    ds->ds_ctl0, ds->ds_ctl1,
	    ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
	    !done ? ' ' : (ts->ts_status == 0) ? '*' : '!'));
}
656 
/*
 * Build and queue the tx descriptor(s) for one outgoing frame.
 * Returns 0 on success, non-zero on failure (caller reclaims bf/mp).
 *
 * The input parameter mp has following assumption:
 * For data packets, GLDv3 mac_wifi plugin allocates and fills the
 * ieee80211 header. For management packets, net80211 allocates and
 * fills the ieee80211 header. In both cases, enough spaces in the
 * header are left for encryption option.
 */
static int32_t
ath_tx_start(ath_t *asc, struct ieee80211_node *in, struct ath_buf *bf,
    mblk_t *mp)
{
	ieee80211com_t *ic = (ieee80211com_t *)asc;
	struct ieee80211_frame *wh;
	struct ath_hal *ah = asc->asc_ah;
	uint32_t subtype, flags, ctsduration;
	int32_t keyix, iswep, hdrlen, pktlen, mblen, mbslen, try0;
	uint8_t rix, cix, txrate, ctsrate;
	struct ath_desc *ds;
	struct ath_txq *txq;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	caddr_t dest;

	/*
	 * CRC are added by H/W, not encaped by driver,
	 * but we must count it in pkt length.
	 */
	pktlen = IEEE80211_CRC_LEN;

	wh = (struct ieee80211_frame *)mp->b_rptr;
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	keyix = HAL_TXKEYIX_INVALID;
	hdrlen = sizeof (struct ieee80211_frame);
	if (iswep != 0) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ic, mp);
		if (k == NULL) {
			ATH_DEBUG((ATH_DBG_AUX, "crypto_encap failed\n"));
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (EIO);
		}
		cip = k->wk_cipher;
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len above will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		hdrlen += cip->ic_header;
		pktlen += cip->ic_trailer;
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
			pktlen += cip->ic_miclen;
		keyix = k->wk_keyix;

		/* packet header may have moved, reset our local pointer */
		wh = (struct ieee80211_frame *)mp->b_rptr;
	}

	/* copy the (possibly chained) mblk into the contiguous DMA buffer */
	dest = bf->bf_dma.mem_va;
	for (; mp != NULL; mp = mp->b_cont) {
		mblen = MBLKL(mp);
		bcopy(mp->b_rptr, dest, mblen);
		dest += mblen;
	}
	mbslen = (uintptr_t)dest - (uintptr_t)bf->bf_dma.mem_va;
	pktlen += mbslen;

	bf->bf_in = in;

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = asc->asc_currates;
	ASSERT(rt != NULL);

	/*
	 * The 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (in->in_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		asc->asc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(in);

	/*
	 * Calculate Atheros packet type from IEEE80211 packet header
	 * and setup for rate calculations.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;
		rix = 0;	/* lowest rate */
		try0 = ATH_TXMAXTRY;
		if (shortPreamble)
			txrate = an->an_tx_mgtratesp;
		else
			txrate = an->an_tx_mgtrate;
		/* force all ctl frames to highest queue */
		txq = asc->asc_ac2q[WME_AC_VO];
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		rix = 0;	/* lowest rate */
		try0 = ATH_TXMAXTRY;
		if (shortPreamble)
			txrate = an->an_tx_mgtratesp;
		else
			txrate = an->an_tx_mgtrate;
		/* force all ctl frames to highest queue */
		txq = asc->asc_ac2q[WME_AC_VO];
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;
		rix = an->an_tx_rix0;
		try0 = an->an_tx_try0;
		if (shortPreamble)
			txrate = an->an_tx_rate0sp;
		else
			txrate = an->an_tx_rate0;
		/* Always use background queue */
		txq = asc->asc_ac2q[WME_AC_BK];
		break;
	default:
		/* Unknown 802.11 frame */
		asc->asc_stats.ast_tx_invalid++;
		return (1);
	}
	/*
	 * Calculate miscellaneous flags.
	 */
	flags = HAL_TXDESC_CLRDMASK;
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
		asc->asc_stats.ast_tx_noack++;
	} else if (pktlen > ic->ic_rtsthreshold) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		asc->asc_stats.ast_tx_rts++;
	}

	/*
	 * Calculate duration.  This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
	    IEEE80211_FC0_TYPE_CTL) {
		uint16_t dur;
		dur = ath_hal_computetxtime(ah, rt, IEEE80211_ACK_SIZE,
		    rix, shortPreamble);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		*(uint16_t *)wh->i_dur = LE_16(dur);
	}

	/*
	 * Calculate RTS/CTS rate and duration if needed.
	 */
	ctsduration = 0;
	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
		/*
		 * CTS transmit rate is derived from the transmit rate
		 * by looking in the h/w rate table.  We must also factor
		 * in whether or not a short preamble is to be used.
		 */
		cix = rt->info[rix].controlRate;
		ctsrate = rt->info[cix].rateCode;
		if (shortPreamble)
			ctsrate |= rt->info[cix].shortPreamble;
		/*
		 * Compute the transmit duration based on the size
		 * of an ACK frame.  We call into the HAL to do the
		 * computation since it depends on the characteristics
		 * of the actual PHY being used.
		 */
		if (flags & HAL_TXDESC_RTSENA) {	/* SIFS + CTS */
			ctsduration += ath_hal_computetxtime(ah,
			    rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
		}
		/* SIFS + data */
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, shortPreamble);
		if ((flags & HAL_TXDESC_NOACK) == 0) {  /* SIFS + ACK */
			ctsduration += ath_hal_computetxtime(ah,
			    rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
		}
	} else
		ctsrate = 0;

	/* request a tx-complete interrupt every ATH_TXINTR_PERIOD frames */
	if (++txq->axq_intrcnt >= ATH_TXINTR_PERIOD) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	ATH_HAL_SETUPTXDESC(ah, ds,
	    pktlen,			/* packet length */
	    hdrlen,			/* header length */
	    atype,			/* Atheros packet type */
	    MIN(in->in_txpower, 60),	/* txpower */
	    txrate, try0,		/* series 0 rate/tries */
	    keyix,			/* key cache index */
	    an->an_tx_antenna,		/* antenna mode */
	    flags,			/* flags */
	    ctsrate,			/* rts/cts rate */
	    ctsduration);		/* rts/cts duration */
	bf->bf_flags = flags;

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ATH_DEBUG((ATH_DBG_SEND, "ath: ath_xmit(): to %s totlen=%d "
	    "an->an_tx_rate1sp=%d tx_rate2sp=%d tx_rate3sp=%d "
	    "qnum=%d rix=%d sht=%d dur = %d\n",
	    ieee80211_macaddr_sprintf(wh->i_addr1), mbslen, an->an_tx_rate1sp,
	    an->an_tx_rate2sp, an->an_tx_rate3sp,
	    txq->axq_qnum, rix, shortPreamble, *(uint16_t *)wh->i_dur));

	/*
	 * Setup the multi-rate retry state only when we're
	 * going to use it.  This assumes ath_hal_setuptxdesc
	 * initializes the descriptors (so we don't have to)
	 * when the hardware supports multi-rate retry and
	 * we don't use it.
	 */
	if (try0 != ATH_TXMAXTRY)
		ATH_HAL_SETUPXTXDESC(ah, ds,
		    an->an_tx_rate1sp, 2,	/* series 1 */
		    an->an_tx_rate2sp, 2,	/* series 2 */
		    an->an_tx_rate3sp, 2);	/* series 3 */

	/* whole frame fits one descriptor: first and last segment */
	ds->ds_link = 0;
	ds->ds_data = bf->bf_dma.cookie.dmac_address;
	ATH_HAL_FILLTXDESC(ah, ds,
	    mbslen,		/* segment length */
	    AH_TRUE,		/* first segment */
	    AH_TRUE,		/* last segment */
	    ds);		/* first descriptor */

	ATH_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORDEV);

	/* append to the tx queue; start DMA if the queue was idle */
	mutex_enter(&txq->axq_lock);
	list_insert_tail(&txq->axq_list, bf);
	if (txq->axq_link == NULL) {
		ATH_HAL_PUTTXBUF(ah, txq->axq_qnum, bf->bf_daddr);
	} else {
		*txq->axq_link = bf->bf_daddr;
	}
	txq->axq_link = &ds->ds_link;
	mutex_exit(&txq->axq_lock);

	ATH_HAL_TXSTART(ah, txq->axq_qnum);

	ic->ic_stats.is_tx_frags++;
	ic->ic_stats.is_tx_bytes += pktlen;

	return (0);
}
943 
944 /*
945  * Transmit a management frame.  On failure we reclaim the skbuff.
946  * Note that management frames come directly from the 802.11 layer
947  * and do not honor the send queue flow control.  Need to investigate
948  * using priority queueing so management frames can bypass data.
949  */
950 static int
951 ath_xmit(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
952 {
953 	ath_t *asc = (ath_t *)ic;
954 	struct ath_hal *ah = asc->asc_ah;
955 	struct ieee80211_node *in = NULL;
956 	struct ath_buf *bf = NULL;
957 	struct ieee80211_frame *wh;
958 	int error = 0;
959 
960 	ASSERT(mp->b_next == NULL);
961 
962 	if (!ATH_IS_RUNNING(asc)) {
963 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
964 		    IEEE80211_FC0_TYPE_DATA) {
965 			freemsg(mp);
966 		}
967 		return (ENXIO);
968 	}
969 
970 	/* Grab a TX buffer */
971 	mutex_enter(&asc->asc_txbuflock);
972 	bf = list_head(&asc->asc_txbuf_list);
973 	if (bf != NULL)
974 		list_remove(&asc->asc_txbuf_list, bf);
975 	if (list_empty(&asc->asc_txbuf_list)) {
976 		ATH_DEBUG((ATH_DBG_SEND, "ath: ath_mgmt_send(): "
977 		    "stop queue\n"));
978 		asc->asc_stats.ast_tx_qstop++;
979 	}
980 	mutex_exit(&asc->asc_txbuflock);
981 	if (bf == NULL) {
982 		ATH_DEBUG((ATH_DBG_SEND, "ath: ath_mgmt_send(): discard, "
983 		    "no xmit buf\n"));
984 		ic->ic_stats.is_tx_nobuf++;
985 		if ((type & IEEE80211_FC0_TYPE_MASK) ==
986 		    IEEE80211_FC0_TYPE_DATA) {
987 			asc->asc_stats.ast_tx_nobuf++;
988 			mutex_enter(&asc->asc_resched_lock);
989 			asc->asc_resched_needed = B_TRUE;
990 			mutex_exit(&asc->asc_resched_lock);
991 		} else {
992 			asc->asc_stats.ast_tx_nobufmgt++;
993 			freemsg(mp);
994 		}
995 		return (ENOMEM);
996 	}
997 
998 	wh = (struct ieee80211_frame *)mp->b_rptr;
999 
1000 	/* Locate node */
1001 	in = ieee80211_find_txnode(ic,  wh->i_addr1);
1002 	if (in == NULL) {
1003 		error = EIO;
1004 		goto bad;
1005 	}
1006 
1007 	in->in_inact = 0;
1008 	switch (type & IEEE80211_FC0_TYPE_MASK) {
1009 	case IEEE80211_FC0_TYPE_DATA:
1010 		(void) ieee80211_encap(ic, mp, in);
1011 		break;
1012 	default:
1013 		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
1014 		    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
1015 			/* fill time stamp */
1016 			uint64_t tsf;
1017 			uint32_t *tstamp;
1018 
1019 			tsf = ATH_HAL_GETTSF64(ah);
1020 			/* adjust 100us delay to xmit */
1021 			tsf += 100;
1022 			/* LINTED E_BAD_PTR_CAST_ALIGN */
1023 			tstamp = (uint32_t *)&wh[1];
1024 			tstamp[0] = LE_32(tsf & 0xffffffff);
1025 			tstamp[1] = LE_32(tsf >> 32);
1026 		}
1027 		asc->asc_stats.ast_tx_mgmt++;
1028 		break;
1029 	}
1030 
1031 	error = ath_tx_start(asc, in, bf, mp);
1032 	if (error != 0) {
1033 bad:
1034 		ic->ic_stats.is_tx_failed++;
1035 		if (bf != NULL) {
1036 			mutex_enter(&asc->asc_txbuflock);
1037 			list_insert_tail(&asc->asc_txbuf_list, bf);
1038 			mutex_exit(&asc->asc_txbuflock);
1039 		}
1040 	}
1041 	if (in != NULL)
1042 		ieee80211_free_node(in);
1043 	if ((type & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_DATA ||
1044 	    error == 0) {
1045 		freemsg(mp);
1046 	}
1047 
1048 	return (error);
1049 }
1050 
1051 static mblk_t *
1052 ath_m_tx(void *arg, mblk_t *mp)
1053 {
1054 	ath_t *asc = arg;
1055 	ieee80211com_t *ic = (ieee80211com_t *)asc;
1056 	mblk_t *next;
1057 	int error = 0;
1058 
1059 	/*
1060 	 * No data frames go out unless we're associated; this
1061 	 * should not happen as the 802.11 layer does not enable
1062 	 * the xmit queue until we enter the RUN state.
1063 	 */
1064 	if (ic->ic_state != IEEE80211_S_RUN) {
1065 		ATH_DEBUG((ATH_DBG_SEND, "ath: ath_m_tx(): "
1066 		    "discard, state %u\n", ic->ic_state));
1067 		asc->asc_stats.ast_tx_discard++;
1068 		freemsgchain(mp);
1069 		return (NULL);
1070 	}
1071 
1072 	while (mp != NULL) {
1073 		next = mp->b_next;
1074 		mp->b_next = NULL;
1075 		error = ath_xmit(ic, mp, IEEE80211_FC0_TYPE_DATA);
1076 		if (error != 0) {
1077 			mp->b_next = next;
1078 			if (error == ENOMEM) {
1079 				break;
1080 			} else {
1081 				freemsgchain(mp);	/* CR6501759 issues */
1082 				return (NULL);
1083 			}
1084 		}
1085 		mp = next;
1086 	}
1087 
1088 	return (mp);
1089 }
1090 
/*
 * Reap completed transmit descriptors from a single hardware queue,
 * update per-node and driver statistics, recycle the ath_bufs, and
 * restart any stalled upper-layer transmit.  Returns the number of
 * acknowledged frames reaped.
 */
static int
ath_tx_processq(ath_t *asc, struct ath_txq *txq)
{
	ieee80211com_t *ic = (ieee80211com_t *)asc;
	struct ath_hal *ah = asc->asc_ah;
	struct ath_buf *bf;
	struct ath_desc *ds;
	struct ieee80211_node *in;
	int32_t sr, lr, nacked = 0;
	struct ath_tx_status *ts;
	HAL_STATUS status;
	struct ath_node *an;

	for (;;) {
		mutex_enter(&txq->axq_lock);
		bf = list_head(&txq->axq_list);
		if (bf == NULL) {
			/* queue drained: clear the MAC link pointer */
			txq->axq_link = NULL;
			mutex_exit(&txq->axq_lock);
			break;
		}
		ds = bf->bf_desc;	/* last descriptor */
		ts = &bf->bf_status.ds_txstat;
		status = ATH_HAL_TXPROCDESC(ah, ds, ts);
#ifdef DEBUG
		ath_printtxbuf(bf, status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS) {
			/* hardware still owns this descriptor; stop here */
			mutex_exit(&txq->axq_lock);
			break;
		}
		list_remove(&txq->axq_list, bf);
		mutex_exit(&txq->axq_lock);
		in = bf->bf_in;
		if (in != NULL) {
			an = ATH_NODE(in);
			/* Successful transmission */
			if (ts->ts_status == 0) {
				an->an_tx_ok++;
				an->an_tx_antenna = ts->ts_antenna;
				if (ts->ts_rate & HAL_TXSTAT_ALTRATE)
					asc->asc_stats.ast_tx_altrate++;
				asc->asc_stats.ast_tx_rssidelta =
				    ts->ts_rssi - asc->asc_stats.ast_tx_rssi;
				asc->asc_stats.ast_tx_rssi = ts->ts_rssi;
			} else {
				an->an_tx_err++;
				if (ts->ts_status & HAL_TXERR_XRETRY)
					asc->asc_stats.ast_tx_xretries++;
				if (ts->ts_status & HAL_TXERR_FIFO)
					asc->asc_stats.ast_tx_fifoerr++;
				if (ts->ts_status & HAL_TXERR_FILT)
					asc->asc_stats.ast_tx_filtered++;
				an->an_tx_antenna = 0;	/* invalidate */
			}
			sr = ts->ts_shortretry;
			lr = ts->ts_longretry;
			asc->asc_stats.ast_tx_shortretry += sr;
			asc->asc_stats.ast_tx_longretry += lr;
			/*
			 * Hand the descriptor to the rate control algorithm.
			 * NOTE(review): an_tx_ok/an_tx_err were already
			 * bumped above, so frames taking this branch are
			 * counted twice -- confirm this is intentional.
			 */
			if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
			    (bf->bf_flags & HAL_TXDESC_NOACK) == 0) {
				/*
				 * If frame was ack'd update the last rx time
				 * used to workaround phantom bmiss interrupts.
				 */
				if (ts->ts_status == 0) {
					nacked++;
					an->an_tx_ok++;
				} else {
					an->an_tx_err++;
				}
				an->an_tx_retr += sr + lr;
			}
		}
		bf->bf_in = NULL;
		/* return the buffer to the free list */
		mutex_enter(&asc->asc_txbuflock);
		list_insert_tail(&asc->asc_txbuf_list, bf);
		mutex_exit(&asc->asc_txbuflock);
		/*
		 * Reschedule stalled outbound packets
		 */
		mutex_enter(&asc->asc_resched_lock);
		if (asc->asc_resched_needed) {
			asc->asc_resched_needed = B_FALSE;
			mac_tx_update(ic->ic_mach);
		}
		mutex_exit(&asc->asc_resched_lock);
	}
	return (nacked);
}
1184 
1185 
1186 static void
1187 ath_tx_handler(ath_t *asc)
1188 {
1189 	int i;
1190 
1191 	/*
1192 	 * Process each active queue.
1193 	 */
1194 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
1195 		if (ATH_TXQ_SETUP(asc, i)) {
1196 			(void) ath_tx_processq(asc, &asc->asc_txq[i]);
1197 		}
1198 	}
1199 }
1200 
/*
 * net80211 node-allocation hook: allocate a zeroed ath_node (which
 * embeds the ieee80211_node) and seed its rate-control state.
 * KM_SLEEP may block, so this must not be called from interrupt
 * context.
 */
static struct ieee80211_node *
ath_node_alloc(ieee80211com_t *ic)
{
	struct ath_node *an;
	ath_t *asc = (ath_t *)ic;

	an = kmem_zalloc(sizeof (struct ath_node), KM_SLEEP);
	/* initialize rate control with rate index 0 */
	ath_rate_update(asc, &an->an_node, 0);
	return (&an->an_node);
}
1211 
/*
 * net80211 node-free hook.  Before releasing the memory, scrub every
 * active tx queue so no in-flight ath_buf still references this node
 * (ath_tx_processq() would otherwise touch freed memory), then let
 * net80211 clean up its per-node state and free the containing
 * ath_node.
 */
static void
ath_node_free(struct ieee80211_node *in)
{
	ieee80211com_t *ic = in->in_ic;
	ath_t *asc = (ath_t *)ic;
	struct ath_buf *bf;
	struct ath_txq *txq;
	int32_t i;

	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(asc, i)) {
			txq = &asc->asc_txq[i];
			mutex_enter(&txq->axq_lock);
			bf = list_head(&txq->axq_list);
			while (bf != NULL) {
				if (bf->bf_in == in) {
					/* orphan the buffer */
					bf->bf_in = NULL;
				}
				bf = list_next(&txq->axq_list, bf);
			}
			mutex_exit(&txq->axq_lock);
		}
	}
	ic->ic_node_cleanup(in);
	if (in->in_wpa_ie != NULL)
		ieee80211_free(in->in_wpa_ie);
	kmem_free(in, sizeof (struct ath_node));
}
1240 
1241 static void
1242 ath_next_scan(void *arg)
1243 {
1244 	ieee80211com_t *ic = arg;
1245 	ath_t *asc = (ath_t *)ic;
1246 
1247 	asc->asc_scan_timer = 0;
1248 	if (ic->ic_state == IEEE80211_S_SCAN) {
1249 		asc->asc_scan_timer = timeout(ath_next_scan, (void *)asc,
1250 		    drv_usectohz(ath_dwelltime * 1000));
1251 		ieee80211_next_scan(ic);
1252 	}
1253 }
1254 
/*
 * Cancel the channel-scan timer.  ath_next_scan() may be re-arming
 * the timer concurrently, so loop until the observed timeout id stops
 * changing before declaring it stopped; a single untimeout() could
 * race with a freshly rescheduled instance.
 */
static void
ath_stop_scantimer(ath_t *asc)
{
	timeout_id_t tmp_id = 0;

	while ((asc->asc_scan_timer != 0) && (tmp_id != asc->asc_scan_timer)) {
		tmp_id = asc->asc_scan_timer;
		(void) untimeout(tmp_id);
	}
	asc->asc_scan_timer = 0;
}
1266 
/*
 * 802.11 state-machine transition hook.  Sets the LED for the new
 * state, programs the channel, RX filter, BSSID/association id and
 * beacon timers as appropriate, chains to the net80211 handler saved
 * in asc_newstate, and finally manages the watchdog/scan timers.
 * Returns 0, or an error from the channel change or parent handler.
 */
static int32_t
ath_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
{
	ath_t *asc = (ath_t *)ic;
	struct ath_hal *ah = asc->asc_ah;
	struct ieee80211_node *in;
	int32_t i, error;
	uint8_t *bssid;
	uint32_t rfilt;
	enum ieee80211_state ostate;

	/* indexed by ieee80211_state; keep in sync with that enum */
	static const HAL_LED_STATE leds[] = {
	    HAL_LED_INIT,	/* IEEE80211_S_INIT */
	    HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
	    HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
	    HAL_LED_ASSOC, 	/* IEEE80211_S_ASSOC */
	    HAL_LED_RUN, 	/* IEEE80211_S_RUN */
	};
	if (!ATH_IS_RUNNING(asc))
		return (0);

	ostate = ic->ic_state;
	/* any transition out of SCAN kills the dwell timer */
	if (nstate != IEEE80211_S_SCAN)
		ath_stop_scantimer(asc);

	ATH_LOCK(asc);
	ATH_HAL_SETLEDSTATE(ah, leds[nstate]);	/* set LED */

	if (nstate == IEEE80211_S_INIT) {
		asc->asc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		/*
		 * Disable interrupts.
		 */
		ATH_HAL_INTRSET(ah, asc->asc_imask &~ HAL_INT_GLOBAL);
		ATH_UNLOCK(asc);
		goto done;
	}
	in = ic->ic_bss;
	error = ath_chan_set(asc, ic->ic_curchan);
	if (error != 0) {
		/* during a scan a failed channel change is tolerated */
		if (nstate != IEEE80211_S_SCAN) {
			ATH_UNLOCK(asc);
			ieee80211_reset_chan(ic);
			goto bad;
		}
	}

	rfilt = ath_calcrxfilter(asc);

	/* while scanning, filter/key MAC is our own address */
	if (nstate == IEEE80211_S_SCAN)
		bssid = ic->ic_macaddr;
	else
		bssid = in->in_bssid;
	ATH_HAL_SETRXFILTER(ah, rfilt);

	if (nstate == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS)
		ATH_HAL_SETASSOCID(ah, bssid, in->in_associd);
	else
		ATH_HAL_SETASSOCID(ah, bssid, 0);
	if (ic->ic_flags & IEEE80211_F_PRIVACY) {
		/* retarget valid WEP keys at the new bssid */
		for (i = 0; i < IEEE80211_WEP_NKID; i++) {
			if (ATH_HAL_KEYISVALID(ah, i))
				ATH_HAL_KEYSETMAC(ah, i, bssid);
		}
	}

	if ((nstate == IEEE80211_S_RUN) &&
	    (ostate != IEEE80211_S_RUN)) {
		/* Configure the beacon and sleep timers. */
		ath_beacon_config(asc);
	} else {
		asc->asc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		ATH_HAL_INTRSET(ah, asc->asc_imask);
	}
	/*
	 * Reset the rate control state.
	 */
	ath_rate_ctl_reset(asc, nstate);

	ATH_UNLOCK(asc);
done:
	/*
	 * Invoke the parent method to complete the work.
	 */
	error = asc->asc_newstate(ic, nstate, arg);
	/*
	 * Finally, start any timers.
	 */
	if (nstate == IEEE80211_S_RUN) {
		ieee80211_start_watchdog(ic, 1);
	} else if ((nstate == IEEE80211_S_SCAN) && (ostate != nstate)) {
		/* start ap/neighbor scan timer */
		ASSERT(asc->asc_scan_timer == 0);
		asc->asc_scan_timer = timeout(ath_next_scan, (void *)asc,
		    drv_usectohz(ath_dwelltime * 1000));
	}
bad:
	return (error);
}
1366 
1367 /*
1368  * Periodically recalibrate the PHY to account
1369  * for temperature/environment changes.
1370  */
1371 static void
1372 ath_calibrate(ath_t *asc)
1373 {
1374 	struct ath_hal *ah = asc->asc_ah;
1375 	HAL_BOOL iqcaldone;
1376 
1377 	asc->asc_stats.ast_per_cal++;
1378 
1379 	if (ATH_HAL_GETRFGAIN(ah) == HAL_RFGAIN_NEED_CHANGE) {
1380 		/*
1381 		 * Rfgain is out of bounds, reset the chip
1382 		 * to load new gain values.
1383 		 */
1384 		ATH_DEBUG((ATH_DBG_HAL, "ath: ath_calibrate(): "
1385 		    "Need change RFgain\n"));
1386 		asc->asc_stats.ast_per_rfgain++;
1387 		(void) ath_reset(&asc->asc_isc);
1388 	}
1389 	if (!ATH_HAL_CALIBRATE(ah, &asc->asc_curchan, &iqcaldone)) {
1390 		ATH_DEBUG((ATH_DBG_HAL, "ath: ath_calibrate(): "
1391 		    "calibration of channel %u failed\n",
1392 		    asc->asc_curchan.channel));
1393 		asc->asc_stats.ast_per_calfail++;
1394 	}
1395 }
1396 
/*
 * Driver watchdog timer callback.  While the interface is in RUN
 * state it performs the periodic PHY calibration, kicks the rate
 * control algorithm (per-node unless a fixed rate is configured),
 * and re-arms the net80211 watchdog.
 */
static void
ath_watchdog(void *arg)
{
	ath_t *asc = arg;
	ieee80211com_t *ic = &asc->asc_isc;
	int ntimer = 0;

	ATH_LOCK(asc);
	ic->ic_watchdog_timer = 0;
	if (!ATH_IS_RUNNING(asc)) {
		ATH_UNLOCK(asc);
		return;
	}

	if (ic->ic_state == IEEE80211_S_RUN) {
		/* periodic recalibration */
		ath_calibrate(asc);

		/*
		 * Start the background rate control thread if we
		 * are not configured to use a fixed xmit rate.
		 */
		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
			asc->asc_stats.ast_rate_calls ++;
			if (ic->ic_opmode == IEEE80211_M_STA)
				ath_rate_ctl(ic, ic->ic_bss);
			else
				ieee80211_iterate_nodes(&ic->ic_sta,
				    ath_rate_ctl, asc);
		}

		/* re-arm for another tick only while in RUN state */
		ntimer = 1;
	}
	ATH_UNLOCK(asc);

	/* run net80211's own watchdog outside our lock */
	ieee80211_watchdog(ic);
	if (ntimer != 0)
		ieee80211_start_watchdog(ic, ntimer);
}
1436 
1437 static void
1438 ath_tx_proc(void *arg)
1439 {
1440 	ath_t *asc = arg;
1441 	ath_tx_handler(asc);
1442 }
1443 
1444 
/*
 * Primary hardware interrupt handler.  The IRQ line may be shared, so
 * the interrupt is claimed only when the device is running and the HAL
 * reports a pending interrupt.  Fatal errors and rx overruns force a
 * chip reset; rx completion is deferred to the soft interrupt and tx
 * completion to the driver taskq to keep this handler short.
 */
static uint_t
ath_intr(caddr_t arg)
{
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ath_t *asc = (ath_t *)arg;
	struct ath_hal *ah = asc->asc_ah;
	HAL_INT status;
	ieee80211com_t *ic = (ieee80211com_t *)asc;

	ATH_LOCK(asc);

	if (!ATH_IS_RUNNING(asc)) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		ATH_UNLOCK(asc);
		return (DDI_INTR_UNCLAIMED);
	}

	if (!ATH_HAL_INTRPEND(ah)) {	/* shared irq, not for us */
		ATH_UNLOCK(asc);
		return (DDI_INTR_UNCLAIMED);
	}

	ATH_HAL_GETISR(ah, &status);
	/* ignore causes we did not ask for */
	status &= asc->asc_imask;
	if (status & HAL_INT_FATAL) {
		asc->asc_stats.ast_hardware++;
		goto reset;
	} else if (status & HAL_INT_RXORN) {
		asc->asc_stats.ast_rxorn++;
		goto reset;
	} else {
		if (status & HAL_INT_RXEOL) {
			/* rx descriptor list exhausted; restart chaining */
			asc->asc_stats.ast_rxeol++;
			asc->asc_rxlink = NULL;
		}
		if (status & HAL_INT_TXURN) {
			/* tx fifo underrun: raise the trigger level */
			asc->asc_stats.ast_txurn++;
			ATH_HAL_UPDATETXTRIGLEVEL(ah, AH_TRUE);
		}

		if (status & HAL_INT_RX) {
			/* defer rx processing to the soft interrupt */
			asc->asc_rx_pend = 1;
			ddi_trigger_softintr(asc->asc_softint_id);
		}
		if (status & HAL_INT_TX) {
			/* defer tx completion processing to the taskq */
			if (ddi_taskq_dispatch(asc->asc_tq, ath_tx_proc,
			    asc, DDI_NOSLEEP) != DDI_SUCCESS) {
				ath_problem("ath: ath_intr(): "
				    "No memory available for tx taskq\n");
			}
		}
		ATH_UNLOCK(asc);

		if (status & HAL_INT_SWBA) {
			/* This will occur only in Host-AP or Ad-Hoc mode */
			return (DDI_INTR_CLAIMED);
		}

		if (status & HAL_INT_BMISS) {
			/* beacon miss: fall back to (re)association */
			if (ic->ic_state == IEEE80211_S_RUN) {
				(void) ieee80211_new_state(ic,
				    IEEE80211_S_ASSOC, -1);
			}
		}

	}

	return (DDI_INTR_CLAIMED);
reset:
	/* unrecoverable condition: reset the chip, still holding ATH_LOCK */
	(void) ath_reset(ic);
	ATH_UNLOCK(asc);
	return (DDI_INTR_CLAIMED);
}
1521 
1522 static uint_t
1523 ath_softint_handler(caddr_t data)
1524 {
1525 	/* LINTED E_BAD_PTR_CAST_ALIGN */
1526 	ath_t *asc = (ath_t *)data;
1527 
1528 	/*
1529 	 * Check if the soft interrupt is triggered by another
1530 	 * driver at the same level.
1531 	 */
1532 	ATH_LOCK(asc);
1533 	if (asc->asc_rx_pend) { /* Soft interrupt for this driver */
1534 		asc->asc_rx_pend = 0;
1535 		ATH_UNLOCK(asc);
1536 		ath_rx_handler(asc);
1537 		return (DDI_INTR_CLAIMED);
1538 	}
1539 	ATH_UNLOCK(asc);
1540 	return (DDI_INTR_UNCLAIMED);
1541 }
1542 
1543 /*
1544  * following are gld callback routine
1545  * ath_gld_send, ath_gld_ioctl, ath_gld_gstat
1546  * are listed in other corresponding sections.
1547  * reset the hardware w/o losing operational state.  this is
1548  * basically a more efficient way of doing ath_gld_stop, ath_gld_start,
1549  * followed by state transitions to the current 802.11
1550  * operational state.  used to recover from errors rx overrun
1551  * and to reset the hardware when rf gain settings must be reset.
1552  */
1553 
/*
 * Stop the device.  Caller must hold ATH_LOCK; the lock is dropped
 * temporarily around the 802.11 state change because the state
 * machine can call back into driver entry points that take ATH_LOCK
 * themselves.  No-op if the device is already stopped.
 */
static void
ath_stop_locked(ath_t *asc)
{
	ieee80211com_t *ic = (ieee80211com_t *)asc;
	struct ath_hal *ah = asc->asc_ah;

	ATH_LOCK_ASSERT(asc);
	if (!asc->asc_isrunning)
		return;

	/*
	 * Shutdown the hardware and driver:
	 *    reset 802.11 state machine
	 *    turn off timers
	 *    disable interrupts
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ATH_UNLOCK(asc);
	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
	ieee80211_stop_watchdog(ic);
	ATH_LOCK(asc);
	ATH_HAL_INTRSET(ah, 0);	/* disable all interrupts */
	ath_draintxq(asc);
	if (!asc->asc_invalid) {
		ath_stoprecv(asc);
		ATH_HAL_PHYDISABLE(ah);
	} else {
		/* hardware gone: just forget the rx descriptor chain */
		asc->asc_rxlink = NULL;
	}
	asc->asc_isrunning = 0;
}
1593 
/*
 * GLDv3 mac stop entry point: shut the device down, leave the chip
 * in the AWAKE power state, and mark the softc invalid so subsequent
 * paths skip hardware access until the next start.
 */
static void
ath_m_stop(void *arg)
{
	ath_t *asc = arg;
	struct ath_hal *ah = asc->asc_ah;

	ATH_LOCK(asc);
	ath_stop_locked(asc);
	ATH_HAL_SETPOWER(ah, HAL_PM_AWAKE);
	asc->asc_invalid = 1;
	ATH_UNLOCK(asc);
}
1606 
/*
 * Bring the hardware up on the current channel and enable interrupts.
 * Caller must hold ATH_LOCK.  Returns 0 on success or ENOTACTIVE if
 * the HAL reset fails.
 */
static int
ath_start_locked(ath_t *asc)
{
	ieee80211com_t *ic = (ieee80211com_t *)asc;
	struct ath_hal *ah = asc->asc_ah;
	HAL_STATUS status;

	ATH_LOCK_ASSERT(asc);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	asc->asc_curchan.channel = ic->ic_curchan->ich_freq;
	asc->asc_curchan.channelFlags = ath_chan2flags(ic, ic->ic_curchan);
	if (!ATH_HAL_RESET(ah, (HAL_OPMODE)ic->ic_opmode,
	    &asc->asc_curchan, AH_FALSE, &status)) {
		ATH_DEBUG((ATH_DBG_HAL, "ath: ath_m_start(): "
		    "reset hardware failed: '%s' (HAL status %u)\n",
		    ath_get_hal_status_desc(status), status));
		return (ENOTACTIVE);
	}

	/* (re)start the receive machinery */
	(void) ath_startrecv(asc);

	/*
	 * Enable interrupts.
	 */
	asc->asc_imask = HAL_INT_RX | HAL_INT_TX
	    | HAL_INT_RXEOL | HAL_INT_RXORN
	    | HAL_INT_FATAL | HAL_INT_GLOBAL;
	ATH_HAL_INTRSET(ah, asc->asc_imask);

	/*
	 * The hardware should be ready to go now so it's safe
	 * to kick the 802.11 state machine as it's likely to
	 * immediately call back to us to send mgmt frames.
	 */
	ath_chan_change(asc, ic->ic_curchan);

	asc->asc_isrunning = 1;

	return (0);
}
1654 
1655 int
1656 ath_m_start(void *arg)
1657 {
1658 	ath_t *asc = arg;
1659 	int err;
1660 
1661 	ATH_LOCK(asc);
1662 	/*
1663 	 * Stop anything previously setup.  This is safe
1664 	 * whether this is the first time through or not.
1665 	 */
1666 	ath_stop_locked(asc);
1667 
1668 	if ((err = ath_start_locked(asc)) != 0) {
1669 		ATH_UNLOCK(asc);
1670 		return (err);
1671 	}
1672 
1673 	asc->asc_invalid = 0;
1674 	ATH_UNLOCK(asc);
1675 
1676 	return (0);
1677 }
1678 
1679 
1680 static int
1681 ath_m_unicst(void *arg, const uint8_t *macaddr)
1682 {
1683 	ath_t *asc = arg;
1684 	struct ath_hal *ah = asc->asc_ah;
1685 
1686 	ATH_DEBUG((ATH_DBG_GLD, "ath: ath_gld_saddr(): "
1687 	    "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
1688 	    macaddr[0], macaddr[1], macaddr[2],
1689 	    macaddr[3], macaddr[4], macaddr[5]));
1690 
1691 	ATH_LOCK(asc);
1692 	IEEE80211_ADDR_COPY(asc->asc_isc.ic_macaddr, macaddr);
1693 	ATH_HAL_SETMAC(ah, asc->asc_isc.ic_macaddr);
1694 
1695 	(void) ath_reset(&asc->asc_isc);
1696 	ATH_UNLOCK(asc);
1697 	return (0);
1698 }
1699 
1700 static int
1701 ath_m_promisc(void *arg, boolean_t on)
1702 {
1703 	ath_t *asc = arg;
1704 	struct ath_hal *ah = asc->asc_ah;
1705 	uint32_t rfilt;
1706 
1707 	ATH_LOCK(asc);
1708 	rfilt = ATH_HAL_GETRXFILTER(ah);
1709 	if (on)
1710 		rfilt |= HAL_RX_FILTER_PROM;
1711 	else
1712 		rfilt &= ~HAL_RX_FILTER_PROM;
1713 	asc->asc_promisc = on;
1714 	ATH_HAL_SETRXFILTER(ah, rfilt);
1715 	ATH_UNLOCK(asc);
1716 
1717 	return (0);
1718 }
1719 
/*
 * GLDv3 multicast entry point: maintain a reference count per hash
 * bucket and program the chip's 64-bit multicast hash filter.
 * Always returns 0.
 */
static int
ath_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	ath_t *asc = arg;
	struct ath_hal *ah = asc->asc_ah;
	uint32_t val, index, bit;
	uint8_t pos;
	uint32_t *mfilt = asc->asc_mcast_hash;

	ATH_LOCK(asc);

	/* calculate XOR of eight 6bit values */
	val = ATH_LE_READ_4(mca + 0);
	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	/*
	 * NOTE(review): this reads 4 bytes starting at offset 3, i.e.
	 * one byte past the 6-byte address.  That byte's bits are
	 * discarded by the uint8_t truncation and the 0x3f mask below,
	 * but confirm the buffer is always at least 7 bytes readable.
	 */
	val = ATH_LE_READ_4(mca + 3);
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	pos &= 0x3f;
	/* pos selects one of 64 bits across two 32-bit filter words */
	index = pos / 32;
	bit = 1 << (pos % 32);

	if (add) {	/* enable multicast */
		asc->asc_mcast_refs[pos]++;
		mfilt[index] |= bit;
	} else {	/* disable multicast */
		/* clear the bucket only when its last address is removed */
		if (--asc->asc_mcast_refs[pos] == 0)
			mfilt[index] &= ~bit;
	}
	ATH_HAL_SETMCASTFILTER(ah, mfilt[0], mfilt[1]);

	ATH_UNLOCK(asc);
	return (0);
}
1752 /*
1753  * callback functions for /get/set properties
1754  */
1755 static int
1756 ath_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
1757     uint_t wldp_length, const void *wldp_buf)
1758 {
1759 	ath_t	*asc = arg;
1760 	int	err;
1761 
1762 	err = ieee80211_setprop(&asc->asc_isc, pr_name, wldp_pr_num,
1763 	    wldp_length, wldp_buf);
1764 
1765 	ATH_LOCK(asc);
1766 
1767 	if (err == ENETRESET) {
1768 		if (ATH_IS_RUNNING(asc)) {
1769 			ATH_UNLOCK(asc);
1770 			(void) ath_m_start(asc);
1771 			(void) ieee80211_new_state(&asc->asc_isc,
1772 			    IEEE80211_S_SCAN, -1);
1773 			ATH_LOCK(asc);
1774 		}
1775 		err = 0;
1776 	}
1777 
1778 	ATH_UNLOCK(asc);
1779 
1780 	return (err);
1781 }
1782 /* ARGSUSED */
1783 static int
1784 ath_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
1785     uint_t pr_flags, uint_t wldp_length, void *wldp_buf, uint_t *perm)
1786 {
1787 	ath_t	*asc = arg;
1788 	int	err = 0;
1789 
1790 	err = ieee80211_getprop(&asc->asc_isc, pr_name, wldp_pr_num,
1791 	    pr_flags, wldp_length, wldp_buf, perm);
1792 
1793 	return (err);
1794 }
1795 
1796 static void
1797 ath_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1798 {
1799 	ath_t *asc = arg;
1800 	int32_t err;
1801 
1802 	err = ieee80211_ioctl(&asc->asc_isc, wq, mp);
1803 	ATH_LOCK(asc);
1804 	if (err == ENETRESET) {
1805 		if (ATH_IS_RUNNING(asc)) {
1806 			ATH_UNLOCK(asc);
1807 			(void) ath_m_start(asc);
1808 			(void) ieee80211_new_state(&asc->asc_isc,
1809 			    IEEE80211_S_SCAN, -1);
1810 			ATH_LOCK(asc);
1811 		}
1812 	}
1813 	ATH_UNLOCK(asc);
1814 }
1815 
/*
 * GLDv3 get-statistics entry point.  Maps MAC and WiFi statistic ids
 * onto the driver and net80211 counters; WiFi stats not tracked here
 * are forwarded to ieee80211_stat().  Returns 0, ENOTSUP for unknown
 * ids, or the ieee80211_stat() result.
 */
static int
ath_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	ath_t *asc = arg;
	ieee80211com_t *ic = (ieee80211com_t *)asc;
	struct ieee80211_node *in = ic->ic_bss;
	struct ieee80211_rateset *rs = &in->in_rates;

	ATH_LOCK(asc);
	switch (stat) {
	case MAC_STAT_IFSPEED:
		/* current tx rate, in 500kb/s units, scaled to bits/sec */
		*val = (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) / 2 *
		    1000000ull;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = asc->asc_stats.ast_tx_nobuf +
		    asc->asc_stats.ast_tx_nobufmgt;
		break;
	case MAC_STAT_IERRORS:
		*val = asc->asc_stats.ast_rx_tooshort;
		break;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
		break;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
		break;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
		break;
	case MAC_STAT_OERRORS:
	case WIFI_STAT_TX_FAILED:
		*val = asc->asc_stats.ast_tx_fifoerr +
		    asc->asc_stats.ast_tx_xretries +
		    asc->asc_stats.ast_tx_discard;
		break;
	case WIFI_STAT_TX_RETRANS:
		*val = asc->asc_stats.ast_tx_xretries;
		break;
	case WIFI_STAT_FCS_ERRORS:
		*val = asc->asc_stats.ast_rx_crcerr;
		break;
	case WIFI_STAT_WEP_ERRORS:
		*val = asc->asc_stats.ast_rx_badcrypt;
		break;
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		/* tracked by the shared net80211 layer, not the driver */
		ATH_UNLOCK(asc);
		return (ieee80211_stat(ic, stat, val));
	default:
		ATH_UNLOCK(asc);
		return (ENOTSUP);
	}
	ATH_UNLOCK(asc);

	return (0);
}
1882 
1883 static int
1884 ath_pci_setup(ath_t *asc)
1885 {
1886 	uint16_t command;
1887 
1888 	/*
1889 	 * Enable memory mapping and bus mastering
1890 	 */
1891 	ASSERT(asc != NULL);
1892 	command = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_COMM);
1893 	command |= PCI_COMM_MAE | PCI_COMM_ME;
1894 	pci_config_put16(asc->asc_cfg_handle, PCI_CONF_COMM, command);
1895 	command = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_COMM);
1896 	if ((command & PCI_COMM_MAE) == 0) {
1897 		ath_problem("ath: ath_pci_setup(): "
1898 		    "failed to enable memory mapping\n");
1899 		return (EIO);
1900 	}
1901 	if ((command & PCI_COMM_ME) == 0) {
1902 		ath_problem("ath: ath_pci_setup(): "
1903 		    "failed to enable bus mastering\n");
1904 		return (EIO);
1905 	}
1906 	ATH_DEBUG((ATH_DBG_INIT, "ath: ath_pci_setup(): "
1907 	    "set command reg to 0x%x \n", command));
1908 
1909 	return (0);
1910 }
1911 
/*
 * DDI_RESUME handler: re-enable PCI config access and, unless the
 * device was marked invalid before the suspend, restart the hardware.
 * Returns DDI_SUCCESS/DDI_FAILURE (or the ath_start_locked() result).
 */
static int
ath_resume(dev_info_t *devinfo)
{
	ath_t *asc;
	int ret = DDI_SUCCESS;

	asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
	if (asc == NULL) {
		ATH_DEBUG((ATH_DBG_SUSPEND, "ath: ath_resume(): "
		    "failed to get soft state\n"));
		return (DDI_FAILURE);
	}

	ATH_LOCK(asc);
	/*
	 * Set up config space command register(s). Refuse
	 * to resume on failure.
	 */
	if (ath_pci_setup(asc) != 0) {
		ATH_DEBUG((ATH_DBG_SUSPEND, "ath: ath_resume(): "
		    "ath_pci_setup() failed\n"));
		ATH_UNLOCK(asc);
		return (DDI_FAILURE);
	}

	/* only restart hardware that was running before the suspend */
	if (!asc->asc_invalid)
		ret = ath_start_locked(asc);
	ATH_UNLOCK(asc);

	return (ret);
}
1943 
1944 static int
1945 ath_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
1946 {
1947 	ath_t *asc;
1948 	ieee80211com_t *ic;
1949 	struct ath_hal *ah;
1950 	uint8_t csz;
1951 	HAL_STATUS status;
1952 	caddr_t regs;
1953 	uint32_t i, val;
1954 	uint16_t vendor_id, device_id;
1955 	const char *athname;
1956 	int32_t ath_countrycode = CTRY_DEFAULT;	/* country code */
1957 	int32_t err, ath_regdomain = 0; /* regulatory domain */
1958 	char strbuf[32];
1959 	int instance;
1960 	wifi_data_t wd = { 0 };
1961 	mac_register_t *macp;
1962 
1963 	switch (cmd) {
1964 	case DDI_ATTACH:
1965 		break;
1966 
1967 	case DDI_RESUME:
1968 		return (ath_resume(devinfo));
1969 
1970 	default:
1971 		return (DDI_FAILURE);
1972 	}
1973 
1974 	instance = ddi_get_instance(devinfo);
1975 	if (ddi_soft_state_zalloc(ath_soft_state_p, instance) != DDI_SUCCESS) {
1976 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1977 		    "Unable to alloc softstate\n"));
1978 		return (DDI_FAILURE);
1979 	}
1980 
1981 	asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
1982 	ic = (ieee80211com_t *)asc;
1983 	asc->asc_dev = devinfo;
1984 
1985 	mutex_init(&asc->asc_genlock, NULL, MUTEX_DRIVER, NULL);
1986 	mutex_init(&asc->asc_txbuflock, NULL, MUTEX_DRIVER, NULL);
1987 	mutex_init(&asc->asc_rxbuflock, NULL, MUTEX_DRIVER, NULL);
1988 	mutex_init(&asc->asc_resched_lock, NULL, MUTEX_DRIVER, NULL);
1989 
1990 	err = pci_config_setup(devinfo, &asc->asc_cfg_handle);
1991 	if (err != DDI_SUCCESS) {
1992 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1993 		    "pci_config_setup() failed"));
1994 		goto attach_fail0;
1995 	}
1996 
1997 	if (ath_pci_setup(asc) != 0)
1998 		goto attach_fail1;
1999 
2000 	/*
2001 	 * Cache line size is used to size and align various
2002 	 * structures used to communicate with the hardware.
2003 	 */
2004 	csz = pci_config_get8(asc->asc_cfg_handle, PCI_CONF_CACHE_LINESZ);
2005 	if (csz == 0) {
2006 		/*
2007 		 * We must have this setup properly for rx buffer
2008 		 * DMA to work so force a reasonable value here if it
2009 		 * comes up zero.
2010 		 */
2011 		csz = ATH_DEF_CACHE_BYTES / sizeof (uint32_t);
2012 		pci_config_put8(asc->asc_cfg_handle, PCI_CONF_CACHE_LINESZ,
2013 		    csz);
2014 	}
2015 	asc->asc_cachelsz = csz << 2;
2016 	vendor_id = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_VENID);
2017 	device_id = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_DEVID);
2018 	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): vendor 0x%x, "
2019 	    "device id 0x%x, cache size %d\n", vendor_id, device_id, csz));
2020 
2021 	athname = ath_hal_probe(vendor_id, device_id);
2022 	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): athname: %s\n",
2023 	    athname ? athname : "Atheros ???"));
2024 
2025 	pci_config_put8(asc->asc_cfg_handle, PCI_CONF_LATENCY_TIMER, 0xa8);
2026 	val = pci_config_get32(asc->asc_cfg_handle, 0x40);
2027 	if ((val & 0x0000ff00) != 0)
2028 		pci_config_put32(asc->asc_cfg_handle, 0x40, val & 0xffff00ff);
2029 
2030 	err = ddi_regs_map_setup(devinfo, 1,
2031 	    &regs, 0, 0, &ath_reg_accattr, &asc->asc_io_handle);
2032 	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2033 	    "regs map1 = %x err=%d\n", regs, err));
2034 	if (err != DDI_SUCCESS) {
2035 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2036 		    "ddi_regs_map_setup() failed"));
2037 		goto attach_fail1;
2038 	}
2039 
2040 	ah = ath_hal_attach(device_id, asc, 0, regs, &status);
2041 	if (ah == NULL) {
2042 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2043 		    "unable to attach hw: '%s' (HAL status %u)\n",
2044 		    ath_get_hal_status_desc(status), status));
2045 		goto attach_fail2;
2046 	}
2047 	ATH_DEBUG((ATH_DBG_ATTACH, "mac %d.%d phy %d.%d",
2048 	    ah->ah_macVersion, ah->ah_macRev,
2049 	    ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf));
2050 	ATH_HAL_INTRSET(ah, 0);
2051 	asc->asc_ah = ah;
2052 
2053 	if (ah->ah_abi != HAL_ABI_VERSION) {
2054 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2055 		    "HAL ABI mismatch detected (0x%x != 0x%x)\n",
2056 		    ah->ah_abi, HAL_ABI_VERSION));
2057 		goto attach_fail3;
2058 	}
2059 
2060 	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2061 	    "HAL ABI version 0x%x\n", ah->ah_abi));
2062 	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2063 	    "HAL mac version %d.%d, phy version %d.%d\n",
2064 	    ah->ah_macVersion, ah->ah_macRev,
2065 	    ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf));
2066 	if (ah->ah_analog5GhzRev)
2067 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2068 		    "HAL 5ghz radio version %d.%d\n",
2069 		    ah->ah_analog5GhzRev >> 4,
2070 		    ah->ah_analog5GhzRev & 0xf));
2071 	if (ah->ah_analog2GhzRev)
2072 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2073 		    "HAL 2ghz radio version %d.%d\n",
2074 		    ah->ah_analog2GhzRev >> 4,
2075 		    ah->ah_analog2GhzRev & 0xf));
2076 
2077 	/*
2078 	 * Check if the MAC has multi-rate retry support.
2079 	 * We do this by trying to setup a fake extended
2080 	 * descriptor.  MAC's that don't have support will
2081 	 * return false w/o doing anything.  MAC's that do
2082 	 * support it will return true w/o doing anything.
2083 	 */
2084 	asc->asc_mrretry = ATH_HAL_SETUPXTXDESC(ah, NULL, 0, 0, 0, 0, 0, 0);
2085 	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2086 	    "multi rate retry support=%x\n",
2087 	    asc->asc_mrretry));
2088 
2089 	/*
2090 	 * Get the hardware key cache size.
2091 	 */
2092 	asc->asc_keymax = ATH_HAL_KEYCACHESIZE(ah);
2093 	if (asc->asc_keymax > sizeof (asc->asc_keymap) * NBBY) {
2094 		ATH_DEBUG((ATH_DBG_ATTACH, "ath_attach:"
2095 		    " Warning, using only %u entries in %u key cache\n",
2096 		    sizeof (asc->asc_keymap) * NBBY, asc->asc_keymax));
2097 		asc->asc_keymax = sizeof (asc->asc_keymap) * NBBY;
2098 	}
2099 	/*
2100 	 * Reset the key cache since some parts do not
2101 	 * reset the contents on initial power up.
2102 	 */
2103 	for (i = 0; i < asc->asc_keymax; i++)
2104 		ATH_HAL_KEYRESET(ah, i);
2105 
2106 	ATH_HAL_GETREGDOMAIN(ah, (uint32_t *)&ath_regdomain);
2107 	ATH_HAL_GETCOUNTRYCODE(ah, &ath_countrycode);
2108 	/*
2109 	 * Collect the channel list using the default country
2110 	 * code and including outdoor channels.  The 802.11 layer
	 * is responsible for filtering this list to a set of
2112 	 * channels that it considers ok to use.
2113 	 */
2114 	asc->asc_have11g = 0;
2115 
2116 	/* enable outdoor use, enable extended channels */
2117 	err = ath_getchannels(asc, ath_countrycode, AH_FALSE, AH_TRUE);
2118 	if (err != 0)
2119 		goto attach_fail3;
2120 
2121 	/*
2122 	 * Setup rate tables for all potential media types.
2123 	 */
2124 	ath_rate_setup(asc, IEEE80211_MODE_11A);
2125 	ath_rate_setup(asc, IEEE80211_MODE_11B);
2126 	ath_rate_setup(asc, IEEE80211_MODE_11G);
2127 	ath_rate_setup(asc, IEEE80211_MODE_TURBO_A);
2128 
2129 	/* Setup here so ath_rate_update is happy */
2130 	ath_setcurmode(asc, IEEE80211_MODE_11A);
2131 
2132 	err = ath_desc_alloc(devinfo, asc);
2133 	if (err != DDI_SUCCESS) {
2134 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2135 		    "failed to allocate descriptors: %d\n", err));
2136 		goto attach_fail3;
2137 	}
2138 
2139 	if ((asc->asc_tq = ddi_taskq_create(devinfo, "ath_taskq", 1,
2140 	    TASKQ_DEFAULTPRI, 0)) == NULL) {
2141 		goto attach_fail4;
2142 	}
2143 	/* Setup transmit queues in the HAL */
2144 	if (ath_txq_setup(asc))
2145 		goto attach_fail4;
2146 
2147 	ATH_HAL_GETMAC(ah, ic->ic_macaddr);
2148 
2149 	/*
2150 	 * Initialize pointers to device specific functions which
2151 	 * will be used by the generic layer.
2152 	 */
2153 	/* 11g support is identified when we fetch the channel set */
2154 	if (asc->asc_have11g)
2155 		ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
2156 		    IEEE80211_C_SHSLOT;		/* short slot time */
2157 	/*
2158 	 * Query the hal to figure out h/w crypto support.
2159 	 */
2160 	if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_WEP))
2161 		ic->ic_caps |= IEEE80211_C_WEP;
2162 	if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_AES_OCB))
2163 		ic->ic_caps |= IEEE80211_C_AES;
2164 	if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_AES_CCM)) {
2165 		ATH_DEBUG((ATH_DBG_ATTACH, "Atheros support H/W CCMP\n"));
2166 		ic->ic_caps |= IEEE80211_C_AES_CCM;
2167 	}
2168 	if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_CKIP))
2169 		ic->ic_caps |= IEEE80211_C_CKIP;
2170 	if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_TKIP)) {
2171 		ATH_DEBUG((ATH_DBG_ATTACH, "Atheros support H/W TKIP\n"));
2172 		ic->ic_caps |= IEEE80211_C_TKIP;
2173 		/*
2174 		 * Check if h/w does the MIC and/or whether the
2175 		 * separate key cache entries are required to
2176 		 * handle both tx+rx MIC keys.
2177 		 */
2178 		if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_MIC)) {
2179 			ATH_DEBUG((ATH_DBG_ATTACH, "Support H/W TKIP MIC\n"));
2180 			ic->ic_caps |= IEEE80211_C_TKIPMIC;
2181 		}
2182 
2183 		/*
2184 		 * If the h/w supports storing tx+rx MIC keys
2185 		 * in one cache slot automatically enable use.
2186 		 */
2187 		if (ATH_HAL_HASTKIPSPLIT(ah) ||
2188 		    !ATH_HAL_SETTKIPSPLIT(ah, AH_FALSE)) {
2189 			asc->asc_splitmic = 1;
2190 		}
2191 	}
2192 	ic->ic_caps |= IEEE80211_C_WPA;	/* Support WPA/WPA2 */
2193 
2194 	asc->asc_hasclrkey = ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_CLR);
2195 	/*
2196 	 * Mark key cache slots associated with global keys
2197 	 * as in use.  If we knew TKIP was not to be used we
2198 	 * could leave the +32, +64, and +32+64 slots free.
2199 	 */
2200 	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
2201 		setbit(asc->asc_keymap, i);
2202 		setbit(asc->asc_keymap, i+64);
2203 		if (asc->asc_splitmic) {
2204 			setbit(asc->asc_keymap, i+32);
2205 			setbit(asc->asc_keymap, i+32+64);
2206 		}
2207 	}
2208 
2209 	ic->ic_phytype = IEEE80211_T_OFDM;
2210 	ic->ic_opmode = IEEE80211_M_STA;
2211 	ic->ic_state = IEEE80211_S_INIT;
2212 	ic->ic_maxrssi = ATH_MAX_RSSI;
2213 	ic->ic_set_shortslot = ath_set_shortslot;
2214 	ic->ic_xmit = ath_xmit;
2215 	ieee80211_attach(ic);
2216 
2217 	/* different instance has different WPA door */
2218 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
2219 	    ddi_driver_name(devinfo),
2220 	    ddi_get_instance(devinfo));
2221 
2222 	/* Override 80211 default routines */
2223 	ic->ic_reset = ath_reset;
2224 	asc->asc_newstate = ic->ic_newstate;
2225 	ic->ic_newstate = ath_newstate;
2226 	ic->ic_watchdog = ath_watchdog;
2227 	ic->ic_node_alloc = ath_node_alloc;
2228 	ic->ic_node_free = ath_node_free;
2229 	ic->ic_crypto.cs_key_alloc = ath_key_alloc;
2230 	ic->ic_crypto.cs_key_delete = ath_key_delete;
2231 	ic->ic_crypto.cs_key_set = ath_key_set;
2232 	ieee80211_media_init(ic);
2233 	/*
2234 	 * initialize default tx key
2235 	 */
2236 	ic->ic_def_txkey = 0;
2237 
2238 	asc->asc_rx_pend = 0;
2239 	ATH_HAL_INTRSET(ah, 0);
2240 	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW,
2241 	    &asc->asc_softint_id, NULL, 0, ath_softint_handler, (caddr_t)asc);
2242 	if (err != DDI_SUCCESS) {
2243 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2244 		    "ddi_add_softintr() failed\n"));
2245 		goto attach_fail5;
2246 	}
2247 
2248 	if (ddi_get_iblock_cookie(devinfo, 0, &asc->asc_iblock)
2249 	    != DDI_SUCCESS) {
2250 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2251 		    "Can not get iblock cookie for INT\n"));
2252 		goto attach_fail6;
2253 	}
2254 
2255 	if (ddi_add_intr(devinfo, 0, NULL, NULL, ath_intr,
2256 	    (caddr_t)asc) != DDI_SUCCESS) {
2257 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2258 		    "Can not set intr for ATH driver\n"));
2259 		goto attach_fail6;
2260 	}
2261 
2262 	/*
2263 	 * Provide initial settings for the WiFi plugin; whenever this
2264 	 * information changes, we need to call mac_plugindata_update()
2265 	 */
2266 	wd.wd_opmode = ic->ic_opmode;
2267 	wd.wd_secalloc = WIFI_SEC_NONE;
2268 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid);
2269 
2270 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
2271 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2272 		    "MAC version mismatch\n"));
2273 		goto attach_fail7;
2274 	}
2275 
2276 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
2277 	macp->m_driver		= asc;
2278 	macp->m_dip		= devinfo;
2279 	macp->m_src_addr	= ic->ic_macaddr;
2280 	macp->m_callbacks	= &ath_m_callbacks;
2281 	macp->m_min_sdu		= 0;
2282 	macp->m_max_sdu		= IEEE80211_MTU;
2283 	macp->m_pdata		= &wd;
2284 	macp->m_pdata_size	= sizeof (wd);
2285 
2286 	err = mac_register(macp, &ic->ic_mach);
2287 	mac_free(macp);
2288 	if (err != 0) {
2289 		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2290 		    "mac_register err %x\n", err));
2291 		goto attach_fail7;
2292 	}
2293 
2294 	/* Create minor node of type DDI_NT_NET_WIFI */
2295 	(void) snprintf(strbuf, sizeof (strbuf), "%s%d",
2296 	    ATH_NODENAME, instance);
2297 	err = ddi_create_minor_node(devinfo, strbuf, S_IFCHR,
2298 	    instance + 1, DDI_NT_NET_WIFI, 0);
2299 	if (err != DDI_SUCCESS)
2300 		ATH_DEBUG((ATH_DBG_ATTACH, "WARN: ath: ath_attach(): "
2301 		    "Create minor node failed - %d\n", err));
2302 
2303 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
2304 	asc->asc_invalid = 1;
2305 	asc->asc_isrunning = 0;
2306 	asc->asc_promisc = B_FALSE;
2307 	bzero(asc->asc_mcast_refs, sizeof (asc->asc_mcast_refs));
2308 	bzero(asc->asc_mcast_hash, sizeof (asc->asc_mcast_hash));
2309 	return (DDI_SUCCESS);
2310 attach_fail7:
2311 	ddi_remove_intr(devinfo, 0, asc->asc_iblock);
2312 attach_fail6:
2313 	ddi_remove_softintr(asc->asc_softint_id);
2314 attach_fail5:
2315 	(void) ieee80211_detach(ic);
2316 attach_fail4:
2317 	ath_desc_free(asc);
2318 	if (asc->asc_tq)
2319 		ddi_taskq_destroy(asc->asc_tq);
2320 attach_fail3:
2321 	ah->ah_detach(asc->asc_ah);
2322 attach_fail2:
2323 	ddi_regs_map_free(&asc->asc_io_handle);
2324 attach_fail1:
2325 	pci_config_teardown(&asc->asc_cfg_handle);
2326 attach_fail0:
2327 	asc->asc_invalid = 1;
2328 	mutex_destroy(&asc->asc_txbuflock);
2329 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2330 		if (ATH_TXQ_SETUP(asc, i)) {
2331 			struct ath_txq *txq = &asc->asc_txq[i];
2332 			mutex_destroy(&txq->axq_lock);
2333 		}
2334 	}
2335 	mutex_destroy(&asc->asc_rxbuflock);
2336 	mutex_destroy(&asc->asc_genlock);
2337 	mutex_destroy(&asc->asc_resched_lock);
2338 	ddi_soft_state_free(ath_soft_state_p, instance);
2339 
2340 	return (DDI_FAILURE);
2341 }
2342 
/*
 * Suspend transmit/receive for powerdown (DDI_SUSPEND path).
 *
 * Stops the chip via ath_stop_locked() with the driver lock held.
 * NOTE(review): hardware interrupt handlers stay registered across
 * suspend -- presumably ath_stop_locked() quiesces the chip enough
 * that none fire; confirm against ath_resume/ath_stop_locked.
 *
 * Always returns DDI_SUCCESS.
 */
static int
ath_suspend(ath_t *asc)
{
	ATH_LOCK(asc);
	ath_stop_locked(asc);
	ATH_UNLOCK(asc);
	ATH_DEBUG((ATH_DBG_SUSPEND, "ath: suspended.\n"));

	return (DDI_SUCCESS);
}
2356 
/*
 * detach(9E) entry point: tear down an instance on DDI_DETACH, or
 * suspend it on DDI_SUSPEND.  Reverses the work done by ath_attach().
 *
 * Returns DDI_SUCCESS on success; DDI_FAILURE for unsupported commands
 * or when the MAC layer refuses to disable (clients still active).
 */
static int32_t
ath_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ath_t *asc;

	asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
	ASSERT(asc != NULL);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (ath_suspend(asc));

	default:
		return (DDI_FAILURE);
	}

	/* Refuse to detach while the MAC layer still has users. */
	if (mac_disable(asc->asc_isc.ic_mach) != 0)
		return (DDI_FAILURE);

	ath_stop_scantimer(asc);

	/* disable interrupts */
	ATH_HAL_INTRSET(asc->asc_ah, 0);

	/*
	 * Unregister from the MAC layer subsystem
	 */
	(void) mac_unregister(asc->asc_isc.ic_mach);

	/* free interrupt resources */
	ddi_remove_intr(devinfo, 0, asc->asc_iblock);
	ddi_remove_softintr(asc->asc_softint_id);

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 */
	ieee80211_detach(&asc->asc_isc);
	ath_desc_free(asc);
	ddi_taskq_destroy(asc->asc_tq);
	ath_txq_cleanup(asc);
	asc->asc_ah->ah_detach(asc->asc_ah);

	/* free io handle */
	ddi_regs_map_free(&asc->asc_io_handle);
	pci_config_teardown(&asc->asc_cfg_handle);

	/*
	 * destroy locks
	 * NOTE(review): asc_txbuflock and the per-queue axq_lock mutexes
	 * are not destroyed here, although the attach failure path does
	 * destroy them -- presumably ath_desc_free()/ath_txq_cleanup()
	 * take care of them; confirm.
	 */
	mutex_destroy(&asc->asc_rxbuflock);
	mutex_destroy(&asc->asc_genlock);
	mutex_destroy(&asc->asc_resched_lock);

	ddi_remove_minor_node(devinfo, NULL);
	ddi_soft_state_free(ath_soft_state_p, ddi_get_instance(devinfo));

	return (DDI_SUCCESS);
}
2424 
/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not be
 * blocked: no mutexes, no memory allocation, no delay(9F).
 *
 * Silences the device so a fast-reboot kernel is not corrupted by
 * in-flight DMA: interrupts off, TX DMA stopped per queue, RX path
 * (PCU, filter, DMA) stopped, then the PHY disabled.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int32_t
ath_quiesce(dev_info_t *devinfo)
{
	ath_t 		*asc;
	struct ath_hal	*ah;
	int		i;

	asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));

	/* Nothing to quiesce if this instance never fully attached. */
	if (asc == NULL || (ah = asc->asc_ah) == NULL)
		return (DDI_FAILURE);

	/*
	 * Disable interrupts
	 */
	ATH_HAL_INTRSET(ah, 0);

	/*
	 * Disable TX HW
	 */
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(asc, i)) {
			ATH_HAL_STOPTXDMA(ah, asc->asc_txq[i].axq_qnum);
		}
	}

	/*
	 * Disable RX HW
	 */
	ATH_HAL_STOPPCURECV(ah);
	ATH_HAL_SETRXFILTER(ah, 0);
	ATH_HAL_STOPDMARECV(ah);
	/* busy-wait 3ms (blocking is not allowed here) for RX DMA to drain */
	drv_usecwait(3000);

	/*
	 * Power down HW
	 */
	ATH_HAL_PHYDISABLE(ah);

	return (DDI_SUCCESS);
}
2476 
/*
 * Character/STREAMS driver operations vector.  Entry points:
 * identify/probe are nulldev, attach/detach/quiesce are supplied above,
 * reset is nodev, and the driver is MT-safe (D_MP).
 */
DDI_DEFINE_STREAM_OPS(ath_dev_ops, nulldev, nulldev, ath_attach, ath_detach,
    nodev, NULL, D_MP, NULL, ath_quiesce);

/* Loadable-module descriptor handed to mod_install() via modlinkage. */
static struct modldrv ath_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	"ath driver 1.4/HAL 0.10.5.6",		/* short description */
	&ath_dev_ops		/* driver specific ops */
};

/* Single-linkage list: this module exports exactly one driver. */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&ath_modldrv, NULL
};
2489 
2490 
/*
 * _info(9E): report module information (from modlinkage) to the
 * module subsystem, e.g. for modinfo(1M).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
2496 
2497 int
2498 _init(void)
2499 {
2500 	int status;
2501 
2502 	status = ddi_soft_state_init(&ath_soft_state_p, sizeof (ath_t), 1);
2503 	if (status != 0)
2504 		return (status);
2505 
2506 	mutex_init(&ath_loglock, NULL, MUTEX_DRIVER, NULL);
2507 	ath_halfix_init();
2508 	mac_init_ops(&ath_dev_ops, "ath");
2509 	status = mod_install(&modlinkage);
2510 	if (status != 0) {
2511 		mac_fini_ops(&ath_dev_ops);
2512 		ath_halfix_finit();
2513 		mutex_destroy(&ath_loglock);
2514 		ddi_soft_state_fini(&ath_soft_state_p);
2515 	}
2516 
2517 	return (status);
2518 }
2519 
2520 int
2521 _fini(void)
2522 {
2523 	int status;
2524 
2525 	status = mod_remove(&modlinkage);
2526 	if (status == 0) {
2527 		mac_fini_ops(&ath_dev_ops);
2528 		ath_halfix_finit();
2529 		mutex_destroy(&ath_loglock);
2530 		ddi_soft_state_fini(&ath_soft_state_p);
2531 	}
2532 	return (status);
2533 }
2534