xref: /titanic_51/usr/src/uts/common/io/arn/arn_main.c (revision f7b98820db2e767eb246fc6aef8f740f838f03c6)
1 /*
2  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2008 Atheros Communications Inc.
8  *
9  * Permission to use, copy, modify, and/or distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 #include <sys/sysmacros.h>
23 #include <sys/param.h>
24 #include <sys/types.h>
25 #include <sys/signal.h>
26 #include <sys/stream.h>
27 #include <sys/termio.h>
28 #include <sys/errno.h>
29 #include <sys/file.h>
30 #include <sys/cmn_err.h>
31 #include <sys/stropts.h>
32 #include <sys/strsubr.h>
33 #include <sys/strtty.h>
34 #include <sys/kbio.h>
35 #include <sys/cred.h>
36 #include <sys/stat.h>
37 #include <sys/consdev.h>
38 #include <sys/kmem.h>
39 #include <sys/modctl.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/pci.h>
43 #include <sys/errno.h>
44 #include <sys/mac_provider.h>
45 #include <sys/dlpi.h>
46 #include <sys/ethernet.h>
47 #include <sys/list.h>
48 #include <sys/byteorder.h>
49 #include <sys/strsun.h>
50 #include <sys/policy.h>
51 #include <inet/common.h>
52 #include <inet/nd.h>
53 #include <inet/mi.h>
54 #include <inet/wifi_ioctl.h>
55 #include <sys/mac_wifi.h>
56 #include <sys/net80211.h>
57 #include <sys/net80211_proto.h>
58 #include <sys/net80211_ht.h>
59 
60 
61 #include "arn_ath9k.h"
62 #include "arn_core.h"
63 #include "arn_reg.h"
64 #include "arn_hw.h"
65 
66 #define	ARN_MAX_RSSI	45	/* max rssi */
67 
68 /*
 * Default 11n rates supported by this station.
70  */
71 extern struct ieee80211_htrateset ieee80211_rateset_11n;
72 
73 /*
74  * PIO access attributes for registers
75  */
static ddi_device_acc_attr_t arn_reg_accattr = {
	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
	DDI_STRUCTURE_LE_ACC,	/* registers are little-endian */
	DDI_STRICTORDER_ACC,	/* no reordering/merging of accesses */
	DDI_DEFAULT_ACC		/* default (non-fault-tolerant) access */
};
82 
83 /*
84  * DMA access attributes for descriptors: NOT to be byte swapped.
85  */
static ddi_device_acc_attr_t arn_desc_accattr = {
	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
	DDI_STRUCTURE_LE_ACC,	/* descriptors are little-endian */
	DDI_STRICTORDER_ACC,	/* no reordering/merging of accesses */
	DDI_DEFAULT_ACC		/* default (non-fault-tolerant) access */
};
92 
93 /*
94  * Describes the chip's DMA engine
95  */
static ddi_dma_attr_t arn_dma_attr = {
	DMA_ATTR_V0,	/* version number */
	0,				/* low address */
	0xffffffffU,	/* high address: 32-bit DMA engine */
	0x3ffffU,		/* counter register max */
	1,				/* alignment: byte */
	0xFFF,			/* burst sizes */
	1,				/* minimum transfer size */
	0x3ffffU,		/* max transfer size */
	0xffffffffU,	/* segment boundary */
	1,				/* single cookie: no scatter-gather */
	1,				/* granularity of device */
	0,				/* DMA flags */
};
110 
static ddi_dma_attr_t arn_desc_dma_attr = {
	DMA_ATTR_V0,	/* version number */
	0,				/* low address */
	0xffffffffU,	/* high address: 32-bit DMA engine */
	0xffffffffU,	/* counter register max */
	0x1000,			/* alignment: 4KB for the descriptor ring */
	0xFFF,			/* burst sizes */
	1,				/* minimum transfer size */
	0xffffffffU,	/* max transfer size */
	0xffffffffU,	/* segment boundary */
	1,				/* single cookie: no scatter-gather */
	1,				/* granularity of device */
	0,				/* DMA flags */
};
125 
126 #define	ATH_DEF_CACHE_BYTES	32 /* default cache line size */
127 
static kmutex_t arn_loglock;	/* serializes arn_log/arn_dbg/arn_problem */
static void *arn_soft_state_p = NULL;	/* soft-state head for all instances */
static int arn_dwelltime = 200; /* scan interval */
131 
132 static int	arn_m_stat(void *,  uint_t, uint64_t *);
133 static int	arn_m_start(void *);
134 static void	arn_m_stop(void *);
135 static int	arn_m_promisc(void *, boolean_t);
136 static int	arn_m_multicst(void *, boolean_t, const uint8_t *);
137 static int	arn_m_unicst(void *, const uint8_t *);
138 static mblk_t	*arn_m_tx(void *, mblk_t *);
139 static void	arn_m_ioctl(void *, queue_t *, mblk_t *);
140 static int	arn_m_setprop(void *, const char *, mac_prop_id_t,
141     uint_t, const void *);
142 static int	arn_m_getprop(void *, const char *, mac_prop_id_t,
143     uint_t, void *);
144 static void	arn_m_propinfo(void *, const char *, mac_prop_id_t,
145     mac_prop_info_handle_t);
146 
/* MAC Callback Functions */
static mac_callbacks_t arn_m_callbacks = {
	/* mc_callbacks: mask of the optional entry points supplied below */
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	arn_m_stat,	/* mc_getstat */
	arn_m_start,	/* mc_start */
	arn_m_stop,	/* mc_stop */
	arn_m_promisc,	/* mc_setpromisc */
	arn_m_multicst,	/* mc_multicst */
	arn_m_unicst,	/* mc_unicst */
	arn_m_tx,	/* mc_tx */
	NULL,		/* mc_reserved */
	arn_m_ioctl,	/* mc_ioctl */
	NULL,		/* mc_getcapab (none advertised) */
	NULL,		/* mc_open */
	NULL,		/* mc_close */
	arn_m_setprop,	/* mc_setprop */
	arn_m_getprop,	/* mc_getprop */
	arn_m_propinfo	/* mc_propinfo */
};
166 
167 /*
168  * ARN_DBG_HW
169  * ARN_DBG_REG_IO
170  * ARN_DBG_QUEUE
171  * ARN_DBG_EEPROM
172  * ARN_DBG_XMIT
173  * ARN_DBG_RECV
174  * ARN_DBG_CALIBRATE
175  * ARN_DBG_CHANNEL
176  * ARN_DBG_INTERRUPT
177  * ARN_DBG_REGULATORY
178  * ARN_DBG_ANI
179  * ARN_DBG_POWER_MGMT
180  * ARN_DBG_KEYCACHE
181  * ARN_DBG_BEACON
182  * ARN_DBG_RATE
183  * ARN_DBG_INIT
184  * ARN_DBG_ATTACH
185  * ARN_DBG_DEATCH
186  * ARN_DBG_AGGR
187  * ARN_DBG_RESET
188  * ARN_DBG_FATAL
189  * ARN_DBG_ANY
190  * ARN_DBG_ALL
191  */
/* Enabled debug categories: OR of the ARN_DBG_* bits listed above. */
uint32_t arn_dbg_mask = 0;
193 
194 /*
195  * Exception/warning cases not leading to panic.
196  */
197 void
198 arn_problem(const int8_t *fmt, ...)
199 {
200 	va_list args;
201 
202 	mutex_enter(&arn_loglock);
203 
204 	va_start(args, fmt);
205 	vcmn_err(CE_WARN, fmt, args);
206 	va_end(args);
207 
208 	mutex_exit(&arn_loglock);
209 }
210 
211 /*
212  * Normal log information independent of debug.
213  */
214 void
215 arn_log(const int8_t *fmt, ...)
216 {
217 	va_list args;
218 
219 	mutex_enter(&arn_loglock);
220 
221 	va_start(args, fmt);
222 	vcmn_err(CE_CONT, fmt, args);
223 	va_end(args);
224 
225 	mutex_exit(&arn_loglock);
226 }
227 
228 void
229 arn_dbg(uint32_t dbg_flags, const int8_t *fmt, ...)
230 {
231 	va_list args;
232 
233 	if (dbg_flags & arn_dbg_mask) {
234 		mutex_enter(&arn_loglock);
235 		va_start(args, fmt);
236 		vcmn_err(CE_CONT, fmt, args);
237 		va_end(args);
238 		mutex_exit(&arn_loglock);
239 	}
240 }
241 
242 /*
243  * Read and write, they both share the same lock. We do this to serialize
244  * reads and writes on Atheros 802.11n PCI devices only. This is required
245  * as the FIFO on these devices can only accept sanely 2 requests. After
246  * that the device goes bananas. Serializing the reads/writes prevents this
247  * from happening.
248  */
249 void
250 arn_iowrite32(struct ath_hal *ah, uint32_t reg_offset, uint32_t val)
251 {
252 	struct arn_softc *sc = ah->ah_sc;
253 	if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
254 		mutex_enter(&sc->sc_serial_rw);
255 		ddi_put32(sc->sc_io_handle,
256 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
257 		mutex_exit(&sc->sc_serial_rw);
258 	} else {
259 		ddi_put32(sc->sc_io_handle,
260 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
261 	}
262 }
263 
264 unsigned int
265 arn_ioread32(struct ath_hal *ah, uint32_t reg_offset)
266 {
267 	uint32_t val;
268 	struct arn_softc *sc = ah->ah_sc;
269 	if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
270 		mutex_enter(&sc->sc_serial_rw);
271 		val = ddi_get32(sc->sc_io_handle,
272 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
273 		mutex_exit(&sc->sc_serial_rw);
274 	} else {
275 		val = ddi_get32(sc->sc_io_handle,
276 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
277 	}
278 
279 	return (val);
280 }
281 
282 /*
283  * Allocate an area of memory and a DMA handle for accessing it
284  */
285 static int
286 arn_alloc_dma_mem(dev_info_t *devinfo, ddi_dma_attr_t *dma_attr, size_t memsize,
287     ddi_device_acc_attr_t *attr_p, uint_t alloc_flags,
288     uint_t bind_flags, dma_area_t *dma_p)
289 {
290 	int err;
291 
292 	/*
293 	 * Allocate handle
294 	 */
295 	err = ddi_dma_alloc_handle(devinfo, dma_attr,
296 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
297 	if (err != DDI_SUCCESS)
298 		return (DDI_FAILURE);
299 
300 	/*
301 	 * Allocate memory
302 	 */
303 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
304 	    alloc_flags, DDI_DMA_SLEEP, NULL, &dma_p->mem_va,
305 	    &dma_p->alength, &dma_p->acc_hdl);
306 	if (err != DDI_SUCCESS)
307 		return (DDI_FAILURE);
308 
309 	/*
310 	 * Bind the two together
311 	 */
312 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
313 	    dma_p->mem_va, dma_p->alength, bind_flags,
314 	    DDI_DMA_SLEEP, NULL, &dma_p->cookie, &dma_p->ncookies);
315 	if (err != DDI_DMA_MAPPED)
316 		return (DDI_FAILURE);
317 
318 	dma_p->nslots = ~0U;
319 	dma_p->size = ~0U;
320 	dma_p->token = ~0U;
321 	dma_p->offset = 0;
322 	return (DDI_SUCCESS);
323 }
324 
325 /*
326  * Free one allocated area of DMAable memory
327  */
328 static void
329 arn_free_dma_mem(dma_area_t *dma_p)
330 {
331 	if (dma_p->dma_hdl != NULL) {
332 		(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
333 		if (dma_p->acc_hdl != NULL) {
334 			ddi_dma_mem_free(&dma_p->acc_hdl);
335 			dma_p->acc_hdl = NULL;
336 		}
337 		ddi_dma_free_handle(&dma_p->dma_hdl);
338 		dma_p->ncookies = 0;
339 		dma_p->dma_hdl = NULL;
340 	}
341 }
342 
343 /*
344  * Initialize tx, rx. or beacon buffer list. Allocate DMA memory for
345  * each buffer.
346  */
347 static int
348 arn_buflist_setup(dev_info_t *devinfo,
349     struct arn_softc *sc,
350     list_t *bflist,
351     struct ath_buf **pbf,
352     struct ath_desc **pds,
353     int nbuf,
354     uint_t dmabflags,
355     uint32_t buflen)
356 {
357 	int i, err;
358 	struct ath_buf *bf = *pbf;
359 	struct ath_desc *ds = *pds;
360 
361 	list_create(bflist, sizeof (struct ath_buf),
362 	    offsetof(struct ath_buf, bf_node));
363 	for (i = 0; i < nbuf; i++, bf++, ds++) {
364 		bf->bf_desc = ds;
365 		bf->bf_daddr = sc->sc_desc_dma.cookie.dmac_address +
366 		    ((uintptr_t)ds - (uintptr_t)sc->sc_desc);
367 		list_insert_tail(bflist, bf);
368 
369 		/* alloc DMA memory */
370 		err = arn_alloc_dma_mem(devinfo, &arn_dma_attr,
371 		    buflen, &arn_desc_accattr, DDI_DMA_STREAMING,
372 		    dmabflags, &bf->bf_dma);
373 		if (err != DDI_SUCCESS)
374 			return (err);
375 	}
376 	*pbf = bf;
377 	*pds = ds;
378 
379 	return (DDI_SUCCESS);
380 }
381 
382 /*
383  * Destroy tx, rx or beacon buffer list. Free DMA memory.
384  */
385 static void
386 arn_buflist_cleanup(list_t *buflist)
387 {
388 	struct ath_buf *bf;
389 
390 	if (!buflist)
391 		return;
392 
393 	bf = list_head(buflist);
394 	while (bf != NULL) {
395 		if (bf->bf_m != NULL) {
396 			freemsg(bf->bf_m);
397 			bf->bf_m = NULL;
398 		}
399 		/* Free DMA buffer */
400 		arn_free_dma_mem(&bf->bf_dma);
401 		if (bf->bf_in != NULL) {
402 			ieee80211_free_node(bf->bf_in);
403 			bf->bf_in = NULL;
404 		}
405 		list_remove(buflist, bf);
406 		bf = list_head(buflist);
407 	}
408 	list_destroy(buflist);
409 }
410 
411 static void
412 arn_desc_free(struct arn_softc *sc)
413 {
414 	arn_buflist_cleanup(&sc->sc_txbuf_list);
415 	arn_buflist_cleanup(&sc->sc_rxbuf_list);
416 #ifdef ARN_IBSS
417 	arn_buflist_cleanup(&sc->sc_bcbuf_list);
418 #endif
419 
420 	/* Free descriptor DMA buffer */
421 	arn_free_dma_mem(&sc->sc_desc_dma);
422 
423 	kmem_free((void *)sc->sc_vbufptr, sc->sc_vbuflen);
424 	sc->sc_vbufptr = NULL;
425 }
426 
427 static int
428 arn_desc_alloc(dev_info_t *devinfo, struct arn_softc *sc)
429 {
430 	int err;
431 	size_t size;
432 	struct ath_desc *ds;
433 	struct ath_buf *bf;
434 
435 #ifdef ARN_IBSS
436 	size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF);
437 #else
438 	size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF);
439 #endif
440 
441 	err = arn_alloc_dma_mem(devinfo, &arn_desc_dma_attr, size,
442 	    &arn_desc_accattr, DDI_DMA_CONSISTENT,
443 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &sc->sc_desc_dma);
444 
445 	/* virtual address of the first descriptor */
446 	sc->sc_desc = (struct ath_desc *)sc->sc_desc_dma.mem_va;
447 
448 	ds = sc->sc_desc;
449 	ARN_DBG((ARN_DBG_INIT, "arn: arn_desc_alloc(): DMA map: "
450 	    "%p (%d) -> %p\n",
451 	    sc->sc_desc, sc->sc_desc_dma.alength,
452 	    sc->sc_desc_dma.cookie.dmac_address));
453 
454 	/* allocate data structures to describe TX/RX DMA buffers */
455 #ifdef ARN_IBSS
456 	sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF +
457 	    ATH_BCBUF);
458 #else
459 	sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF);
460 #endif
461 	bf = (struct ath_buf *)kmem_zalloc(sc->sc_vbuflen, KM_SLEEP);
462 	sc->sc_vbufptr = bf;
463 
464 	/* DMA buffer size for each TX/RX packet */
465 #ifdef ARN_TX_AGGREGRATION
466 	sc->tx_dmabuf_size =
467 	    roundup((IEEE80211_MAX_MPDU_LEN + 3840 * 2),
468 	    min(sc->sc_cachelsz, (uint16_t)64));
469 #else
470 	sc->tx_dmabuf_size =
471 	    roundup(IEEE80211_MAX_MPDU_LEN, min(sc->sc_cachelsz, (uint16_t)64));
472 #endif
473 	sc->rx_dmabuf_size =
474 	    roundup(IEEE80211_MAX_MPDU_LEN, min(sc->sc_cachelsz, (uint16_t)64));
475 
476 	/* create RX buffer list */
477 	err = arn_buflist_setup(devinfo, sc, &sc->sc_rxbuf_list, &bf, &ds,
478 	    ATH_RXBUF, DDI_DMA_READ | DDI_DMA_STREAMING, sc->rx_dmabuf_size);
479 	if (err != DDI_SUCCESS) {
480 		arn_desc_free(sc);
481 		return (err);
482 	}
483 
484 	/* create TX buffer list */
485 	err = arn_buflist_setup(devinfo, sc, &sc->sc_txbuf_list, &bf, &ds,
486 	    ATH_TXBUF, DDI_DMA_STREAMING, sc->tx_dmabuf_size);
487 	if (err != DDI_SUCCESS) {
488 		arn_desc_free(sc);
489 		return (err);
490 	}
491 
492 	/* create beacon buffer list */
493 #ifdef ARN_IBSS
494 	err = arn_buflist_setup(devinfo, sc, &sc->sc_bcbuf_list, &bf, &ds,
495 	    ATH_BCBUF, DDI_DMA_STREAMING);
496 	if (err != DDI_SUCCESS) {
497 		arn_desc_free(sc);
498 		return (err);
499 	}
500 #endif
501 
502 	return (DDI_SUCCESS);
503 }
504 
505 static struct ath_rate_table *
506 /* LINTED E_STATIC_UNUSED */
507 arn_get_ratetable(struct arn_softc *sc, uint32_t mode)
508 {
509 	struct ath_rate_table *rate_table = NULL;
510 
511 	switch (mode) {
512 	case IEEE80211_MODE_11A:
513 		rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
514 		break;
515 	case IEEE80211_MODE_11B:
516 		rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
517 		break;
518 	case IEEE80211_MODE_11G:
519 		rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
520 		break;
521 #ifdef ARB_11N
522 	case IEEE80211_MODE_11NA_HT20:
523 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
524 		break;
525 	case IEEE80211_MODE_11NG_HT20:
526 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
527 		break;
528 	case IEEE80211_MODE_11NA_HT40PLUS:
529 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
530 		break;
531 	case IEEE80211_MODE_11NA_HT40MINUS:
532 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
533 		break;
534 	case IEEE80211_MODE_11NG_HT40PLUS:
535 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
536 		break;
537 	case IEEE80211_MODE_11NG_HT40MINUS:
538 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
539 		break;
540 #endif
541 	default:
542 		ARN_DBG((ARN_DBG_FATAL, "arn: arn_get_ratetable(): "
543 		    "invalid mode %u\n", mode));
544 		return (NULL);
545 	}
546 
547 	return (rate_table);
548 
549 }
550 
551 static void
552 arn_setcurmode(struct arn_softc *sc, enum wireless_mode mode)
553 {
554 	struct ath_rate_table *rt;
555 	int i;
556 
557 	for (i = 0; i < sizeof (sc->asc_rixmap); i++)
558 		sc->asc_rixmap[i] = 0xff;
559 
560 	rt = sc->hw_rate_table[mode];
561 	ASSERT(rt != NULL);
562 
563 	for (i = 0; i < rt->rate_cnt; i++)
564 		sc->asc_rixmap[rt->info[i].dot11rate &
565 		    IEEE80211_RATE_VAL] = (uint8_t)i; /* LINT */
566 
567 	sc->sc_currates = rt;
568 	sc->sc_curmode = mode;
569 
570 	/*
571 	 * All protection frames are transmited at 2Mb/s for
572 	 * 11g, otherwise at 1Mb/s.
573 	 * XXX select protection rate index from rate table.
574 	 */
575 	sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
576 }
577 
578 static enum wireless_mode
579 arn_chan2mode(struct ath9k_channel *chan)
580 {
581 	if (chan->chanmode == CHANNEL_A)
582 		return (ATH9K_MODE_11A);
583 	else if (chan->chanmode == CHANNEL_G)
584 		return (ATH9K_MODE_11G);
585 	else if (chan->chanmode == CHANNEL_B)
586 		return (ATH9K_MODE_11B);
587 	else if (chan->chanmode == CHANNEL_A_HT20)
588 		return (ATH9K_MODE_11NA_HT20);
589 	else if (chan->chanmode == CHANNEL_G_HT20)
590 		return (ATH9K_MODE_11NG_HT20);
591 	else if (chan->chanmode == CHANNEL_A_HT40PLUS)
592 		return (ATH9K_MODE_11NA_HT40PLUS);
593 	else if (chan->chanmode == CHANNEL_A_HT40MINUS)
594 		return (ATH9K_MODE_11NA_HT40MINUS);
595 	else if (chan->chanmode == CHANNEL_G_HT40PLUS)
596 		return (ATH9K_MODE_11NG_HT40PLUS);
597 	else if (chan->chanmode == CHANNEL_G_HT40MINUS)
598 		return (ATH9K_MODE_11NG_HT40MINUS);
599 
600 	return (ATH9K_MODE_11B);
601 }
602 
603 static void
604 arn_update_txpow(struct arn_softc *sc)
605 {
606 	struct ath_hal 	*ah = sc->sc_ah;
607 	uint32_t txpow;
608 
609 	if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
610 		(void) ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
611 		/* read back in case value is clamped */
612 		(void) ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
613 		sc->sc_curtxpow = (uint32_t)txpow;
614 	}
615 }
616 
/*
 * Convert the 802.11n D2.0 "Minimum MPDU Start Spacing" field into
 * microseconds, rounded up to this driver's 1 us precision:
 *   0 -> no restriction, 1/2/3 (1/4, 1/2 and 1 us) -> 1 us,
 *   4 -> 2 us, 5 -> 4 us, 6 -> 8 us, 7 -> 16 us.
 * Out-of-range values map to 0 (no restriction).
 */
uint8_t
parse_mpdudensity(uint8_t mpdudensity)
{
	/* spacing in microseconds, indexed by the 3-bit density field */
	static const uint8_t density_to_us[] = {
		0, 1, 1, 1, 2, 4, 8, 16
	};

	if (mpdudensity >= sizeof (density_to_us))
		return (0);

	return (density_to_us[mpdudensity]);
}
654 
655 static void
656 arn_setup_rates(struct arn_softc *sc, uint32_t mode)
657 {
658 	int i, maxrates;
659 	struct ath_rate_table *rate_table = NULL;
660 	struct ieee80211_rateset *rateset;
661 	ieee80211com_t *ic = (ieee80211com_t *)sc;
662 
663 	/* rate_table = arn_get_ratetable(sc, mode); */
664 	switch (mode) {
665 	case IEEE80211_MODE_11A:
666 		rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
667 		break;
668 	case IEEE80211_MODE_11B:
669 		rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
670 		break;
671 	case IEEE80211_MODE_11G:
672 		rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
673 		break;
674 #ifdef ARN_11N
675 	case IEEE80211_MODE_11NA_HT20:
676 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
677 		break;
678 	case IEEE80211_MODE_11NG_HT20:
679 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
680 		break;
681 	case IEEE80211_MODE_11NA_HT40PLUS:
682 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
683 		break;
684 	case IEEE80211_MODE_11NA_HT40MINUS:
685 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
686 		break;
687 	case IEEE80211_MODE_11NG_HT40PLUS:
688 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
689 		break;
690 	case IEEE80211_MODE_11NG_HT40MINUS:
691 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
692 		break;
693 #endif
694 	default:
695 		ARN_DBG((ARN_DBG_RATE, "arn: arn_get_ratetable(): "
696 		    "invalid mode %u\n", mode));
697 		break;
698 	}
699 	if (rate_table == NULL)
700 		return;
701 	if (rate_table->rate_cnt > ATH_RATE_MAX) {
702 		ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
703 		    "rate table too small (%u > %u)\n",
704 		    rate_table->rate_cnt, IEEE80211_RATE_MAXSIZE));
705 		maxrates = ATH_RATE_MAX;
706 	} else
707 		maxrates = rate_table->rate_cnt;
708 
709 	ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
710 	    "maxrates is %d\n", maxrates));
711 
712 	rateset = &ic->ic_sup_rates[mode];
713 	for (i = 0; i < maxrates; i++) {
714 		rateset->ir_rates[i] = rate_table->info[i].dot11rate;
715 		ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
716 		    "%d\n", rate_table->info[i].dot11rate));
717 	}
718 	rateset->ir_nrates = (uint8_t)maxrates; /* ??? */
719 }
720 
721 static int
722 arn_setup_channels(struct arn_softc *sc)
723 {
724 	struct ath_hal *ah = sc->sc_ah;
725 	ieee80211com_t *ic = (ieee80211com_t *)sc;
726 	int nchan, i, index;
727 	uint8_t regclassids[ATH_REGCLASSIDS_MAX];
728 	uint32_t nregclass = 0;
729 	struct ath9k_channel *c;
730 
731 	/* Fill in ah->ah_channels */
732 	if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (uint32_t *)&nchan,
733 	    regclassids, ATH_REGCLASSIDS_MAX, &nregclass, CTRY_DEFAULT,
734 	    B_FALSE, 1)) {
735 		uint32_t rd = ah->ah_currentRD;
736 		ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
737 		    "unable to collect channel list; "
738 		    "regdomain likely %u country code %u\n",
739 		    rd, CTRY_DEFAULT));
740 		return (EINVAL);
741 	}
742 
743 	ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
744 	    "number of channel is %d\n", nchan));
745 
746 	for (i = 0; i < nchan; i++) {
747 		c = &ah->ah_channels[i];
748 		uint32_t flags;
749 		index = ath9k_hw_mhz2ieee(ah, c->channel, c->channelFlags);
750 
751 		if (index > IEEE80211_CHAN_MAX) {
752 			ARN_DBG((ARN_DBG_CHANNEL,
753 			    "arn: arn_setup_channels(): "
754 			    "bad hal channel %d (%u/%x) ignored\n",
755 			    index, c->channel, c->channelFlags));
756 			continue;
757 		}
758 		/* NB: flags are known to be compatible */
759 		if (index < 0) {
760 			/*
761 			 * can't handle frequency <2400MHz (negative
762 			 * channels) right now
763 			 */
764 			ARN_DBG((ARN_DBG_CHANNEL,
765 			    "arn: arn_setup_channels(): "
766 			    "hal channel %d (%u/%x) "
767 			    "cannot be handled, ignored\n",
768 			    index, c->channel, c->channelFlags));
769 			continue;
770 		}
771 
772 		/*
773 		 * Calculate net80211 flags; most are compatible
774 		 * but some need massaging.  Note the static turbo
775 		 * conversion can be removed once net80211 is updated
776 		 * to understand static vs. dynamic turbo.
777 		 */
778 
779 		flags = c->channelFlags & (CHANNEL_ALL | CHANNEL_PASSIVE);
780 
781 		if (ic->ic_sup_channels[index].ich_freq == 0) {
782 			ic->ic_sup_channels[index].ich_freq = c->channel;
783 			ic->ic_sup_channels[index].ich_flags = flags;
784 		} else {
785 			/* channels overlap; e.g. 11g and 11b */
786 			ic->ic_sup_channels[index].ich_flags |= flags;
787 		}
788 		if ((c->channelFlags & CHANNEL_G) == CHANNEL_G) {
789 			sc->sc_have11g = 1;
790 			ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
791 			    IEEE80211_C_SHSLOT;	/* short slot time */
792 		}
793 	}
794 
795 	return (0);
796 }
797 
798 uint32_t
799 arn_chan2flags(ieee80211com_t *isc, struct ieee80211_channel *chan)
800 {
801 	uint32_t channel_mode;
802 	switch (ieee80211_chan2mode(isc, chan)) {
803 	case IEEE80211_MODE_11NA:
804 		if (chan->ich_flags & IEEE80211_CHAN_HT40U)
805 			channel_mode = CHANNEL_A_HT40PLUS;
806 		else if (chan->ich_flags & IEEE80211_CHAN_HT40D)
807 			channel_mode = CHANNEL_A_HT40MINUS;
808 		else
809 			channel_mode = CHANNEL_A_HT20;
810 		break;
811 	case IEEE80211_MODE_11NG:
812 		if (chan->ich_flags & IEEE80211_CHAN_HT40U)
813 			channel_mode = CHANNEL_G_HT40PLUS;
814 		else if (chan->ich_flags & IEEE80211_CHAN_HT40D)
815 			channel_mode = CHANNEL_G_HT40MINUS;
816 		else
817 			channel_mode = CHANNEL_G_HT20;
818 		break;
819 	case IEEE80211_MODE_TURBO_G:
820 	case IEEE80211_MODE_STURBO_A:
821 	case IEEE80211_MODE_TURBO_A:
822 		channel_mode = 0;
823 		break;
824 	case IEEE80211_MODE_11A:
825 		channel_mode = CHANNEL_A;
826 		break;
827 	case IEEE80211_MODE_11G:
828 		channel_mode = CHANNEL_B;
829 		break;
830 	case IEEE80211_MODE_11B:
831 		channel_mode = CHANNEL_G;
832 		break;
833 	case IEEE80211_MODE_FH:
834 		channel_mode = 0;
835 		break;
836 	default:
837 		break;
838 	}
839 
840 	return (channel_mode);
841 }
842 
843 /*
844  * Update internal state after a channel change.
845  */
846 void
847 arn_chan_change(struct arn_softc *sc, struct ieee80211_channel *chan)
848 {
849 	struct ieee80211com *ic = &sc->sc_isc;
850 	enum ieee80211_phymode mode;
851 	enum wireless_mode wlmode;
852 
853 	/*
854 	 * Change channels and update the h/w rate map
855 	 * if we're switching; e.g. 11a to 11b/g.
856 	 */
857 	mode = ieee80211_chan2mode(ic, chan);
858 	switch (mode) {
859 	case IEEE80211_MODE_11A:
860 		wlmode = ATH9K_MODE_11A;
861 		break;
862 	case IEEE80211_MODE_11B:
863 		wlmode = ATH9K_MODE_11B;
864 		break;
865 	case IEEE80211_MODE_11G:
866 		wlmode = ATH9K_MODE_11B;
867 		break;
868 	default:
869 		break;
870 	}
871 	if (wlmode != sc->sc_curmode)
872 		arn_setcurmode(sc, wlmode);
873 
874 }
875 
876 /*
877  * Set/change channels.  If the channel is really being changed, it's done
878  * by reseting the chip.  To accomplish this we must first cleanup any pending
879  * DMA, then restart stuff.
880  */
static int
arn_set_channel(struct arn_softc *sc, struct ath9k_channel *hchan)
{
	struct ath_hal *ah = sc->sc_ah;
	ieee80211com_t *ic = &sc->sc_isc;
	boolean_t fastcc = B_TRUE;	/* attempt a fast channel change */
	boolean_t  stopped;
	struct ieee80211_channel chan;
	enum wireless_mode curmode;

	/* hardware gone or never attached: nothing to do */
	if (sc->sc_flags & SC_OP_INVALID)
		return (EIO);

	/* reset only if the channel or a pending reset flag requires it */
	if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
	    hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
	    (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
	    (sc->sc_flags & SC_OP_FULL_RESET)) {
		int status;

		/*
		 * This is only performed if the channel settings have
		 * actually changed.
		 *
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		(void) ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
		arn_draintxq(sc, B_FALSE);	/* clear pending tx frames */
		stopped = arn_stoprecv(sc);	/* turn off frame recv */

		/*
		 * XXX: do not flush receive queue here. We don't want
		 * to flush data frames already in queue because of
		 * changing channel.
		 */

		/* a full (slow) reset is required if RX didn't stop cleanly */
		if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
			fastcc = B_FALSE;

		ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_set_channel(): "
		    "(%u MHz) -> (%u MHz), cflags:%x, chanwidth: %d\n",
		    sc->sc_ah->ah_curchan->channel,
		    hchan->channel, hchan->channelFlags, sc->tx_chan_width));

		if (!ath9k_hw_reset(ah, hchan, sc->tx_chan_width,
		    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
		    sc->sc_ht_extprotspacing, fastcc, &status)) {
			ARN_DBG((ARN_DBG_FATAL, "arn: arn_set_channel(): "
			    "unable to reset channel %u (%uMhz) "
			    "flags 0x%x hal status %u\n",
			    ath9k_hw_mhz2ieee(ah, hchan->channel,
			    hchan->channelFlags),
			    hchan->channel, hchan->channelFlags, status));
			return (EIO);
		}

		sc->sc_curchan = *hchan;

		/* the reset just performed satisfies any pending requests */
		sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
		sc->sc_flags &= ~SC_OP_FULL_RESET;

		if (arn_startrecv(sc) != 0) {
			arn_problem("arn: arn_set_channel(): "
			    "unable to restart recv logic\n");
			return (EIO);
		}

		/*
		 * NOTE(review): `chan' is a stack variable; storing its
		 * address in ic->ic_ibss_chan lets it escape this
		 * function.  Confirm net80211 copies or replaces the
		 * pointer before it is dereferenced after we return.
		 */
		chan.ich_freq = hchan->channel;
		chan.ich_flags = hchan->channelFlags;
		ic->ic_ibss_chan = &chan;

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		curmode = arn_chan2mode(hchan);
		if (curmode != sc->sc_curmode)
			arn_setcurmode(sc, arn_chan2mode(hchan));

		arn_update_txpow(sc);

		/* re-enable the interrupts masked off above */
		(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}

	return (0);
}
969 
970 /*
971  *  This routine performs the periodic noise floor calibration function
972  *  that is used to adjust and optimize the chip performance.  This
973  *  takes environmental changes (location, temperature) into account.
974  *  When the task is complete, it reschedules itself depending on the
975  *  appropriate interval that was calculated.
976  */
977 static void
978 arn_ani_calibrate(void *arg)
979 
980 {
981 	ieee80211com_t *ic = (ieee80211com_t *)arg;
982 	struct arn_softc *sc = (struct arn_softc *)ic;
983 	struct ath_hal *ah = sc->sc_ah;
984 	boolean_t longcal = B_FALSE;
985 	boolean_t shortcal = B_FALSE;
986 	boolean_t aniflag = B_FALSE;
987 	unsigned int timestamp = drv_hztousec(ddi_get_lbolt())/1000;
988 	uint32_t cal_interval;
989 
990 	/*
991 	 * don't calibrate when we're scanning.
992 	 * we are most likely not on our home channel.
993 	 */
994 	if (ic->ic_state != IEEE80211_S_RUN)
995 		goto settimer;
996 
997 	/* Long calibration runs independently of short calibration. */
998 	if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
999 		longcal = B_TRUE;
1000 		ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1001 		    "%s: longcal @%lu\n", __func__, drv_hztousec));
1002 		sc->sc_ani.sc_longcal_timer = timestamp;
1003 	}
1004 
1005 	/* Short calibration applies only while sc_caldone is FALSE */
1006 	if (!sc->sc_ani.sc_caldone) {
1007 		if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
1008 		    ATH_SHORT_CALINTERVAL) {
1009 			shortcal = B_TRUE;
1010 			ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1011 			    "%s: shortcal @%lu\n",
1012 			    __func__, drv_hztousec));
1013 			sc->sc_ani.sc_shortcal_timer = timestamp;
1014 			sc->sc_ani.sc_resetcal_timer = timestamp;
1015 		}
1016 	} else {
1017 		if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
1018 		    ATH_RESTART_CALINTERVAL) {
1019 			ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
1020 						&sc->sc_ani.sc_caldone);
1021 			if (sc->sc_ani.sc_caldone)
1022 				sc->sc_ani.sc_resetcal_timer = timestamp;
1023 		}
1024 	}
1025 
1026 	/* Verify whether we must check ANI */
1027 	if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
1028 	    ATH_ANI_POLLINTERVAL) {
1029 		aniflag = B_TRUE;
1030 		sc->sc_ani.sc_checkani_timer = timestamp;
1031 	}
1032 
1033 	/* Skip all processing if there's nothing to do. */
1034 	if (longcal || shortcal || aniflag) {
1035 		/* Call ANI routine if necessary */
1036 		if (aniflag)
1037 			ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
1038 			    ah->ah_curchan);
1039 
1040 		/* Perform calibration if necessary */
1041 		if (longcal || shortcal) {
1042 			boolean_t iscaldone = B_FALSE;
1043 
1044 			if (ath9k_hw_calibrate(ah, ah->ah_curchan,
1045 			    sc->sc_rx_chainmask, longcal, &iscaldone)) {
1046 				if (longcal)
1047 					sc->sc_ani.sc_noise_floor =
1048 					    ath9k_hw_getchan_noise(ah,
1049 					    ah->ah_curchan);
1050 
1051 				ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1052 				    "%s: calibrate chan %u/%x nf: %d\n",
1053 				    __func__,
1054 				    ah->ah_curchan->channel,
1055 				    ah->ah_curchan->channelFlags,
1056 				    sc->sc_ani.sc_noise_floor));
1057 			} else {
1058 				ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1059 				    "%s: calibrate chan %u/%x failed\n",
1060 				    __func__,
1061 				    ah->ah_curchan->channel,
1062 				    ah->ah_curchan->channelFlags));
1063 			}
1064 			sc->sc_ani.sc_caldone = iscaldone;
1065 		}
1066 	}
1067 
1068 settimer:
1069 	/*
1070 	 * Set timer interval based on previous results.
1071 	 * The interval must be the shortest necessary to satisfy ANI,
1072 	 * short calibration and long calibration.
1073 	 */
1074 	cal_interval = ATH_LONG_CALINTERVAL;
1075 	if (sc->sc_ah->ah_config.enable_ani)
1076 		cal_interval =
1077 		    min(cal_interval, (uint32_t)ATH_ANI_POLLINTERVAL);
1078 
1079 	if (!sc->sc_ani.sc_caldone)
1080 		cal_interval = min(cal_interval,
1081 		    (uint32_t)ATH_SHORT_CALINTERVAL);
1082 
1083 	sc->sc_scan_timer = 0;
1084 	sc->sc_scan_timer = timeout(arn_ani_calibrate, (void *)sc,
1085 	    drv_usectohz(cal_interval * 1000));
1086 }
1087 
1088 static void
1089 arn_stop_caltimer(struct arn_softc *sc)
1090 {
1091 	timeout_id_t tmp_id = 0;
1092 
1093 	while ((sc->sc_cal_timer != 0) && (tmp_id != sc->sc_cal_timer)) {
1094 		tmp_id = sc->sc_cal_timer;
1095 		(void) untimeout(tmp_id);
1096 	}
1097 	sc->sc_cal_timer = 0;
1098 }
1099 
/*
 * Hard interrupt handler.  Decides whether the interrupt belongs to this
 * device (the IRQ may be shared), reads and acknowledges the pseudo-ISR,
 * and dispatches per-cause handling.  Heavy work (rx, tx completion,
 * beacon-miss) is deferred to a soft interrupt or taskq; fatal conditions
 * trigger a full chip reset before returning.
 *
 * Returns DDI_INTR_CLAIMED if the interrupt was ours, otherwise
 * DDI_INTR_UNCLAIMED.
 */
static uint_t
arn_isr(caddr_t arg)
{
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	struct arn_softc *sc = (struct arn_softc *)arg;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	ieee80211com_t *ic = (ieee80211com_t *)sc;

	ARN_LOCK(sc);

	if (sc->sc_flags & SC_OP_INVALID) {
		/*
		 * The hardware is not ready/present, don't
		 * touch anything. Note this can happen early
		 * on if the IRQ is shared.
		 */
		ARN_UNLOCK(sc);
		return (DDI_INTR_UNCLAIMED);
	}
	if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
		ARN_UNLOCK(sc);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Figure out the reason(s) for the interrupt. Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	(void) ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */

	status &= sc->sc_imask; /* discard unasked-for bits */

	/*
	 * If there are no status bits set, then this interrupt was not
	 * for me (should have been caught above).
	 */
	if (!status) {
		ARN_UNLOCK(sc);
		return (DDI_INTR_UNCLAIMED);
	}

	/* Stash the raw cause bits for the softint/taskq consumers. */
	sc->sc_intrstatus = status;

	if (status & ATH9K_INT_FATAL) {
		/* need a chip reset */
		ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
		    "ATH9K_INT_FATAL\n"));
		goto reset;
	} else if (status & ATH9K_INT_RXORN) {
		/* need a chip reset */
		ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
		    "ATH9K_INT_RXORN\n"));
		goto reset;
	} else {
		if (status & ATH9K_INT_RXEOL) {
			/*
			 * NB: the hardware should re-read the link when
			 * RXE bit is written, but it doesn't work
			 * at least on older hardware revs.
			 */
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_RXEOL\n"));
			sc->sc_rxlink = NULL;
		}
		if (status & ATH9K_INT_TXURN) {
			/* bump tx trigger level */
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_TXURN\n"));
			(void) ath9k_hw_updatetxtriglevel(ah, B_TRUE);
		}
		/* XXX: optimize this */
		if (status & ATH9K_INT_RX) {
			/* Defer rx processing to the soft interrupt. */
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_RX\n"));
			sc->sc_rx_pend = 1;
			ddi_trigger_softintr(sc->sc_softint_id);
		}
		if (status & ATH9K_INT_TX) {
			/* Defer tx completion processing to the taskq. */
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_TX\n"));
			if (ddi_taskq_dispatch(sc->sc_tq,
			    arn_tx_int_proc, sc, DDI_NOSLEEP) !=
			    DDI_SUCCESS) {
				arn_problem("arn: arn_isr(): "
				    "No memory for tx taskq\n");
				}
			}
#ifdef ARN_ATH9K_INT_MIB
		if (status & ATH9K_INT_MIB) {
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to
			 * fire.
			 */
			(void) ath9k_hw_set_interrupts(ah, 0);
			/*
			 * Let the hal handle the event. We assume
			 * it will clear whatever condition caused
			 * the interrupt.
			 */
			ath9k_hw_procmibevent(ah, &sc->sc_halstats);
			(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_MIB\n"));
		}
#endif

#ifdef ARN_ATH9K_INT_TIM_TIMER
		if (status & ATH9K_INT_TIM_TIMER) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_TIM_TIMER\n"));
			if (!(ah->ah_caps.hw_caps &
			    ATH9K_HW_CAP_AUTOSLEEP)) {
				/*
				 * Clear RxAbort bit so that we can
				 * receive frames
				 */
				ath9k_hw_setrxabort(ah, 0);
				goto reset;
			}
		}
#endif

		if (status & ATH9K_INT_BMISS) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_BMISS\n"));
#ifdef ARN_HW_BEACON_MISS_HANDLE
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "handle beacon mmiss by H/W mechanism\n"));
			if (ddi_taskq_dispatch(sc->sc_tq, arn_bmiss_proc,
			    sc, DDI_NOSLEEP) != DDI_SUCCESS) {
				arn_problem("arn: arn_isr(): "
				    "No memory available for bmiss taskq\n");
			}
#else
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "handle beacon mmiss by S/W mechanism\n"));
#endif /* ARN_HW_BEACON_MISS_HANDLE */
		}

		/* The remaining causes are handled without the softc lock. */
		ARN_UNLOCK(sc);

#ifdef ARN_ATH9K_INT_CST
		/* carrier sense timeout */
		if (status & ATH9K_INT_CST) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_CST\n"));
			return (DDI_INTR_CLAIMED);
		}
#endif

		if (status & ATH9K_INT_SWBA) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_SWBA\n"));
			/* This will occur only in Host-AP or Ad-Hoc mode */
			return (DDI_INTR_CLAIMED);
		}
	}

	return (DDI_INTR_CLAIMED);
reset:
	/* Fatal/overrun path: reset the chip while still holding the lock. */
	ARN_DBG((ARN_DBG_INTERRUPT, "Rset for fatal err\n"));
	(void) arn_reset(ic);
	ARN_UNLOCK(sc);
	return (DDI_INTR_CLAIMED);
}
1269 
1270 static int
1271 arn_get_channel(struct arn_softc *sc, struct ieee80211_channel *chan)
1272 {
1273 	int i;
1274 
1275 	for (i = 0; i < sc->sc_ah->ah_nchan; i++) {
1276 		if (sc->sc_ah->ah_channels[i].channel == chan->ich_freq)
1277 			return (i);
1278 	}
1279 
1280 	return (-1);
1281 }
1282 
/*
 * Perform a full chip reset: quiesce interrupts and DMA, reset the
 * hardware on the current channel, then restart the receive path,
 * refresh channel-dependent state and re-enable interrupts.
 *
 * Returns 0 on success or EIO if the HAL reset fails.  Note that even
 * on a failed reset the restart steps below are still attempted.
 */
int
arn_reset(ieee80211com_t *ic)
{
	struct arn_softc *sc = (struct arn_softc *)ic;
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	/* Quiesce: no interrupts, no pending tx, rx engine stopped. */
	(void) ath9k_hw_set_interrupts(ah, 0);
	arn_draintxq(sc, 0);
	(void) arn_stoprecv(sc);

	if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan, sc->tx_chan_width,
	    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
	    sc->sc_ht_extprotspacing, B_FALSE, &status)) {
		ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
		    "unable to reset hardware; hal status %u\n", status));
		error = EIO;
	}

	if (arn_startrecv(sc) != 0)
		ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
		    "unable to start recv logic\n"));

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	arn_setcurmode(sc, arn_chan2mode(sc->sc_ah->ah_curchan));

	arn_update_txpow(sc);

	if (sc->sc_flags & SC_OP_BEACONS)
		arn_beacon_config(sc);	/* restart beacons */

	(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);

	return (error);
}
1323 
1324 int
1325 arn_get_hal_qnum(uint16_t queue, struct arn_softc *sc)
1326 {
1327 	int qnum;
1328 
1329 	switch (queue) {
1330 	case WME_AC_VO:
1331 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1332 		break;
1333 	case WME_AC_VI:
1334 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1335 		break;
1336 	case WME_AC_BE:
1337 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1338 		break;
1339 	case WME_AC_BK:
1340 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1341 		break;
1342 	default:
1343 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1344 		break;
1345 	}
1346 
1347 	return (qnum);
1348 }
1349 
/*
 * MAC/baseband silicon revision -> marketing name table, consumed by
 * arn_mac_bb_name() when printing hardware identification.
 */
static struct {
	uint32_t version;
	const char *name;
} ath_mac_bb_names[] = {
	{ AR_SREV_VERSION_5416_PCI,	"5416" },
	{ AR_SREV_VERSION_5416_PCIE,	"5418" },
	{ AR_SREV_VERSION_9100,		"9100" },
	{ AR_SREV_VERSION_9160,		"9160" },
	{ AR_SREV_VERSION_9280,		"9280" },
	{ AR_SREV_VERSION_9285,		"9285" }
};
1361 
/*
 * RF silicon revision -> marketing name table, consumed by arn_rf_name().
 * NOTE(review): version 0 also maps to "5133" — presumably a default for
 * parts that report no RF revision; confirm against the HAL.
 */
static struct {
	uint16_t version;
	const char *name;
} ath_rf_names[] = {
	{ 0,				"5133" },
	{ AR_RAD5133_SREV_MAJOR,	"5133" },
	{ AR_RAD5122_SREV_MAJOR,	"5122" },
	{ AR_RAD2133_SREV_MAJOR,	"2133" },
	{ AR_RAD2122_SREV_MAJOR,	"2122" }
};
1372 
1373 /*
1374  * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
1375  */
1376 
1377 static const char *
1378 arn_mac_bb_name(uint32_t mac_bb_version)
1379 {
1380 	int i;
1381 
1382 	for (i = 0; i < ARRAY_SIZE(ath_mac_bb_names); i++) {
1383 		if (ath_mac_bb_names[i].version == mac_bb_version) {
1384 			return (ath_mac_bb_names[i].name);
1385 		}
1386 	}
1387 
1388 	return ("????");
1389 }
1390 
1391 /*
1392  * Return the RF name. "????" is returned if the RF is unknown.
1393  */
1394 
1395 static const char *
1396 arn_rf_name(uint16_t rf_version)
1397 {
1398 	int i;
1399 
1400 	for (i = 0; i < ARRAY_SIZE(ath_rf_names); i++) {
1401 		if (ath_rf_names[i].version == rf_version) {
1402 			return (ath_rf_names[i].name);
1403 		}
1404 	}
1405 
1406 	return ("????");
1407 }
1408 
1409 static void
1410 arn_next_scan(void *arg)
1411 {
1412 	ieee80211com_t *ic = arg;
1413 	struct arn_softc *sc = (struct arn_softc *)ic;
1414 
1415 	sc->sc_scan_timer = 0;
1416 	if (ic->ic_state == IEEE80211_S_SCAN) {
1417 		sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
1418 		    drv_usectohz(arn_dwelltime * 1000));
1419 		ieee80211_next_scan(ic);
1420 	}
1421 }
1422 
1423 static void
1424 arn_stop_scantimer(struct arn_softc *sc)
1425 {
1426 	timeout_id_t tmp_id = 0;
1427 
1428 	while ((sc->sc_scan_timer != 0) && (tmp_id != sc->sc_scan_timer)) {
1429 		tmp_id = sc->sc_scan_timer;
1430 		(void) untimeout(tmp_id);
1431 	}
1432 	sc->sc_scan_timer = 0;
1433 }
1434 
1435 static int32_t
1436 arn_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1437 {
1438 	struct arn_softc *sc = (struct arn_softc *)ic;
1439 	struct ath_hal *ah = sc->sc_ah;
1440 	struct ieee80211_node *in;
1441 	int32_t i, error;
1442 	uint8_t *bssid;
1443 	uint32_t rfilt;
1444 	enum ieee80211_state ostate;
1445 	struct ath9k_channel *channel;
1446 	int pos;
1447 
1448 	/* Should set up & init LED here */
1449 
1450 	if (sc->sc_flags & SC_OP_INVALID)
1451 		return (0);
1452 
1453 	ostate = ic->ic_state;
1454 	ARN_DBG((ARN_DBG_INIT, "arn: arn_newstate(): "
1455 	    "%x -> %x!\n", ostate, nstate));
1456 
1457 	ARN_LOCK(sc);
1458 
1459 	if (nstate != IEEE80211_S_SCAN)
1460 		arn_stop_scantimer(sc);
1461 	if (nstate != IEEE80211_S_RUN)
1462 		arn_stop_caltimer(sc);
1463 
1464 	/* Should set LED here */
1465 
1466 	if (nstate == IEEE80211_S_INIT) {
1467 		sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1468 		/*
1469 		 * Disable interrupts.
1470 		 */
1471 		(void) ath9k_hw_set_interrupts
1472 		    (ah, sc->sc_imask &~ ATH9K_INT_GLOBAL);
1473 
1474 #ifdef ARN_IBSS
1475 		if (ic->ic_opmode == IEEE80211_M_IBSS) {
1476 			(void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
1477 			arn_beacon_return(sc);
1478 		}
1479 #endif
1480 		ARN_UNLOCK(sc);
1481 		ieee80211_stop_watchdog(ic);
1482 		goto done;
1483 	}
1484 	in = ic->ic_bss;
1485 
1486 	pos = arn_get_channel(sc, ic->ic_curchan);
1487 
1488 	if (pos == -1) {
1489 		ARN_DBG((ARN_DBG_FATAL, "arn: "
1490 		    "%s: Invalid channel\n", __func__));
1491 		error = EINVAL;
1492 		ARN_UNLOCK(sc);
1493 		goto bad;
1494 	}
1495 
1496 	if (in->in_htcap & IEEE80211_HTCAP_CHWIDTH40) {
1497 		arn_update_chainmask(sc);
1498 		sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
1499 	} else
1500 		sc->tx_chan_width = ATH9K_HT_MACMODE_20;
1501 
1502 	sc->sc_ah->ah_channels[pos].chanmode =
1503 	    arn_chan2flags(ic, ic->ic_curchan);
1504 	channel = &sc->sc_ah->ah_channels[pos];
1505 	if (channel == NULL) {
1506 		arn_problem("arn_newstate(): channel == NULL");
1507 		ARN_UNLOCK(sc);
1508 		goto bad;
1509 	}
1510 	error = arn_set_channel(sc, channel);
1511 	if (error != 0) {
1512 		if (nstate != IEEE80211_S_SCAN) {
1513 			ARN_UNLOCK(sc);
1514 			ieee80211_reset_chan(ic);
1515 			goto bad;
1516 		}
1517 	}
1518 
1519 	/*
1520 	 * Get the receive filter according to the
1521 	 * operating mode and state
1522 	 */
1523 	rfilt = arn_calcrxfilter(sc);
1524 
1525 	if (nstate == IEEE80211_S_SCAN)
1526 		bssid = ic->ic_macaddr;
1527 	else
1528 		bssid = in->in_bssid;
1529 
1530 	ath9k_hw_setrxfilter(ah, rfilt);
1531 
1532 	if (nstate == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS)
1533 		ath9k_hw_write_associd(ah, bssid, in->in_associd);
1534 	else
1535 		ath9k_hw_write_associd(ah, bssid, 0);
1536 
1537 	/* Check for WLAN_CAPABILITY_PRIVACY ? */
1538 	if (ic->ic_flags & IEEE80211_F_PRIVACY) {
1539 		for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1540 			if (ath9k_hw_keyisvalid(ah, (uint16_t)i))
1541 				(void) ath9k_hw_keysetmac(ah, (uint16_t)i,
1542 				    bssid);
1543 		}
1544 	}
1545 
1546 	if (nstate == IEEE80211_S_RUN) {
1547 		switch (ic->ic_opmode) {
1548 #ifdef ARN_IBSS
1549 		case IEEE80211_M_IBSS:
1550 			/*
1551 			 * Allocate and setup the beacon frame.
1552 			 * Stop any previous beacon DMA.
1553 			 */
1554 			(void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
1555 			arn_beacon_return(sc);
1556 			error = arn_beacon_alloc(sc, in);
1557 			if (error != 0) {
1558 				ARN_UNLOCK(sc);
1559 				goto bad;
1560 			}
1561 			/*
1562 			 * If joining an adhoc network defer beacon timer
1563 			 * configuration to the next beacon frame so we
1564 			 * have a current TSF to use.  Otherwise we're
1565 			 * starting an ibss/bss so there's no need to delay.
1566 			 */
1567 			if (ic->ic_opmode == IEEE80211_M_IBSS &&
1568 			    ic->ic_bss->in_tstamp.tsf != 0) {
1569 				sc->sc_bsync = 1;
1570 			} else {
1571 				arn_beacon_config(sc);
1572 			}
1573 			break;
1574 #endif /* ARN_IBSS */
1575 		case IEEE80211_M_STA:
1576 			if (ostate != IEEE80211_S_RUN) {
1577 				/*
1578 				 * Defer beacon timer configuration to the next
1579 				 * beacon frame so we have a current TSF to use.
1580 				 * Any TSF collected when scanning is likely old
1581 				 */
1582 #ifdef ARN_IBSS
1583 				sc->sc_bsync = 1;
1584 #else
1585 				/* Configure the beacon and sleep timers. */
1586 				arn_beacon_config(sc);
1587 				/* Reset rssi stats */
1588 				sc->sc_halstats.ns_avgbrssi =
1589 				    ATH_RSSI_DUMMY_MARKER;
1590 				sc->sc_halstats.ns_avgrssi =
1591 				    ATH_RSSI_DUMMY_MARKER;
1592 				sc->sc_halstats.ns_avgtxrssi =
1593 				    ATH_RSSI_DUMMY_MARKER;
1594 				sc->sc_halstats.ns_avgtxrate =
1595 				    ATH_RATE_DUMMY_MARKER;
1596 /* end */
1597 
1598 #endif /* ARN_IBSS */
1599 			}
1600 			break;
1601 		default:
1602 			break;
1603 		}
1604 	} else {
1605 		sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1606 		(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
1607 	}
1608 
1609 	/*
1610 	 * Reset the rate control state.
1611 	 */
1612 	arn_rate_ctl_reset(sc, nstate);
1613 
1614 	ARN_UNLOCK(sc);
1615 done:
1616 	/*
1617 	 * Invoke the parent method to complete the work.
1618 	 */
1619 	error = sc->sc_newstate(ic, nstate, arg);
1620 
1621 	/*
1622 	 * Finally, start any timers.
1623 	 */
1624 	if (nstate == IEEE80211_S_RUN) {
1625 		ieee80211_start_watchdog(ic, 1);
1626 		ASSERT(sc->sc_cal_timer == 0);
1627 		sc->sc_cal_timer = timeout(arn_ani_calibrate, (void *)sc,
1628 		    drv_usectohz(100 * 1000));
1629 	} else if ((nstate == IEEE80211_S_SCAN) && (ostate != nstate)) {
1630 		/* start ap/neighbor scan timer */
1631 		/* ASSERT(sc->sc_scan_timer == 0); */
1632 		if (sc->sc_scan_timer != 0) {
1633 			(void) untimeout(sc->sc_scan_timer);
1634 			sc->sc_scan_timer = 0;
1635 		}
1636 		sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
1637 		    drv_usectohz(arn_dwelltime * 1000));
1638 	}
1639 
1640 bad:
1641 	return (error);
1642 }
1643 
/*
 * Periodic watchdog callout.  In RUN state it (optionally) drives the
 * legacy rate-control algorithm and the software beacon-miss detector,
 * then re-arms itself via ieee80211_start_watchdog().
 *
 * NOTE(review): the ARN_HW_BEACON_MISS_HANDLE block below is indented
 * one level shallower than the enclosing RUN-state if; it is inside
 * that if nonetheless.
 */
static void
arn_watchdog(void *arg)
{
	struct arn_softc *sc = arg;
	ieee80211com_t *ic = &sc->sc_isc;
	int ntimer = 0;

	ARN_LOCK(sc);
	ic->ic_watchdog_timer = 0;
	if (sc->sc_flags & SC_OP_INVALID) {
		/* Device is being detached/quiesced; do nothing. */
		ARN_UNLOCK(sc);
		return;
	}

	if (ic->ic_state == IEEE80211_S_RUN) {
		/*
		 * Start the background rate control thread if we
		 * are not configured to use a fixed xmit rate.
		 */
#ifdef ARN_LEGACY_RC
		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
			sc->sc_stats.ast_rate_calls ++;
			if (ic->ic_opmode == IEEE80211_M_STA)
				arn_rate_ctl(ic, ic->ic_bss);
			else
				ieee80211_iterate_nodes(&ic->ic_sta,
				    arn_rate_ctl, sc);
		}
#endif /* ARN_LEGACY_RC */

#ifdef ARN_HW_BEACON_MISS_HANDLE
	/* nothing to do here */
#else
	/* currently set 10 seconds as beacon miss threshold */
	if (ic->ic_beaconmiss++ > 100) {
		ARN_DBG((ARN_DBG_BEACON, "arn_watchdog():"
		    "Beacon missed for 10 seconds, run"
		    "ieee80211_new_state(ic, IEEE80211_S_INIT, -1)\n"));
		ARN_UNLOCK(sc);
		(void) ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
		return;
	}
#endif /* ARN_HW_BEACON_MISS_HANDLE */

		ntimer = 1;
	}
	ARN_UNLOCK(sc);

	/* Re-arm outside the softc lock; ieee80211_watchdog may call back. */
	ieee80211_watchdog(ic);
	if (ntimer != 0)
		ieee80211_start_watchdog(ic, ntimer);
}
1696 
1697 /* ARGSUSED */
1698 static struct ieee80211_node *
1699 arn_node_alloc(ieee80211com_t *ic)
1700 {
1701 	struct ath_node *an;
1702 #ifdef ARN_TX_AGGREGATION
1703 	struct arn_softc *sc = (struct arn_softc *)ic;
1704 #endif
1705 
1706 	an = kmem_zalloc(sizeof (struct ath_node), KM_SLEEP);
1707 
1708 	/* legacy rate control */
1709 #ifdef ARN_LEGACY_RC
1710 	arn_rate_update(sc, &an->an_node, 0);
1711 #endif
1712 
1713 #ifdef ARN_TX_AGGREGATION
1714 	if (sc->sc_flags & SC_OP_TXAGGR) {
1715 		arn_tx_node_init(sc, an);
1716 	}
1717 #endif /* ARN_TX_AGGREGATION */
1718 
1719 	an->last_rssi = ATH_RSSI_DUMMY_MARKER;
1720 
1721 	return ((an != NULL) ? &an->an_node : NULL);
1722 }
1723 
1724 static void
1725 arn_node_free(struct ieee80211_node *in)
1726 {
1727 	ieee80211com_t *ic = in->in_ic;
1728 	struct arn_softc *sc = (struct arn_softc *)ic;
1729 	struct ath_buf *bf;
1730 	struct ath_txq *txq;
1731 	int32_t i;
1732 
1733 #ifdef ARN_TX_AGGREGATION
1734 	if (sc->sc_flags & SC_OP_TXAGGR)
1735 		arn_tx_node_cleanup(sc, in);
1736 #endif /* TX_AGGREGATION */
1737 
1738 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1739 		if (ARN_TXQ_SETUP(sc, i)) {
1740 			txq = &sc->sc_txq[i];
1741 			mutex_enter(&txq->axq_lock);
1742 			bf = list_head(&txq->axq_list);
1743 			while (bf != NULL) {
1744 				if (bf->bf_in == in) {
1745 					bf->bf_in = NULL;
1746 				}
1747 				bf = list_next(&txq->axq_list, bf);
1748 			}
1749 			mutex_exit(&txq->axq_lock);
1750 		}
1751 	}
1752 
1753 	ic->ic_node_cleanup(in);
1754 
1755 	if (in->in_wpa_ie != NULL)
1756 		ieee80211_free(in->in_wpa_ie);
1757 
1758 	if (in->in_wme_ie != NULL)
1759 		ieee80211_free(in->in_wme_ie);
1760 
1761 	if (in->in_htcap_ie != NULL)
1762 		ieee80211_free(in->in_htcap_ie);
1763 
1764 	kmem_free(in, sizeof (struct ath_node));
1765 }
1766 
1767 /*
1768  * Allocate tx/rx key slots for TKIP.  We allocate one slot for
1769  * each key. MIC is right after the decrypt/encrypt key.
1770  */
1771 static uint16_t
1772 arn_key_alloc_pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1773     ieee80211_keyix *rxkeyix)
1774 {
1775 	uint16_t i, keyix;
1776 
1777 	ASSERT(!sc->sc_splitmic);
1778 	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
1779 		uint8_t b = sc->sc_keymap[i];
1780 		if (b == 0xff)
1781 			continue;
1782 		for (keyix = i * NBBY; keyix < (i + 1) * NBBY;
1783 		    keyix++, b >>= 1) {
1784 			if ((b & 1) || is_set(keyix+64, sc->sc_keymap)) {
1785 				/* full pair unavailable */
1786 				continue;
1787 			}
1788 			set_bit(keyix, sc->sc_keymap);
1789 			set_bit(keyix+64, sc->sc_keymap);
1790 			ARN_DBG((ARN_DBG_KEYCACHE,
1791 			    "arn_key_alloc_pair(): key pair %u,%u\n",
1792 			    keyix, keyix+64));
1793 			*txkeyix = *rxkeyix = keyix;
1794 			return (1);
1795 		}
1796 	}
1797 	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_pair():"
1798 	    " out of pair space\n"));
1799 
1800 	return (0);
1801 }
1802 
1803 /*
1804  * Allocate tx/rx key slots for TKIP.  We allocate two slots for
1805  * each key, one for decrypt/encrypt and the other for the MIC.
1806  */
/*
 * Allocate the four key cache slots TKIP needs when tx and rx MIC keys
 * live in separate entries (splitmic): the base slot i plus i+32 (rx),
 * i+64 (tx MIC) and i+32+64 (rx MIC).  Returns 1 and stores the base
 * slot in *txkeyix/*rxkeyix on success, 0 when no 4-tuple is free.
 */
static int
arn_key_alloc_2pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
    ieee80211_keyix *rxkeyix)
{
	uint16_t i, keyix;

	ASSERT(sc->sc_splitmic);
	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
		uint8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			/*
			 * Skip set bits to find the first free slot; the
			 * 'again' label lets the companion-slot check below
			 * resume this scan when a candidate's +32/+64
			 * entries turn out to be taken.
			 */
			keyix = i*NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
			if (is_set(keyix+32, sc->sc_keymap) ||
			    is_set(keyix+64, sc->sc_keymap) ||
			    is_set(keyix+32+64, sc->sc_keymap)) {
				/* full pair unavailable */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				goto again;
			}
			/* claim all four companion slots atomically */
			set_bit(keyix, sc->sc_keymap);
			set_bit(keyix+64, sc->sc_keymap);
			set_bit(keyix+32, sc->sc_keymap);
			set_bit(keyix+32+64, sc->sc_keymap);
			ARN_DBG((ARN_DBG_KEYCACHE,
			    "arn_key_alloc_2pair(): key pair %u,%u %u,%u\n",
			    keyix, keyix+64,
			    keyix+32, keyix+32+64));
			*txkeyix = *rxkeyix = keyix;
			return (1);
		}
	}
	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_2pair(): "
	    " out of pair space\n"));

	return (0);
}
1854 /*
1855  * Allocate a single key cache slot.
1856  */
1857 static int
1858 arn_key_alloc_single(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1859     ieee80211_keyix *rxkeyix)
1860 {
1861 	uint16_t i, keyix;
1862 
1863 	/* try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
1864 	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap); i++) {
1865 		uint8_t b = sc->sc_keymap[i];
1866 
1867 		if (b != 0xff) {
1868 			/*
1869 			 * One or more slots are free.
1870 			 */
1871 			keyix = i*NBBY;
1872 			while (b & 1)
1873 				keyix++, b >>= 1;
1874 			set_bit(keyix, sc->sc_keymap);
1875 			ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_single(): "
1876 			    "key %u\n", keyix));
1877 			*txkeyix = *rxkeyix = keyix;
1878 			return (1);
1879 		}
1880 	}
1881 	return (0);
1882 }
1883 
1884 /*
1885  * Allocate one or more key cache slots for a unicast key.  The
1886  * key itself is needed only to identify the cipher.  For hardware
1887  * TKIP with split cipher+MIC keys we allocate two key cache slot
1888  * pairs so that we can setup separate TX and RX MIC keys.  Note
1889  * that the MIC key for a TKIP key at slot i is assumed by the
1890  * hardware to be at slot i+64.  This limits TKIP keys to the first
1891  * 64 entries.
1892  */
1893 /* ARGSUSED */
1894 int
1895 arn_key_alloc(ieee80211com_t *ic, const struct ieee80211_key *k,
1896     ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1897 {
1898 	struct arn_softc *sc = (struct arn_softc *)ic;
1899 
1900 	/*
1901 	 * We allocate two pair for TKIP when using the h/w to do
1902 	 * the MIC.  For everything else, including software crypto,
1903 	 * we allocate a single entry.  Note that s/w crypto requires
1904 	 * a pass-through slot on the 5211 and 5212.  The 5210 does
1905 	 * not support pass-through cache entries and we map all
1906 	 * those requests to slot 0.
1907 	 */
1908 	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
1909 		return (arn_key_alloc_single(sc, keyix, rxkeyix));
1910 	} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
1911 	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1912 		if (sc->sc_splitmic)
1913 			return (arn_key_alloc_2pair(sc, keyix, rxkeyix));
1914 		else
1915 			return (arn_key_alloc_pair(sc, keyix, rxkeyix));
1916 	} else {
1917 		return (arn_key_alloc_single(sc, keyix, rxkeyix));
1918 	}
1919 }
1920 
1921 /*
1922  * Delete an entry in the key cache allocated by ath_key_alloc.
1923  */
1924 int
1925 arn_key_delete(ieee80211com_t *ic, const struct ieee80211_key *k)
1926 {
1927 	struct arn_softc *sc = (struct arn_softc *)ic;
1928 	struct ath_hal *ah = sc->sc_ah;
1929 	const struct ieee80211_cipher *cip = k->wk_cipher;
1930 	ieee80211_keyix keyix = k->wk_keyix;
1931 
1932 	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_delete():"
1933 	    " delete key %u ic_cipher=0x%x\n", keyix, cip->ic_cipher));
1934 
1935 	(void) ath9k_hw_keyreset(ah, keyix);
1936 	/*
1937 	 * Handle split tx/rx keying required for TKIP with h/w MIC.
1938 	 */
1939 	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
1940 	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
1941 		(void) ath9k_hw_keyreset(ah, keyix+32);		/* RX key */
1942 
1943 	if (keyix >= IEEE80211_WEP_NKID) {
1944 		/*
1945 		 * Don't touch keymap entries for global keys so
1946 		 * they are never considered for dynamic allocation.
1947 		 */
1948 		clr_bit(keyix, sc->sc_keymap);
1949 		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
1950 		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1951 			/*
1952 			 * If splitmic is true +64 is TX key MIC,
1953 			 * else +64 is RX key + RX key MIC.
1954 			 */
1955 			clr_bit(keyix+64, sc->sc_keymap);
1956 			if (sc->sc_splitmic) {
1957 				/* Rx key */
1958 				clr_bit(keyix+32, sc->sc_keymap);
1959 				/* RX key MIC */
1960 				clr_bit(keyix+32+64, sc->sc_keymap);
1961 			}
1962 		}
1963 	}
1964 	return (1);
1965 }
1966 
1967 /*
1968  * Set a TKIP key into the hardware.  This handles the
1969  * potential distribution of key state to multiple key
1970  * cache slots for TKIP.
1971  */
/*
 * Program a TKIP key into the hardware key cache, distributing the
 * cipher key and the tx/rx MIC halves across the slot layout chosen by
 * arn_key_alloc_pair()/arn_key_alloc_2pair().  Returns the HAL's
 * success/failure result (nonzero on success).
 *
 * wk_key layout: 16 bytes of cipher key, 8 bytes tx MIC, 8 bytes rx MIC.
 */
static int
arn_keyset_tkip(struct arn_softc *sc, const struct ieee80211_key *k,
    struct ath9k_keyval *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	uint8_t *key_rxmic = NULL;
	uint8_t *key_txmic = NULL;
	uint8_t  *key = (uint8_t *)&(k->wk_key[0]);
	struct ath_hal *ah = sc->sc_ah;

	key_txmic = key + 16;
	key_rxmic = key + 24;

	if (mac == NULL) {
		/* Group key installation */
		(void) memcpy(hk->kv_mic,  key_rxmic, sizeof (hk->kv_mic));
		return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
		    mac, B_FALSE));
	}
	if (!sc->sc_splitmic) {
		/*
		 * data key goes at first index,
		 * the hal handles the MIC keys at index+64.
		 */
		(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
		(void) memcpy(hk->kv_txmic, key_txmic, sizeof (hk->kv_txmic));
		return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
		    mac, B_FALSE));
	}
	/*
	 * TX key goes at first index, RX key at +32.
	 * The hal handles the MIC keys at index+64.
	 */
	(void) memcpy(hk->kv_mic, key_txmic, sizeof (hk->kv_mic));
	if (!(ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, NULL,
	    B_FALSE))) {
		/* Txmic entry failed. No need to proceed further */
		ARN_DBG((ARN_DBG_KEYCACHE,
		    "%s Setting TX MIC Key Failed\n", __func__));
		return (0);
	}

	/* Reuse hk for the RX half: swap in the rx MIC before the 2nd write. */
	(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));

	/* XXX delete tx key on failure? */
	return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, mac, B_FALSE));

}
2019 
2020 int
2021 arn_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
2022     const uint8_t mac[IEEE80211_ADDR_LEN])
2023 {
2024 	struct arn_softc *sc = (struct arn_softc *)ic;
2025 	const struct ieee80211_cipher *cip = k->wk_cipher;
2026 	struct ath9k_keyval hk;
2027 
2028 	/* cipher table */
2029 	static const uint8_t ciphermap[] = {
2030 		ATH9K_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
2031 		ATH9K_CIPHER_TKIP,		/* IEEE80211_CIPHER_TKIP */
2032 		ATH9K_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
2033 		ATH9K_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
2034 		ATH9K_CIPHER_CKIP,		/* IEEE80211_CIPHER_CKIP */
2035 		ATH9K_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
2036 	};
2037 
2038 	bzero(&hk, sizeof (hk));
2039 
2040 	/*
2041 	 * Software crypto uses a "clear key" so non-crypto
2042 	 * state kept in the key cache are maintainedd so that
2043 	 * rx frames have an entry to match.
2044 	 */
2045 	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
2046 		ASSERT(cip->ic_cipher < 6);
2047 		hk.kv_type = ciphermap[cip->ic_cipher];
2048 		hk.kv_len = k->wk_keylen;
2049 		bcopy(k->wk_key, hk.kv_val, k->wk_keylen);
2050 	} else {
2051 		hk.kv_type = ATH9K_CIPHER_CLR;
2052 	}
2053 
2054 	if (hk.kv_type == ATH9K_CIPHER_TKIP &&
2055 	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2056 		return (arn_keyset_tkip(sc, k, &hk, mac));
2057 	} else {
2058 		return (ath9k_hw_set_keycache_entry(sc->sc_ah,
2059 		    k->wk_keyix, &hk, mac, B_FALSE));
2060 	}
2061 }
2062 
2063 /*
2064  * Enable/Disable short slot timing
2065  */
2066 void
2067 arn_set_shortslot(ieee80211com_t *ic, int onoff)
2068 {
2069 	struct ath_hal *ah = ((struct arn_softc *)ic)->sc_ah;
2070 
2071 	if (onoff)
2072 		(void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
2073 	else
2074 		(void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_20);
2075 }
2076 
/*
 * Bring the interface up: reset the chip onto the current channel,
 * start the receive engine, build the interrupt mask for the current
 * capabilities/operating mode, and enable interrupts.  Caller must hold
 * the softc lock (ARN_LOCK_ASSERT below).
 *
 * Returns 0 on success or an errno (EINVAL bad channel, EIO hw failure).
 */
static int
arn_open(struct arn_softc *sc)
{
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ieee80211_channel *curchan = ic->ic_curchan;
	struct ath9k_channel *init_channel;
	int error = 0, pos, status;

	ARN_LOCK_ASSERT(sc);

	pos = arn_get_channel(sc, curchan);
	if (pos == -1) {
		ARN_DBG((ARN_DBG_FATAL, "arn: "
		    "%s: Invalid channel\n", __func__));
		error = EINVAL;
		goto error;
	}

	sc->tx_chan_width = ATH9K_HT_MACMODE_20;

	if (sc->sc_curmode == ATH9K_MODE_11A) {
		sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_A;
	} else {
		sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_G;
	}

	init_channel = &sc->sc_ah->ah_channels[pos];

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(sc->sc_ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.	On return the hardware is known to
	 * be powered up and with interrupts disabled.	This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	if (!ath9k_hw_reset(sc->sc_ah, init_channel,
	    sc->tx_chan_width, sc->sc_tx_chainmask,
	    sc->sc_rx_chainmask, sc->sc_ht_extprotspacing,
	    B_FALSE, &status)) {
		ARN_DBG((ARN_DBG_FATAL, "arn: "
		    "%s: unable to reset hardware; hal status %u "
		    "(freq %u flags 0x%x)\n", __func__, status,
		    init_channel->channel, init_channel->channelFlags));

		error = EIO;
		goto error;
	}

	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	arn_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (arn_startrecv(sc) != 0) {
		ARN_DBG((ARN_DBG_INIT, "arn: "
		    "%s: unable to start recv logic\n", __func__));
		error = EIO;
		goto error;
	}

	/* Setup our intr mask. */
	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX |
	    ATH9K_INT_RXEOL | ATH9K_INT_RXORN |
	    ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
#ifdef ARN_ATH9K_HW_CAP_GTT
	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->sc_imask |= ATH9K_INT_GTT;
#endif

	/*
	 * NOTE(review): this guard repeats ARN_ATH9K_HW_CAP_GTT but the
	 * body concerns HT/CST — looks like a copy-paste of the guard
	 * above (ARN_ATH9K_INT_CST is the macro used elsewhere for CST
	 * handling); confirm the intended macro before changing.
	 */
#ifdef ARN_ATH9K_HW_CAP_GTT
	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->sc_imask |= ATH9K_INT_CST;
#endif

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
#ifdef ARN_ATH9K_INT_MIB
	if (ath9k_hw_phycounters(sc->sc_ah) &&
	    ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
	    (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
		sc->sc_imask |= ATH9K_INT_MIB;
#endif
	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set.  For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
#ifdef ARN_ATH9K_INT_TIM
	if ((sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	    !sc->sc_config.swBeaconProcess)
		sc->sc_imask |= ATH9K_INT_TIM;
#endif
	if (arn_chan2mode(init_channel) != sc->sc_curmode)
		arn_setcurmode(sc, arn_chan2mode(init_channel));
	ARN_DBG((ARN_DBG_INIT, "arn: "
	    "%s: current mode after arn_setcurmode is %d\n",
	    __func__, sc->sc_curmode));

	sc->sc_isrunning = 1;

	/* Disable BMISS interrupt when we're not associated */
	sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
	(void) ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);

	return (0);

error:
	return (error);
}
2201 
/*
 * Quiesce the device: take the 802.11 state machine back to INIT,
 * mask all interrupts, and drain/stop the tx/rx paths.  Caller must
 * hold the softc lock; the lock is dropped temporarily around the
 * net80211 calls, which may call back into the driver.
 */
static void
arn_close(struct arn_softc *sc)
{
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ath_hal *ah = sc->sc_ah;

	ARN_LOCK_ASSERT(sc);

	/* Nothing to do if the device was never started */
	if (!sc->sc_isrunning)
		return;

	/*
	 * Shutdown the hardware and driver
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ARN_UNLOCK(sc);
	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
	ieee80211_stop_watchdog(ic);
	ARN_LOCK(sc);

	/*
	 * make sure h/w will not generate any interrupt
	 * before setting the invalid flag.
	 */
	(void) ath9k_hw_set_interrupts(ah, 0);

	if (!(sc->sc_flags & SC_OP_INVALID)) {
		/* Hardware still present: drain tx, stop rx, disable PHY */
		arn_draintxq(sc, 0);
		(void) arn_stoprecv(sc);
		(void) ath9k_hw_phy_disable(ah);
	} else {
		/* Hardware is gone; just forget the rx descriptor chain */
		sc->sc_rxlink = NULL;
	}

	sc->sc_isrunning = 0;
}
2239 
2240 /*
2241  * MAC callback functions
2242  */
/*
 * GLDv3 m_stat entry point: report the statistic identified by
 * 'stat' through 'val'.  Driver-private counters come from
 * sc_stats; generic 802.11 counters are delegated to net80211.
 * Returns 0 on success or ENOTSUP for unsupported statistics.
 */
static int
arn_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	struct arn_softc *sc = arg;
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ieee80211_node *in;
	struct ieee80211_rateset *rs;

	ARN_LOCK(sc);
	switch (stat) {
	case MAC_STAT_IFSPEED:
		/*
		 * Current tx rate of the bss node; ir_rates entries are
		 * in units of 0.5 Mb/s, hence the division by 2.
		 */
		in = ic->ic_bss;
		rs = &in->in_rates;
		*val = (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) / 2 *
		    1000000ull;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = sc->sc_stats.ast_tx_nobuf +
		    sc->sc_stats.ast_tx_nobufmgt;
		break;
	case MAC_STAT_IERRORS:
		*val = sc->sc_stats.ast_rx_tooshort;
		break;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
		break;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
		break;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
		break;
	case MAC_STAT_OERRORS:
	case WIFI_STAT_TX_FAILED:
		*val = sc->sc_stats.ast_tx_fifoerr +
		    sc->sc_stats.ast_tx_xretries +
		    sc->sc_stats.ast_tx_discard;
		break;
	case WIFI_STAT_TX_RETRANS:
		*val = sc->sc_stats.ast_tx_xretries;
		break;
	case WIFI_STAT_FCS_ERRORS:
		*val = sc->sc_stats.ast_rx_crcerr;
		break;
	case WIFI_STAT_WEP_ERRORS:
		*val = sc->sc_stats.ast_rx_badcrypt;
		break;
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		/* These counters are maintained by the net80211 layer */
		ARN_UNLOCK(sc);
		return (ieee80211_stat(ic, stat, val));
	default:
		ARN_UNLOCK(sc);
		return (ENOTSUP);
	}
	ARN_UNLOCK(sc);

	return (0);
}
2311 
/*
 * GLDv3 m_start entry point: (re)initialize the hardware and clear
 * the invalid flag so other entry points may use it.  Returns 0 on
 * success or the error from arn_open().
 */
int
arn_m_start(void *arg)
{
	struct arn_softc *sc = arg;
	int err = 0;

	ARN_LOCK(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */

	arn_close(sc);

	if ((err = arn_open(sc)) != 0) {
		ARN_UNLOCK(sc);
		return (err);
	}

	/* H/W is ready now */
	sc->sc_flags &= ~SC_OP_INVALID;

	ARN_UNLOCK(sc);

	return (0);
}
2339 
/*
 * GLDv3 m_stop entry point: quiesce the interface, disable the HAL
 * and put the chip into low-power state.  SC_OP_INVALID marks the
 * hardware as unusable until the next arn_m_start().
 */
static void
arn_m_stop(void *arg)
{
	struct arn_softc *sc = arg;

	ARN_LOCK(sc);
	arn_close(sc);

	/* disable HAL and put h/w to sleep */
	(void) ath9k_hw_disable(sc->sc_ah);
	ath9k_hw_configpcipowersave(sc->sc_ah, 1);

	/* XXX: hardware will not be ready in suspend state */
	sc->sc_flags |= SC_OP_INVALID;
	ARN_UNLOCK(sc);
}
2356 
2357 static int
2358 arn_m_promisc(void *arg, boolean_t on)
2359 {
2360 	struct arn_softc *sc = arg;
2361 	struct ath_hal *ah = sc->sc_ah;
2362 	uint32_t rfilt;
2363 
2364 	ARN_LOCK(sc);
2365 
2366 	rfilt = ath9k_hw_getrxfilter(ah);
2367 	if (on)
2368 		rfilt |= ATH9K_RX_FILTER_PROM;
2369 	else
2370 		rfilt &= ~ATH9K_RX_FILTER_PROM;
2371 	sc->sc_promisc = on;
2372 	ath9k_hw_setrxfilter(ah, rfilt);
2373 
2374 	ARN_UNLOCK(sc);
2375 
2376 	return (0);
2377 }
2378 
/*
 * GLDv3 m_multicst entry point: add or remove a multicast address.
 * The address is folded into a 6-bit bucket of the 64-bit hardware
 * multicast hash filter; buckets are reference counted so a bucket
 * is only cleared when its last address is removed.
 */
static int
arn_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	struct arn_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	uint32_t val, index, bit;
	uint8_t pos;
	uint32_t *mfilt = sc->sc_mcast_hash;

	ARN_LOCK(sc);

	/* calculate XOR of eight 6bit values */
	val = ARN_LE_READ_32(mca + 0);
	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	/*
	 * NOTE(review): this 32-bit read at offset 3 touches one byte
	 * past the 6-byte address; the surplus bits are discarded by
	 * the final "&= 0x3f", but it assumes the caller's buffer is
	 * readable there -- confirm against the MAC layer contract.
	 */
	val = ARN_LE_READ_32(mca + 3);
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	pos &= 0x3f;
	index = pos / 32;	/* which 32-bit half of the filter */
	bit = 1 << (pos % 32);	/* bit within that half */

	if (add) {	/* enable multicast */
		sc->sc_mcast_refs[pos]++;
		mfilt[index] |= bit;
	} else {	/* disable multicast */
		if (--sc->sc_mcast_refs[pos] == 0)
			mfilt[index] &= ~bit;
	}
	/* push the updated 64-bit filter down to the hardware */
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);

	ARN_UNLOCK(sc);
	return (0);
}
2411 
/*
 * GLDv3 m_unicst entry point: set a new unicast MAC address.  The
 * address is stored in the net80211 state, programmed into the
 * hardware, and the chip is reset so the change takes effect.
 */
static int
arn_m_unicst(void *arg, const uint8_t *macaddr)
{
	struct arn_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	ieee80211com_t *ic = (ieee80211com_t *)sc;

	ARN_DBG((ARN_DBG_XMIT, "ath: ath_gld_saddr(): "
	    "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
	    macaddr[0], macaddr[1], macaddr[2],
	    macaddr[3], macaddr[4], macaddr[5]));

	ARN_LOCK(sc);
	IEEE80211_ADDR_COPY(sc->sc_isc.ic_macaddr, macaddr);
	(void) ath9k_hw_setmac(ah, sc->sc_isc.ic_macaddr);
	/* reset so the new address is used by the hardware */
	(void) arn_reset(ic);
	ARN_UNLOCK(sc);
	return (0);
}
2431 
/*
 * GLDv3 m_tx entry point: transmit a chain of mblks.  Frames are
 * dropped unless we are associated (RUN state).  On ENOMEM the
 * unsent remainder of the chain is returned to the MAC layer for
 * later retry; on any other error the chain is freed and NULL is
 * returned.
 */
static mblk_t *
arn_m_tx(void *arg, mblk_t *mp)
{
	struct arn_softc *sc = arg;
	int error = 0;
	mblk_t *next;
	ieee80211com_t *ic = (ieee80211com_t *)sc;

	/*
	 * No data frames go out unless we're associated; this
	 * should not happen as the 802.11 layer does not enable
	 * the xmit queue until we enter the RUN state.
	 */
	if (ic->ic_state != IEEE80211_S_RUN) {
		ARN_DBG((ARN_DBG_XMIT, "arn: arn_m_tx(): "
		    "discard, state %u\n", ic->ic_state));
		sc->sc_stats.ast_tx_discard++;
		freemsgchain(mp);
		return (NULL);
	}

	while (mp != NULL) {
		/* detach the head frame before handing it down */
		next = mp->b_next;
		mp->b_next = NULL;
		error = arn_tx(ic, mp, IEEE80211_FC0_TYPE_DATA);
		if (error != 0) {
			/* relink so the rest of the chain stays intact */
			mp->b_next = next;
			if (error == ENOMEM) {
				/* out of tx buffers; let MAC retry later */
				break;
			} else {
				freemsgchain(mp);
				return (NULL);
			}
		}
		mp = next;
	}

	return (mp);
}
2471 
/*
 * GLDv3 m_ioctl entry point: hand the ioctl to net80211.  If the
 * configuration change requires a reset (ENETRESET) and the
 * hardware is usable, restart the interface and kick off a scan.
 */
static void
arn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	struct arn_softc *sc = arg;
	int32_t err;

	err = ieee80211_ioctl(&sc->sc_isc, wq, mp);

	ARN_LOCK(sc);
	if (err == ENETRESET) {
		if (!(sc->sc_flags & SC_OP_INVALID)) {
			/* drop the lock: arn_m_start() acquires it */
			ARN_UNLOCK(sc);

			(void) arn_m_start(sc);

			(void) ieee80211_new_state(&sc->sc_isc,
			    IEEE80211_S_SCAN, -1);
			ARN_LOCK(sc);
		}
	}
	ARN_UNLOCK(sc);
}
2494 
/*
 * GLDv3 m_setprop entry point: hand the property write to net80211.
 * If the change requires a reset (ENETRESET) and the hardware is
 * usable, restart the interface and kick off a scan, then report
 * success to the caller.
 */
static int
arn_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
    uint_t wldp_length, const void *wldp_buf)
{
	struct arn_softc *sc = arg;
	int	err;

	err = ieee80211_setprop(&sc->sc_isc, pr_name, wldp_pr_num,
	    wldp_length, wldp_buf);

	ARN_LOCK(sc);

	if (err == ENETRESET) {
		if (!(sc->sc_flags & SC_OP_INVALID)) {
			/* drop the lock: arn_m_start() acquires it */
			ARN_UNLOCK(sc);
			(void) arn_m_start(sc);
			(void) ieee80211_new_state(&sc->sc_isc,
			    IEEE80211_S_SCAN, -1);
			ARN_LOCK(sc);
		}
		err = 0;
	}

	ARN_UNLOCK(sc);

	return (err);
}
2522 
2523 /* ARGSUSED */
2524 static int
2525 arn_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2526     uint_t wldp_length, void *wldp_buf)
2527 {
2528 	struct arn_softc *sc = arg;
2529 	int	err = 0;
2530 
2531 	err = ieee80211_getprop(&sc->sc_isc, pr_name, wldp_pr_num,
2532 	    wldp_length, wldp_buf);
2533 
2534 	return (err);
2535 }
2536 
/*
 * GLDv3 m_propinfo entry point: delegate property metadata queries
 * to the net80211 layer.
 */
static void
arn_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
    mac_prop_info_handle_t prh)
{
	struct arn_softc *sc = arg;

	ieee80211_propinfo(&sc->sc_isc, pr_name, wldp_pr_num, prh);
}
2545 
/*
 * Read the PCI cache line size (kept in 4-byte word units in config
 * space), forcing a sane default if it is unset, and record it in
 * sc_cachelsz in BYTES for later DMA sizing/alignment.
 */
static void
arn_pci_config_cachesize(struct arn_softc *sc)
{
	uint8_t csz;

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	csz = pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ);
	if (csz == 0) {
		/*
		 * We must have this setup properly for rx buffer
		 * DMA to work so force a reasonable value here if it
		 * comes up zero.
		 */
		csz = ATH_DEF_CACHE_BYTES / sizeof (uint32_t);
		pci_config_put8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ,
		    csz);
	}
	sc->sc_cachelsz = csz << 2;	/* 4-byte words -> bytes */
}
2569 
/*
 * Enable PCI memory-mapped access and bus mastering in the command
 * register and verify both bits stuck.  Returns 0 on success or
 * EIO if the device refused either setting.
 */
static int
arn_pci_setup(struct arn_softc *sc)
{
	uint16_t command;

	/*
	 * Enable memory mapping and bus mastering
	 */
	ASSERT(sc != NULL);
	command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
	command	|= PCI_COMM_MAE | PCI_COMM_ME;
	pci_config_put16(sc->sc_cfg_handle, PCI_CONF_COMM, command);
	/* read back to confirm the hardware accepted both bits */
	command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
	if ((command & PCI_COMM_MAE) == 0) {
		arn_problem("arn: arn_pci_setup(): "
		    "failed to enable memory mapping\n");
		return (EIO);
	}
	if ((command & PCI_COMM_ME) == 0) {
		arn_problem("arn: arn_pci_setup(): "
		    "failed to enable bus mastering\n");
		return (EIO);
	}
	ARN_DBG((ARN_DBG_INIT, "arn: arn_pci_setup(): "
	    "set command reg to 0x%x \n", command));

	return (0);
}
2598 
2599 static void
2600 arn_get_hw_encap(struct arn_softc *sc)
2601 {
2602 	ieee80211com_t *ic;
2603 	struct ath_hal *ah;
2604 
2605 	ic = (ieee80211com_t *)sc;
2606 	ah = sc->sc_ah;
2607 
2608 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2609 	    ATH9K_CIPHER_AES_CCM, NULL))
2610 		ic->ic_caps |= IEEE80211_C_AES_CCM;
2611 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2612 	    ATH9K_CIPHER_AES_OCB, NULL))
2613 		ic->ic_caps |= IEEE80211_C_AES;
2614 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2615 	    ATH9K_CIPHER_TKIP, NULL))
2616 		ic->ic_caps |= IEEE80211_C_TKIP;
2617 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2618 	    ATH9K_CIPHER_WEP, NULL))
2619 		ic->ic_caps |= IEEE80211_C_WEP;
2620 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2621 	    ATH9K_CIPHER_MIC, NULL))
2622 		ic->ic_caps |= IEEE80211_C_TKIPMIC;
2623 }
2624 
/*
 * Populate sc_ht_conf with the 802.11n capabilities of this device:
 * 40MHz channels, short GI, A-MPDU limits, and the supported rx MCS
 * set (MCS 0-7 always, MCS 8-15 when two rx chains are present).
 */
static void
arn_setup_ht_cap(struct arn_softc *sc)
{
#define	ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3	/* 2 ^ 16 */
#define	ATH9K_HT_CAP_MPDUDENSITY_8 0x6		/* 8 usec */

	/* LINTED E_FUNC_SET_NOT_USED */
	uint8_t tx_streams;
	uint8_t rx_streams;

	arn_ht_conf *ht_info = &sc->sc_ht_conf;

	ht_info->ht_supported = B_TRUE;

	/* Todo: IEEE80211_HTCAP_SMPS */
	ht_info->cap = IEEE80211_HTCAP_CHWIDTH40|
	    IEEE80211_HTCAP_SHORTGI40 |
	    IEEE80211_HTCAP_DSSSCCK40;

	ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
	ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;

	/* set up supported mcs set */
	(void) memset(&ht_info->rx_mcs_mask, 0, sizeof (ht_info->rx_mcs_mask));
	/*
	 * A power-of-two chainmask means only one chain is enabled,
	 * i.e. a single spatial stream; otherwise assume two.
	 * (tx_streams is currently computed but unused -- see LINTED.)
	 */
	tx_streams = ISP2(sc->sc_ah->ah_caps.tx_chainmask) ? 1 : 2;
	rx_streams = ISP2(sc->sc_ah->ah_caps.rx_chainmask) ? 1 : 2;

	ht_info->rx_mcs_mask[0] = 0xff;	/* MCS 0-7 */
	if (rx_streams >= 2)
		ht_info->rx_mcs_mask[1] = 0xff;	/* MCS 8-15 */
}
2656 
/* xxx should be used for ht rate set negotiating ? */
/*
 * Rebuild the global net80211 11n rate set from the rx MCS bitmask
 * in sc_ht_conf: each set bit becomes one MCS rate entry whose
 * value equals its MCS index.
 */
static void
arn_overwrite_11n_rateset(struct arn_softc *sc)
{
	uint8_t *ht_rs = sc->sc_ht_conf.rx_mcs_mask;
	int mcs_idx, mcs_count = 0;
	int i, j;

	(void) memset(&ieee80211_rateset_11n, 0,
	    sizeof (ieee80211_rateset_11n));
	/* walk the 10-byte (80-bit) MCS mask, LSB first within a byte */
	for (i = 0; i < 10; i++) {
		for (j = 0; j < 8; j++) {
			if (ht_rs[i] & (1 << j)) {
				mcs_idx = i * 8 + j;
				/*
				 * NOTE(review): this break only exits the
				 * inner loop, but every later mcs_idx is
				 * also >= the limit, so no out-of-range
				 * entry can ever be written.
				 */
				if (mcs_idx >= IEEE80211_HTRATE_MAXSIZE) {
					break;
				}

				ieee80211_rateset_11n.rs_rates[mcs_idx] =
				    (uint8_t)mcs_idx;
				mcs_count++;
			}
		}
	}

	ieee80211_rateset_11n.rs_nrates = (uint8_t)mcs_count;

	ARN_DBG((ARN_DBG_RATE, "arn_overwrite_11n_rateset(): "
	    "MCS rate set supported by this station is as follows:\n"));

	for (i = 0; i < ieee80211_rateset_11n.rs_nrates; i++) {
		ARN_DBG((ARN_DBG_RATE, "MCS rate %d is %d\n",
		    i, ieee80211_rateset_11n.rs_rates[i]));
	}

}
2693 
2694 /*
2695  * Update WME parameters for a transmit queue.
2696  */
2697 static int
2698 arn_tx_queue_update(struct arn_softc *sc, int ac)
2699 {
2700 #define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2701 #define	ATH_TXOP_TO_US(v)		(v<<5)
2702 	ieee80211com_t *ic = (ieee80211com_t *)sc;
2703 	struct ath_txq *txq;
2704 	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
2705 	struct ath_hal *ah = sc->sc_ah;
2706 	struct ath9k_tx_queue_info qi;
2707 
2708 	txq = &sc->sc_txq[arn_get_hal_qnum(ac, sc)];
2709 	(void) ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi);
2710 
2711 	/*
2712 	 * TXQ_FLAG_TXOKINT_ENABLE = 0x0001
2713 	 * TXQ_FLAG_TXERRINT_ENABLE = 0x0001
2714 	 * TXQ_FLAG_TXDESCINT_ENABLE = 0x0002
2715 	 * TXQ_FLAG_TXEOLINT_ENABLE = 0x0004
2716 	 * TXQ_FLAG_TXURNINT_ENABLE = 0x0008
2717 	 * TXQ_FLAG_BACKOFF_DISABLE = 0x0010
2718 	 * TXQ_FLAG_COMPRESSION_ENABLE = 0x0020
2719 	 * TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040
2720 	 * TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080
2721 	 */
2722 
2723 	/* xxx should update these flags here? */
2724 #if 0
2725 	qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
2726 	    TXQ_FLAG_TXERRINT_ENABLE |
2727 	    TXQ_FLAG_TXDESCINT_ENABLE |
2728 	    TXQ_FLAG_TXURNINT_ENABLE;
2729 #endif
2730 
2731 	qi.tqi_aifs = wmep->wmep_aifsn;
2732 	qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2733 	qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2734 	qi.tqi_readyTime = 0;
2735 	qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
2736 
2737 	ARN_DBG((ARN_DBG_INIT,
2738 	    "%s:"
2739 	    "Q%u"
2740 	    "qflags 0x%x"
2741 	    "aifs %u"
2742 	    "cwmin %u"
2743 	    "cwmax %u"
2744 	    "burstTime %u\n",
2745 	    __func__,
2746 	    txq->axq_qnum,
2747 	    qi.tqi_qflags,
2748 	    qi.tqi_aifs,
2749 	    qi.tqi_cwmin,
2750 	    qi.tqi_cwmax,
2751 	    qi.tqi_burstTime));
2752 
2753 	if (!ath9k_hw_set_txq_props(ah, txq->axq_qnum, &qi)) {
2754 		arn_problem("unable to update hardware queue "
2755 		    "parameters for %s traffic!\n",
2756 		    ieee80211_wme_acnames[ac]);
2757 		return (0);
2758 	} else {
2759 		/* push to H/W */
2760 		(void) ath9k_hw_resettxqueue(ah, txq->axq_qnum);
2761 		return (1);
2762 	}
2763 
2764 #undef ATH_TXOP_TO_US
2765 #undef ATH_EXPONENT_TO_VALUE
2766 }
2767 
2768 /* Update WME parameters */
2769 static int
2770 arn_wme_update(ieee80211com_t *ic)
2771 {
2772 	struct arn_softc *sc = (struct arn_softc *)ic;
2773 
2774 	/* updateing */
2775 	return (!arn_tx_queue_update(sc, WME_AC_BE) ||
2776 	    !arn_tx_queue_update(sc, WME_AC_BK) ||
2777 	    !arn_tx_queue_update(sc, WME_AC_VI) ||
2778 	    !arn_tx_queue_update(sc, WME_AC_VO) ? EIO : 0);
2779 }
2780 
2781 /*
2782  * Update tx/rx chainmask. For legacy association,
2783  * hard code chainmask to 1x1, for 11n association, use
2784  * the chainmask configuration.
2785  */
2786 void
2787 arn_update_chainmask(struct arn_softc *sc)
2788 {
2789 	boolean_t is_ht = B_FALSE;
2790 	sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
2791 
2792 	is_ht = sc->sc_ht_conf.ht_supported;
2793 	if (is_ht) {
2794 		sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
2795 		sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
2796 	} else {
2797 		sc->sc_tx_chainmask = 1;
2798 		sc->sc_rx_chainmask = 1;
2799 	}
2800 
2801 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2802 	    "tx_chainmask = %d, rx_chainmask = %d\n",
2803 	    sc->sc_tx_chainmask, sc->sc_rx_chainmask));
2804 }
2805 
/*
 * DDI_RESUME handler: re-enable PCI access (memory mapping and bus
 * mastering) and, if the device was in use before suspend (the
 * SC_OP_INVALID flag is clear), bring the interface back up.
 * Returns DDI_SUCCESS or a failure code.
 */
static int
arn_resume(dev_info_t *devinfo)
{
	struct arn_softc *sc;
	int ret = DDI_SUCCESS;

	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
	if (sc == NULL) {
		ARN_DBG((ARN_DBG_INIT, "ath: ath_resume(): "
		    "failed to get soft state\n"));
		return (DDI_FAILURE);
	}

	ARN_LOCK(sc);
	/*
	 * Set up config space command register(s). Refuse
	 * to resume on failure.
	 */
	if (arn_pci_setup(sc) != 0) {
		ARN_DBG((ARN_DBG_INIT, "ath: ath_resume(): "
		    "ath_pci_setup() failed\n"));
		ARN_UNLOCK(sc);
		return (DDI_FAILURE);
	}

	/* restart the hardware only if it was running at suspend */
	if (!(sc->sc_flags & SC_OP_INVALID))
		ret = arn_open(sc);
	ARN_UNLOCK(sc);

	return (ret);
}
2837 
2838 static int
2839 arn_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2840 {
2841 	struct arn_softc *sc;
2842 	int		instance;
2843 	int		status;
2844 	int32_t		err;
2845 	uint16_t	vendor_id;
2846 	uint16_t	device_id;
2847 	uint32_t	i;
2848 	uint32_t	val;
2849 	char		strbuf[32];
2850 	ieee80211com_t *ic;
2851 	struct ath_hal *ah;
2852 	wifi_data_t wd = { 0 };
2853 	mac_register_t *macp;
2854 
2855 	switch (cmd) {
2856 	case DDI_ATTACH:
2857 		break;
2858 	case DDI_RESUME:
2859 		return (arn_resume(devinfo));
2860 	default:
2861 		return (DDI_FAILURE);
2862 	}
2863 
2864 	instance = ddi_get_instance(devinfo);
2865 	if (ddi_soft_state_zalloc(arn_soft_state_p, instance) != DDI_SUCCESS) {
2866 		ARN_DBG((ARN_DBG_ATTACH, "arn: "
2867 		    "%s: Unable to alloc softstate\n", __func__));
2868 		return (DDI_FAILURE);
2869 	}
2870 
2871 	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
2872 	ic = (ieee80211com_t *)sc;
2873 	sc->sc_dev = devinfo;
2874 
2875 	mutex_init(&sc->sc_genlock, NULL, MUTEX_DRIVER, NULL);
2876 	mutex_init(&sc->sc_serial_rw, NULL, MUTEX_DRIVER, NULL);
2877 	mutex_init(&sc->sc_txbuflock, NULL, MUTEX_DRIVER, NULL);
2878 	mutex_init(&sc->sc_rxbuflock, NULL, MUTEX_DRIVER, NULL);
2879 	mutex_init(&sc->sc_resched_lock, NULL, MUTEX_DRIVER, NULL);
2880 #ifdef ARN_IBSS
2881 	mutex_init(&sc->sc_bcbuflock, NULL, MUTEX_DRIVER, NULL);
2882 #endif
2883 
2884 	sc->sc_flags |= SC_OP_INVALID;
2885 
2886 	err = pci_config_setup(devinfo, &sc->sc_cfg_handle);
2887 	if (err != DDI_SUCCESS) {
2888 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2889 		    "pci_config_setup() failed"));
2890 		goto attach_fail0;
2891 	}
2892 
2893 	if (arn_pci_setup(sc) != 0)
2894 		goto attach_fail1;
2895 
2896 	/* Cache line size set up */
2897 	arn_pci_config_cachesize(sc);
2898 
2899 	vendor_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_VENID);
2900 	device_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_DEVID);
2901 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): vendor 0x%x, "
2902 	    "device id 0x%x, cache size %d\n",
2903 	    vendor_id, device_id,
2904 	    pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ)));
2905 
2906 	pci_config_put8(sc->sc_cfg_handle, PCI_CONF_LATENCY_TIMER, 0xa8);
2907 	val = pci_config_get32(sc->sc_cfg_handle, 0x40);
2908 	if ((val & 0x0000ff00) != 0)
2909 		pci_config_put32(sc->sc_cfg_handle, 0x40, val & 0xffff00ff);
2910 
2911 	err = ddi_regs_map_setup(devinfo, 1,
2912 	    &sc->mem, 0, 0, &arn_reg_accattr, &sc->sc_io_handle);
2913 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2914 	    "regs map1 = %x err=%d\n", sc->mem, err));
2915 	if (err != DDI_SUCCESS) {
2916 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2917 		    "ddi_regs_map_setup() failed"));
2918 		goto attach_fail1;
2919 	}
2920 
2921 	ah = ath9k_hw_attach(device_id, sc, sc->mem, &status);
2922 	if (ah == NULL) {
2923 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2924 		    "unable to attach hw: H/W status %u\n",
2925 		    status));
2926 		goto attach_fail2;
2927 	}
2928 	sc->sc_ah = ah;
2929 
2930 	ath9k_hw_getmac(ah, ic->ic_macaddr);
2931 
2932 	/* Get the hardware key cache size. */
2933 	sc->sc_keymax = ah->ah_caps.keycache_size;
2934 	if (sc->sc_keymax > ATH_KEYMAX) {
2935 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2936 		    "Warning, using only %u entries in %u key cache\n",
2937 		    ATH_KEYMAX, sc->sc_keymax));
2938 		sc->sc_keymax = ATH_KEYMAX;
2939 	}
2940 
2941 	/*
2942 	 * Reset the key cache since some parts do not
2943 	 * reset the contents on initial power up.
2944 	 */
2945 	for (i = 0; i < sc->sc_keymax; i++)
2946 		(void) ath9k_hw_keyreset(ah, (uint16_t)i);
2947 	/*
2948 	 * Mark key cache slots associated with global keys
2949 	 * as in use.  If we knew TKIP was not to be used we
2950 	 * could leave the +32, +64, and +32+64 slots free.
2951 	 * XXX only for splitmic.
2952 	 */
2953 	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
2954 		set_bit(i, sc->sc_keymap);
2955 		set_bit(i + 32, sc->sc_keymap);
2956 		set_bit(i + 64, sc->sc_keymap);
2957 		set_bit(i + 32 + 64, sc->sc_keymap);
2958 	}
2959 
2960 	/* Collect the channel list using the default country code */
2961 	err = arn_setup_channels(sc);
2962 	if (err == EINVAL) {
2963 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2964 		    "ERR:arn_setup_channels\n"));
2965 		goto attach_fail3;
2966 	}
2967 
2968 	/* default to STA mode */
2969 	sc->sc_ah->ah_opmode = ATH9K_M_STA;
2970 
2971 	/* Setup rate tables */
2972 	arn_rate_attach(sc);
2973 	arn_setup_rates(sc, IEEE80211_MODE_11A);
2974 	arn_setup_rates(sc, IEEE80211_MODE_11B);
2975 	arn_setup_rates(sc, IEEE80211_MODE_11G);
2976 
2977 	/* Setup current mode here */
2978 	arn_setcurmode(sc, ATH9K_MODE_11G);
2979 
2980 	/* 802.11g features */
2981 	if (sc->sc_have11g)
2982 		ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
2983 		    IEEE80211_C_SHSLOT;		/* short slot time */
2984 
2985 	/* Temp workaround */
2986 	sc->sc_mrretry = 1;
2987 	sc->sc_config.ath_aggr_prot = 0;
2988 
2989 	/* Setup tx/rx descriptors */
2990 	err = arn_desc_alloc(devinfo, sc);
2991 	if (err != DDI_SUCCESS) {
2992 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2993 		    "failed to allocate descriptors: %d\n", err));
2994 		goto attach_fail3;
2995 	}
2996 
2997 	if ((sc->sc_tq = ddi_taskq_create(devinfo, "ath_taskq", 1,
2998 	    TASKQ_DEFAULTPRI, 0)) == NULL) {
2999 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3000 		    "ERR:ddi_taskq_create\n"));
3001 		goto attach_fail4;
3002 	}
3003 
3004 	/*
3005 	 * Allocate hardware transmit queues: one queue for
3006 	 * beacon frames and one data queue for each QoS
3007 	 * priority.  Note that the hal handles reseting
3008 	 * these queues at the needed time.
3009 	 */
3010 #ifdef ARN_IBSS
3011 	sc->sc_beaconq = arn_beaconq_setup(ah);
3012 	if (sc->sc_beaconq == (-1)) {
3013 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3014 		    "unable to setup a beacon xmit queue\n"));
3015 		goto attach_fail4;
3016 	}
3017 #endif
3018 #ifdef ARN_HOSTAP
3019 	sc->sc_cabq = arn_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
3020 	if (sc->sc_cabq == NULL) {
3021 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3022 		    "unable to setup CAB xmit queue\n"));
3023 		goto attach_fail4;
3024 	}
3025 
3026 	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
3027 	ath_cabq_update(sc);
3028 #endif
3029 
3030 	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
3031 		sc->sc_haltype2q[i] = -1;
3032 
3033 	/* Setup data queues */
3034 	/* NB: ensure BK queue is the lowest priority h/w queue */
3035 	if (!arn_tx_setup(sc, ATH9K_WME_AC_BK)) {
3036 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3037 		    "unable to setup xmit queue for BK traffic\n"));
3038 		goto attach_fail4;
3039 	}
3040 	if (!arn_tx_setup(sc, ATH9K_WME_AC_BE)) {
3041 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3042 		    "unable to setup xmit queue for BE traffic\n"));
3043 		goto attach_fail4;
3044 	}
3045 	if (!arn_tx_setup(sc, ATH9K_WME_AC_VI)) {
3046 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3047 		    "unable to setup xmit queue for VI traffic\n"));
3048 		goto attach_fail4;
3049 	}
3050 	if (!arn_tx_setup(sc, ATH9K_WME_AC_VO)) {
3051 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3052 		    "unable to setup xmit queue for VO traffic\n"));
3053 		goto attach_fail4;
3054 	}
3055 
3056 	/*
3057 	 * Initializes the noise floor to a reasonable default value.
3058 	 * Later on this will be updated during ANI processing.
3059 	 */
3060 
3061 	sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;
3062 
3063 
3064 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3065 	    ATH9K_CIPHER_TKIP, NULL)) {
3066 		/*
3067 		 * Whether we should enable h/w TKIP MIC.
3068 		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
3069 		 * report WMM capable, so it's always safe to turn on
3070 		 * TKIP MIC in this case.
3071 		 */
3072 		(void) ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
3073 		    0, 1, NULL);
3074 	}
3075 
3076 	/* Get cipher releated capability information */
3077 	arn_get_hw_encap(sc);
3078 
3079 	/*
3080 	 * Check whether the separate key cache entries
3081 	 * are required to handle both tx+rx MIC keys.
3082 	 * With split mic keys the number of stations is limited
3083 	 * to 27 otherwise 59.
3084 	 */
3085 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3086 	    ATH9K_CIPHER_TKIP, NULL) &&
3087 	    ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3088 	    ATH9K_CIPHER_MIC, NULL) &&
3089 	    ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
3090 	    0, NULL))
3091 		sc->sc_splitmic = 1;
3092 
3093 	/* turn on mcast key search if possible */
3094 	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
3095 		(void) ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
3096 		    1, NULL);
3097 
3098 	sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
3099 	sc->sc_config.txpowlimit_override = 0;
3100 
3101 	/* 11n Capabilities */
3102 	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
3103 		sc->sc_flags |= SC_OP_TXAGGR;
3104 		sc->sc_flags |= SC_OP_RXAGGR;
3105 		arn_setup_ht_cap(sc);
3106 		arn_overwrite_11n_rateset(sc);
3107 	}
3108 
3109 	sc->sc_tx_chainmask = 1;
3110 	sc->sc_rx_chainmask = 1;
3111 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3112 	    "tx_chainmask = %d, rx_chainmask = %d\n",
3113 	    sc->sc_tx_chainmask, sc->sc_rx_chainmask));
3114 
3115 	/* arn_update_chainmask(sc); */
3116 
3117 	(void) ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, B_TRUE, NULL);
3118 	sc->sc_defant = ath9k_hw_getdefantenna(ah);
3119 
3120 	ath9k_hw_getmac(ah, sc->sc_myaddr);
3121 	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
3122 		ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
3123 		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
3124 		(void) ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
3125 	}
3126 
3127 	/* set default value to short slot time */
3128 	sc->sc_slottime = ATH9K_SLOT_TIME_9;
3129 	(void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
3130 
3131 	/* initialize beacon slots */
3132 	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
3133 		sc->sc_bslot[i] = ATH_IF_ID_ANY;
3134 
3135 	/* Save MISC configurations */
3136 	sc->sc_config.swBeaconProcess = 1;
3137 
3138 	/* Support QoS/WME */
3139 	ic->ic_caps |= IEEE80211_C_WME;
3140 	ic->ic_wme.wme_update = arn_wme_update;
3141 
3142 	/* Support 802.11n/HT */
3143 	if (sc->sc_ht_conf.ht_supported) {
3144 		ic->ic_htcaps =
3145 		    IEEE80211_HTCAP_CHWIDTH40 |
3146 		    IEEE80211_HTCAP_SHORTGI40 |
3147 		    IEEE80211_HTCAP_DSSSCCK40 |
3148 		    IEEE80211_HTCAP_MAXAMSDU_7935 |
3149 		    IEEE80211_HTC_HT |
3150 		    IEEE80211_HTC_AMSDU |
3151 		    IEEE80211_HTCAP_RXSTBC_2STREAM;
3152 
3153 #ifdef ARN_TX_AGGREGATION
3154 	ic->ic_htcaps |= IEEE80211_HTC_AMPDU;
3155 #endif
3156 	}
3157 
3158 	/* Header padding requested by driver */
3159 	ic->ic_flags |= IEEE80211_F_DATAPAD;
3160 	/* Support WPA/WPA2 */
3161 	ic->ic_caps |= IEEE80211_C_WPA;
3162 #if 0
3163 	ic->ic_caps |= IEEE80211_C_TXFRAG; /* handle tx frags */
3164 	ic->ic_caps |= IEEE80211_C_BGSCAN; /* capable of bg scanning */
3165 #endif
3166 	ic->ic_phytype = IEEE80211_T_HT;
3167 	ic->ic_opmode = IEEE80211_M_STA;
3168 	ic->ic_state = IEEE80211_S_INIT;
3169 	ic->ic_maxrssi = ARN_MAX_RSSI;
3170 	ic->ic_set_shortslot = arn_set_shortslot;
3171 	ic->ic_xmit = arn_tx;
3172 	ieee80211_attach(ic);
3173 
3174 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3175 	    "ic->ic_curchan->ich_freq: %d\n", ic->ic_curchan->ich_freq));
3176 
3177 	/* different instance has different WPA door */
3178 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
3179 	    ddi_driver_name(devinfo),
3180 	    ddi_get_instance(devinfo));
3181 
3182 	if (sc->sc_ht_conf.ht_supported) {
3183 		sc->sc_recv_action = ic->ic_recv_action;
3184 		ic->ic_recv_action = arn_ampdu_recv_action;
3185 		// sc->sc_send_action = ic->ic_send_action;
3186 		// ic->ic_send_action = arn_ampdu_send_action;
3187 
3188 		ic->ic_ampdu_rxmax = sc->sc_ht_conf.ampdu_factor;
3189 		ic->ic_ampdu_density = sc->sc_ht_conf.ampdu_density;
3190 		ic->ic_ampdu_limit = ic->ic_ampdu_rxmax;
3191 	}
3192 
3193 	/* Override 80211 default routines */
3194 	sc->sc_newstate = ic->ic_newstate;
3195 	ic->ic_newstate = arn_newstate;
3196 #ifdef ARN_IBSS
3197 	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
3198 	ic->ic_recv_mgmt = arn_recv_mgmt;
3199 #endif
3200 	ic->ic_watchdog = arn_watchdog;
3201 	ic->ic_node_alloc = arn_node_alloc;
3202 	ic->ic_node_free = arn_node_free;
3203 	ic->ic_crypto.cs_key_alloc = arn_key_alloc;
3204 	ic->ic_crypto.cs_key_delete = arn_key_delete;
3205 	ic->ic_crypto.cs_key_set = arn_key_set;
3206 
3207 	ieee80211_media_init(ic);
3208 
3209 	/*
3210 	 * initialize default tx key
3211 	 */
3212 	ic->ic_def_txkey = 0;
3213 
3214 	sc->sc_rx_pend = 0;
3215 	(void) ath9k_hw_set_interrupts(sc->sc_ah, 0);
3216 	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW,
3217 	    &sc->sc_softint_id, NULL, 0, arn_softint_handler, (caddr_t)sc);
3218 	if (err != DDI_SUCCESS) {
3219 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3220 		    "ddi_add_softintr() failed....\n"));
3221 		goto attach_fail5;
3222 	}
3223 
3224 	if (ddi_get_iblock_cookie(devinfo, 0, &sc->sc_iblock)
3225 	    != DDI_SUCCESS) {
3226 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3227 		    "Can not get iblock cookie for INT\n"));
3228 		goto attach_fail6;
3229 	}
3230 
3231 	if (ddi_add_intr(devinfo, 0, NULL, NULL, arn_isr,
3232 	    (caddr_t)sc) != DDI_SUCCESS) {
3233 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3234 		    "Can not set intr for ARN driver\n"));
3235 		goto attach_fail6;
3236 	}
3237 
3238 	/*
3239 	 * Provide initial settings for the WiFi plugin; whenever this
3240 	 * information changes, we need to call mac_plugindata_update()
3241 	 */
3242 	wd.wd_opmode = ic->ic_opmode;
3243 	wd.wd_secalloc = WIFI_SEC_NONE;
3244 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid);
3245 
3246 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3247 	    "IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid)"
3248 	    "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
3249 	    wd.wd_bssid[0], wd.wd_bssid[1], wd.wd_bssid[2],
3250 	    wd.wd_bssid[3], wd.wd_bssid[4], wd.wd_bssid[5]));
3251 
3252 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
3253 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3254 		    "MAC version mismatch\n"));
3255 		goto attach_fail7;
3256 	}
3257 
3258 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
3259 	macp->m_driver		= sc;
3260 	macp->m_dip		= devinfo;
3261 	macp->m_src_addr	= ic->ic_macaddr;
3262 	macp->m_callbacks	= &arn_m_callbacks;
3263 	macp->m_min_sdu		= 0;
3264 	macp->m_max_sdu		= IEEE80211_MTU;
3265 	macp->m_pdata		= &wd;
3266 	macp->m_pdata_size	= sizeof (wd);
3267 
3268 	err = mac_register(macp, &ic->ic_mach);
3269 	mac_free(macp);
3270 	if (err != 0) {
3271 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3272 		    "mac_register err %x\n", err));
3273 		goto attach_fail7;
3274 	}
3275 
3276 	/* Create minor node of type DDI_NT_NET_WIFI */
3277 	(void) snprintf(strbuf, sizeof (strbuf), "%s%d",
3278 	    ARN_NODENAME, instance);
3279 	err = ddi_create_minor_node(devinfo, strbuf, S_IFCHR,
3280 	    instance + 1, DDI_NT_NET_WIFI, 0);
3281 	if (err != DDI_SUCCESS)
3282 		ARN_DBG((ARN_DBG_ATTACH, "WARN: arn: arn_attach(): "
3283 		    "Create minor node failed - %d\n", err));
3284 
3285 	/* Notify link is down now */
3286 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
3287 
3288 	sc->sc_promisc = B_FALSE;
3289 	bzero(sc->sc_mcast_refs, sizeof (sc->sc_mcast_refs));
3290 	bzero(sc->sc_mcast_hash, sizeof (sc->sc_mcast_hash));
3291 
3292 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3293 	    "Atheros AR%s MAC/BB Rev:%x "
3294 	    "AR%s RF Rev:%x: mem=0x%lx\n",
3295 	    arn_mac_bb_name(ah->ah_macVersion),
3296 	    ah->ah_macRev,
3297 	    arn_rf_name((ah->ah_analog5GhzRev & AR_RADIO_SREV_MAJOR)),
3298 	    ah->ah_phyRev,
3299 	    (unsigned long)sc->mem));
3300 
3301 	/* XXX: hardware will not be ready until arn_open() being called */
3302 	sc->sc_flags |= SC_OP_INVALID;
3303 	sc->sc_isrunning = 0;
3304 
3305 	return (DDI_SUCCESS);
3306 
3307 attach_fail7:
3308 	ddi_remove_intr(devinfo, 0, sc->sc_iblock);
3309 attach_fail6:
3310 	ddi_remove_softintr(sc->sc_softint_id);
3311 attach_fail5:
3312 	(void) ieee80211_detach(ic);
3313 attach_fail4:
3314 	arn_desc_free(sc);
3315 	if (sc->sc_tq)
3316 		ddi_taskq_destroy(sc->sc_tq);
3317 attach_fail3:
3318 	ath9k_hw_detach(ah);
3319 attach_fail2:
3320 	ddi_regs_map_free(&sc->sc_io_handle);
3321 attach_fail1:
3322 	pci_config_teardown(&sc->sc_cfg_handle);
3323 attach_fail0:
3324 	sc->sc_flags |= SC_OP_INVALID;
3325 	/* cleanup tx queues */
3326 	mutex_destroy(&sc->sc_txbuflock);
3327 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3328 		if (ARN_TXQ_SETUP(sc, i)) {
3329 			/* arn_tx_cleanupq(asc, &asc->sc_txq[i]); */
3330 			mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
3331 		}
3332 	}
3333 	mutex_destroy(&sc->sc_rxbuflock);
3334 	mutex_destroy(&sc->sc_serial_rw);
3335 	mutex_destroy(&sc->sc_genlock);
3336 	mutex_destroy(&sc->sc_resched_lock);
3337 #ifdef ARN_IBSS
3338 	mutex_destroy(&sc->sc_bcbuflock);
3339 #endif
3340 
3341 	ddi_soft_state_free(arn_soft_state_p, instance);
3342 
3343 	return (DDI_FAILURE);
3344 
3345 }
3346 
/*
 * Suspend transmit/receive for powerdown (DDI_SUSPEND path).
 *
 * Quiesces the device by closing it under the softc lock; hardware
 * state is re-established later via the normal open path on resume.
 * Always succeeds.
 */
static int
arn_suspend(struct arn_softc *sc)
{
	ARN_LOCK(sc);
	arn_close(sc);	/* stops tx/rx and the hardware; see arn_close() */
	ARN_UNLOCK(sc);

	return (DDI_SUCCESS);
}
3359 
/*
 * detach(9E) entry point.
 *
 * DDI_SUSPEND is delegated to arn_suspend(); DDI_DETACH tears the
 * instance down completely.  The teardown order below is deliberate
 * (see the NB comment mid-function) -- do not reorder casually.
 */
static int32_t
arn_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	struct arn_softc *sc;
	int i;

	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
	ASSERT(sc != NULL);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (arn_suspend(sc));

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Ask the MAC layer to stop feeding us requests; if it refuses
	 * (e.g. the interface is still in use), fail the detach and
	 * leave the instance fully operational.
	 */
	if (mac_disable(sc->sc_isc.ic_mach) != 0)
		return (DDI_FAILURE);

	/* stop periodic scan/calibration timers before freeing anything */
	arn_stop_scantimer(sc);
	arn_stop_caltimer(sc);

	/* disable interrupts */
	(void) ath9k_hw_set_interrupts(sc->sc_ah, 0);

	/*
	 * Unregister from the MAC layer subsystem
	 */
	(void) mac_unregister(sc->sc_isc.ic_mach);

	/* free interrupt resources */
	ddi_remove_intr(devinfo, 0, sc->sc_iblock);
	ddi_remove_softintr(sc->sc_softint_id);

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 */
	ieee80211_detach(&sc->sc_isc);

	arn_desc_free(sc);

	ddi_taskq_destroy(sc->sc_tq);

	/*
	 * Wake the chip before touching it below; SC_OP_INVALID means the
	 * hardware was never brought up, so skip the power transition.
	 */
	if (!(sc->sc_flags & SC_OP_INVALID))
		(void) ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);

	/* cleanup tx queues */
	mutex_destroy(&sc->sc_txbuflock);
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ARN_TXQ_SETUP(sc, i)) {
			arn_tx_cleanupq(sc, &sc->sc_txq[i]);
			mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
		}
	}

	ath9k_hw_detach(sc->sc_ah);

	/* free io handle */
	ddi_regs_map_free(&sc->sc_io_handle);
	pci_config_teardown(&sc->sc_cfg_handle);

	/* destroy locks */
	mutex_destroy(&sc->sc_genlock);
	mutex_destroy(&sc->sc_serial_rw);
	mutex_destroy(&sc->sc_rxbuflock);
	mutex_destroy(&sc->sc_resched_lock);
#ifdef ARN_IBSS
	mutex_destroy(&sc->sc_bcbuflock);
#endif

	/* NULL name removes every minor node this instance created */
	ddi_remove_minor_node(devinfo, NULL);
	ddi_soft_state_free(arn_soft_state_p, ddi_get_instance(devinfo));

	return (DDI_SUCCESS);
}
3447 
3448 /*
3449  * quiesce(9E) entry point.
3450  *
3451  * This function is called when the system is single-threaded at high
3452  * PIL with preemption disabled. Therefore, this function must not be
3453  * blocked.
3454  *
3455  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
3456  * DDI_FAILURE indicates an error condition and should almost never happen.
3457  */
3458 static int32_t
3459 arn_quiesce(dev_info_t *devinfo)
3460 {
3461 	struct arn_softc *sc;
3462 	int i;
3463 	struct ath_hal *ah;
3464 
3465 	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
3466 
3467 	if (sc == NULL || (ah = sc->sc_ah) == NULL)
3468 		return (DDI_FAILURE);
3469 
3470 	/*
3471 	 * Disable interrupts
3472 	 */
3473 	(void) ath9k_hw_set_interrupts(ah, 0);
3474 
3475 	/*
3476 	 * Disable TX HW
3477 	 */
3478 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3479 		if (ARN_TXQ_SETUP(sc, i))
3480 			(void) ath9k_hw_stoptxdma(ah, sc->sc_txq[i].axq_qnum);
3481 	}
3482 
3483 	/*
3484 	 * Disable RX HW
3485 	 */
3486 	ath9k_hw_stoppcurecv(ah);
3487 	ath9k_hw_setrxfilter(ah, 0);
3488 	(void) ath9k_hw_stopdmarecv(ah);
3489 	drv_usecwait(3000);
3490 
3491 	/*
3492 	 * Power down HW
3493 	 */
3494 	(void) ath9k_hw_phy_disable(ah);
3495 
3496 	return (DDI_SUCCESS);
3497 }
3498 
/*
 * Character/STREAMS dev_ops template: no identify/probe work (nulldev),
 * no reset (nodev), no getinfo or streamtab, MT-safe driver (D_MP),
 * with a quiesce(9E) entry to support fast reboot.
 */
DDI_DEFINE_STREAM_OPS(arn_dev_ops, nulldev, nulldev, arn_attach, arn_detach,
    nodev, NULL, D_MP, NULL, arn_quiesce);

static struct modldrv arn_modldrv = {
	&mod_driverops, /* Type of module.  This one is a driver */
	"arn-Atheros 9000 series driver:2.0", /* short description */
	&arn_dev_ops /* driver specific ops */
};

/* Single-linkage module descriptor handed to mod_install(9F) */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&arn_modldrv, NULL
};
3511 
/*
 * _info(9E): report module information via the common mod_info(9F)
 * helper; no driver-specific state involved.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
3517 
3518 int
3519 _init(void)
3520 {
3521 	int status;
3522 
3523 	status = ddi_soft_state_init
3524 	    (&arn_soft_state_p, sizeof (struct arn_softc), 1);
3525 	if (status != 0)
3526 		return (status);
3527 
3528 	mutex_init(&arn_loglock, NULL, MUTEX_DRIVER, NULL);
3529 	mac_init_ops(&arn_dev_ops, "arn");
3530 	status = mod_install(&modlinkage);
3531 	if (status != 0) {
3532 		mac_fini_ops(&arn_dev_ops);
3533 		mutex_destroy(&arn_loglock);
3534 		ddi_soft_state_fini(&arn_soft_state_p);
3535 	}
3536 
3537 	return (status);
3538 }
3539 
3540 int
3541 _fini(void)
3542 {
3543 	int status;
3544 
3545 	status = mod_remove(&modlinkage);
3546 	if (status == 0) {
3547 		mac_fini_ops(&arn_dev_ops);
3548 		mutex_destroy(&arn_loglock);
3549 		ddi_soft_state_fini(&arn_soft_state_p);
3550 	}
3551 	return (status);
3552 }
3553