xref: /titanic_41/usr/src/uts/common/io/arn/arn_main.c (revision 0a47c91c895e274dd0990009919e30e984364a8b)
1 /*
2  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2008 Atheros Communications Inc.
8  *
9  * Permission to use, copy, modify, and/or distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 #include <sys/param.h>
23 #include <sys/types.h>
24 #include <sys/signal.h>
25 #include <sys/stream.h>
26 #include <sys/termio.h>
27 #include <sys/errno.h>
28 #include <sys/file.h>
29 #include <sys/cmn_err.h>
30 #include <sys/stropts.h>
31 #include <sys/strsubr.h>
32 #include <sys/strtty.h>
33 #include <sys/kbio.h>
34 #include <sys/cred.h>
35 #include <sys/stat.h>
36 #include <sys/consdev.h>
37 #include <sys/kmem.h>
38 #include <sys/modctl.h>
39 #include <sys/ddi.h>
40 #include <sys/sunddi.h>
41 #include <sys/pci.h>
42 #include <sys/errno.h>
43 #include <sys/mac_provider.h>
44 #include <sys/dlpi.h>
45 #include <sys/ethernet.h>
46 #include <sys/list.h>
47 #include <sys/byteorder.h>
48 #include <sys/strsun.h>
49 #include <sys/policy.h>
50 #include <inet/common.h>
51 #include <inet/nd.h>
52 #include <inet/mi.h>
53 #include <inet/wifi_ioctl.h>
54 #include <sys/mac_wifi.h>
55 #include <sys/net80211.h>
56 #include <sys/net80211_proto.h>
57 #include <sys/net80211_ht.h>
58 
59 
60 #include "arn_ath9k.h"
61 #include "arn_core.h"
62 #include "arn_reg.h"
63 #include "arn_hw.h"
64 
65 #define	ARN_MAX_RSSI	45	/* max rssi */
66 
/*
 * Default 11n rates supported by this station.
 */
70 extern struct ieee80211_htrateset ieee80211_rateset_11n;
71 
72 /*
73  * PIO access attributes for registers
74  */
75 static ddi_device_acc_attr_t arn_reg_accattr = {
76 	DDI_DEVICE_ATTR_V0,
77 	DDI_STRUCTURE_LE_ACC,
78 	DDI_STRICTORDER_ACC,
79 	DDI_DEFAULT_ACC
80 };
81 
82 /*
83  * DMA access attributes for descriptors: NOT to be byte swapped.
84  */
85 static ddi_device_acc_attr_t arn_desc_accattr = {
86 	DDI_DEVICE_ATTR_V0,
87 	DDI_STRUCTURE_LE_ACC,
88 	DDI_STRICTORDER_ACC,
89 	DDI_DEFAULT_ACC
90 };
91 
92 /*
93  * Describes the chip's DMA engine
94  */
95 static ddi_dma_attr_t arn_dma_attr = {
96 	DMA_ATTR_V0,	/* version number */
97 	0,				/* low address */
98 	0xffffffffU,	/* high address */
99 	0x3ffffU,		/* counter register max */
100 	1,				/* alignment */
101 	0xFFF,			/* burst sizes */
102 	1,				/* minimum transfer size */
103 	0x3ffffU,		/* max transfer size */
104 	0xffffffffU,	/* address register max */
105 	1,				/* no scatter-gather */
106 	1,				/* granularity of device */
107 	0,				/* DMA flags */
108 };
109 
110 static ddi_dma_attr_t arn_desc_dma_attr = {
111 	DMA_ATTR_V0,	/* version number */
112 	0,				/* low address */
113 	0xffffffffU,	/* high address */
114 	0xffffffffU,	/* counter register max */
115 	0x1000,			/* alignment */
116 	0xFFF,			/* burst sizes */
117 	1,				/* minimum transfer size */
118 	0xffffffffU,	/* max transfer size */
119 	0xffffffffU,	/* address register max */
120 	1,				/* no scatter-gather */
121 	1,				/* granularity of device */
122 	0,				/* DMA flags */
123 };
124 
125 #define	ATH_DEF_CACHE_BYTES	32 /* default cache line size */
126 
127 static kmutex_t arn_loglock;
128 static void *arn_soft_state_p = NULL;
129 static int arn_dwelltime = 200; /* scan interval */
130 
131 static int	arn_m_stat(void *,  uint_t, uint64_t *);
132 static int	arn_m_start(void *);
133 static void	arn_m_stop(void *);
134 static int	arn_m_promisc(void *, boolean_t);
135 static int	arn_m_multicst(void *, boolean_t, const uint8_t *);
136 static int	arn_m_unicst(void *, const uint8_t *);
137 static mblk_t	*arn_m_tx(void *, mblk_t *);
138 static void	arn_m_ioctl(void *, queue_t *, mblk_t *);
139 static int	arn_m_setprop(void *, const char *, mac_prop_id_t,
140     uint_t, const void *);
141 static int	arn_m_getprop(void *, const char *, mac_prop_id_t,
142     uint_t, void *);
143 static void	arn_m_propinfo(void *, const char *, mac_prop_id_t,
144     mac_prop_info_handle_t);
145 
/* MAC Callback Functions */
147 static mac_callbacks_t arn_m_callbacks = {
148 	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
149 	arn_m_stat,
150 	arn_m_start,
151 	arn_m_stop,
152 	arn_m_promisc,
153 	arn_m_multicst,
154 	arn_m_unicst,
155 	arn_m_tx,
156 	NULL,
157 	arn_m_ioctl,
158 	NULL,
159 	NULL,
160 	NULL,
161 	arn_m_setprop,
162 	arn_m_getprop,
163 	arn_m_propinfo
164 };
165 
166 /*
167  * ARN_DBG_HW
168  * ARN_DBG_REG_IO
169  * ARN_DBG_QUEUE
170  * ARN_DBG_EEPROM
171  * ARN_DBG_XMIT
172  * ARN_DBG_RECV
173  * ARN_DBG_CALIBRATE
174  * ARN_DBG_CHANNEL
175  * ARN_DBG_INTERRUPT
176  * ARN_DBG_REGULATORY
177  * ARN_DBG_ANI
178  * ARN_DBG_POWER_MGMT
179  * ARN_DBG_KEYCACHE
180  * ARN_DBG_BEACON
181  * ARN_DBG_RATE
182  * ARN_DBG_INIT
183  * ARN_DBG_ATTACH
184  * ARN_DBG_DEATCH
185  * ARN_DBG_AGGR
186  * ARN_DBG_RESET
187  * ARN_DBG_FATAL
188  * ARN_DBG_ANY
189  * ARN_DBG_ALL
190  */
191 uint32_t arn_dbg_mask = 0;
192 
/*
 * Exception/warning cases not leading to panic.
 *
 * Formats the message via vcmn_err(CE_WARN).  arn_loglock serializes
 * all driver log output so messages from concurrent contexts do not
 * interleave.
 */
void
arn_problem(const int8_t *fmt, ...)
{
	va_list args;

	mutex_enter(&arn_loglock);

	va_start(args, fmt);
	vcmn_err(CE_WARN, fmt, args);
	va_end(args);

	mutex_exit(&arn_loglock);
}
209 
/*
 * Normal log information independent of debug.
 *
 * Same as arn_problem() but at CE_CONT severity; serialized by
 * arn_loglock.
 */
void
arn_log(const int8_t *fmt, ...)
{
	va_list args;

	mutex_enter(&arn_loglock);

	va_start(args, fmt);
	vcmn_err(CE_CONT, fmt, args);
	va_end(args);

	mutex_exit(&arn_loglock);
}
226 
227 void
228 arn_dbg(uint32_t dbg_flags, const int8_t *fmt, ...)
229 {
230 	va_list args;
231 
232 	if (dbg_flags & arn_dbg_mask) {
233 		mutex_enter(&arn_loglock);
234 		va_start(args, fmt);
235 		vcmn_err(CE_CONT, fmt, args);
236 		va_end(args);
237 		mutex_exit(&arn_loglock);
238 	}
239 }
240 
241 /*
242  * Read and write, they both share the same lock. We do this to serialize
243  * reads and writes on Atheros 802.11n PCI devices only. This is required
244  * as the FIFO on these devices can only accept sanely 2 requests. After
245  * that the device goes bananas. Serializing the reads/writes prevents this
246  * from happening.
247  */
248 void
249 arn_iowrite32(struct ath_hal *ah, uint32_t reg_offset, uint32_t val)
250 {
251 	struct arn_softc *sc = ah->ah_sc;
252 	if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
253 		mutex_enter(&sc->sc_serial_rw);
254 		ddi_put32(sc->sc_io_handle,
255 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
256 		mutex_exit(&sc->sc_serial_rw);
257 	} else {
258 		ddi_put32(sc->sc_io_handle,
259 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
260 	}
261 }
262 
263 unsigned int
264 arn_ioread32(struct ath_hal *ah, uint32_t reg_offset)
265 {
266 	uint32_t val;
267 	struct arn_softc *sc = ah->ah_sc;
268 	if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
269 		mutex_enter(&sc->sc_serial_rw);
270 		val = ddi_get32(sc->sc_io_handle,
271 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
272 		mutex_exit(&sc->sc_serial_rw);
273 	} else {
274 		val = ddi_get32(sc->sc_io_handle,
275 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
276 	}
277 
278 	return (val);
279 }
280 
281 /*
282  * Allocate an area of memory and a DMA handle for accessing it
283  */
284 static int
285 arn_alloc_dma_mem(dev_info_t *devinfo, ddi_dma_attr_t *dma_attr, size_t memsize,
286     ddi_device_acc_attr_t *attr_p, uint_t alloc_flags,
287     uint_t bind_flags, dma_area_t *dma_p)
288 {
289 	int err;
290 
291 	/*
292 	 * Allocate handle
293 	 */
294 	err = ddi_dma_alloc_handle(devinfo, dma_attr,
295 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
296 	if (err != DDI_SUCCESS)
297 		return (DDI_FAILURE);
298 
299 	/*
300 	 * Allocate memory
301 	 */
302 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
303 	    alloc_flags, DDI_DMA_SLEEP, NULL, &dma_p->mem_va,
304 	    &dma_p->alength, &dma_p->acc_hdl);
305 	if (err != DDI_SUCCESS)
306 		return (DDI_FAILURE);
307 
308 	/*
309 	 * Bind the two together
310 	 */
311 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
312 	    dma_p->mem_va, dma_p->alength, bind_flags,
313 	    DDI_DMA_SLEEP, NULL, &dma_p->cookie, &dma_p->ncookies);
314 	if (err != DDI_DMA_MAPPED)
315 		return (DDI_FAILURE);
316 
317 	dma_p->nslots = ~0U;
318 	dma_p->size = ~0U;
319 	dma_p->token = ~0U;
320 	dma_p->offset = 0;
321 	return (DDI_SUCCESS);
322 }
323 
/*
 * Free one allocated area of DMAable memory.
 *
 * Idempotent: safe to call on a dma_area_t that was never (fully) set
 * up, because dma_hdl/acc_hdl are NULL-checked and cleared afterwards.
 */
static void
arn_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		/* unbind first; the DMA cookie is invalid after this */
		(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
		if (dma_p->acc_hdl != NULL) {
			ddi_dma_mem_free(&dma_p->acc_hdl);
			dma_p->acc_hdl = NULL;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->ncookies = 0;
		dma_p->dma_hdl = NULL;
	}
}
341 
/*
 * Initialize a tx, rx or beacon buffer list and allocate a DMA data
 * buffer of 'buflen' bytes for each of the nbuf entries.
 *
 * *pbf and *pds walk the caller's preallocated ath_buf array and the
 * shared descriptor array; on return they are advanced past the nbuf
 * entries consumed here, so consecutive calls carve up the same two
 * arrays.  bf_daddr is the descriptor's device-visible address: the
 * descriptor DMA cookie base plus this descriptor's byte offset within
 * sc_desc.
 *
 * Returns DDI_SUCCESS, or the arn_alloc_dma_mem() error on failure; a
 * failing call leaves the list partially populated and the caller is
 * expected to tear everything down (see arn_desc_free()).
 */
static int
arn_buflist_setup(dev_info_t *devinfo,
    struct arn_softc *sc,
    list_t *bflist,
    struct ath_buf **pbf,
    struct ath_desc **pds,
    int nbuf,
    uint_t dmabflags,
    uint32_t buflen)
{
	int i, err;
	struct ath_buf *bf = *pbf;
	struct ath_desc *ds = *pds;

	list_create(bflist, sizeof (struct ath_buf),
	    offsetof(struct ath_buf, bf_node));
	for (i = 0; i < nbuf; i++, bf++, ds++) {
		bf->bf_desc = ds;
		bf->bf_daddr = sc->sc_desc_dma.cookie.dmac_address +
		    ((uintptr_t)ds - (uintptr_t)sc->sc_desc);
		list_insert_tail(bflist, bf);

		/* alloc DMA memory */
		err = arn_alloc_dma_mem(devinfo, &arn_dma_attr,
		    buflen, &arn_desc_accattr, DDI_DMA_STREAMING,
		    dmabflags, &bf->bf_dma);
		if (err != DDI_SUCCESS)
			return (err);
	}
	*pbf = bf;
	*pds = ds;

	return (DDI_SUCCESS);
}
380 
381 /*
382  * Destroy tx, rx or beacon buffer list. Free DMA memory.
383  */
384 static void
385 arn_buflist_cleanup(list_t *buflist)
386 {
387 	struct ath_buf *bf;
388 
389 	if (!buflist)
390 		return;
391 
392 	bf = list_head(buflist);
393 	while (bf != NULL) {
394 		if (bf->bf_m != NULL) {
395 			freemsg(bf->bf_m);
396 			bf->bf_m = NULL;
397 		}
398 		/* Free DMA buffer */
399 		arn_free_dma_mem(&bf->bf_dma);
400 		if (bf->bf_in != NULL) {
401 			ieee80211_free_node(bf->bf_in);
402 			bf->bf_in = NULL;
403 		}
404 		list_remove(buflist, bf);
405 		bf = list_head(buflist);
406 	}
407 	list_destroy(buflist);
408 }
409 
/*
 * Release all tx/rx (and, with ARN_IBSS, beacon) buffer lists, the
 * shared descriptor DMA area and the ath_buf bookkeeping array.
 * NOTE(review): assumes sc_vbufptr was allocated by arn_desc_alloc()
 * before this is called; kmem_free() of a NULL pointer would panic.
 */
static void
arn_desc_free(struct arn_softc *sc)
{
	arn_buflist_cleanup(&sc->sc_txbuf_list);
	arn_buflist_cleanup(&sc->sc_rxbuf_list);
#ifdef ARN_IBSS
	arn_buflist_cleanup(&sc->sc_bcbuf_list);
#endif

	/* Free descriptor DMA buffer */
	arn_free_dma_mem(&sc->sc_desc_dma);

	kmem_free((void *)sc->sc_vbufptr, sc->sc_vbuflen);
	sc->sc_vbufptr = NULL;
}
425 
426 static int
427 arn_desc_alloc(dev_info_t *devinfo, struct arn_softc *sc)
428 {
429 	int err;
430 	size_t size;
431 	struct ath_desc *ds;
432 	struct ath_buf *bf;
433 
434 #ifdef ARN_IBSS
435 	size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF);
436 #else
437 	size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF);
438 #endif
439 
440 	err = arn_alloc_dma_mem(devinfo, &arn_desc_dma_attr, size,
441 	    &arn_desc_accattr, DDI_DMA_CONSISTENT,
442 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &sc->sc_desc_dma);
443 
444 	/* virtual address of the first descriptor */
445 	sc->sc_desc = (struct ath_desc *)sc->sc_desc_dma.mem_va;
446 
447 	ds = sc->sc_desc;
448 	ARN_DBG((ARN_DBG_INIT, "arn: arn_desc_alloc(): DMA map: "
449 	    "%p (%d) -> %p\n",
450 	    sc->sc_desc, sc->sc_desc_dma.alength,
451 	    sc->sc_desc_dma.cookie.dmac_address));
452 
453 	/* allocate data structures to describe TX/RX DMA buffers */
454 #ifdef ARN_IBSS
455 	sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF +
456 	    ATH_BCBUF);
457 #else
458 	sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF);
459 #endif
460 	bf = (struct ath_buf *)kmem_zalloc(sc->sc_vbuflen, KM_SLEEP);
461 	sc->sc_vbufptr = bf;
462 
463 	/* DMA buffer size for each TX/RX packet */
464 #ifdef ARN_TX_AGGREGRATION
465 	sc->tx_dmabuf_size =
466 	    roundup((IEEE80211_MAX_MPDU_LEN + 3840 * 2),
467 	    min(sc->sc_cachelsz, (uint16_t)64));
468 #else
469 	sc->tx_dmabuf_size =
470 	    roundup(IEEE80211_MAX_MPDU_LEN, min(sc->sc_cachelsz, (uint16_t)64));
471 #endif
472 	sc->rx_dmabuf_size =
473 	    roundup(IEEE80211_MAX_MPDU_LEN, min(sc->sc_cachelsz, (uint16_t)64));
474 
475 	/* create RX buffer list */
476 	err = arn_buflist_setup(devinfo, sc, &sc->sc_rxbuf_list, &bf, &ds,
477 	    ATH_RXBUF, DDI_DMA_READ | DDI_DMA_STREAMING, sc->rx_dmabuf_size);
478 	if (err != DDI_SUCCESS) {
479 		arn_desc_free(sc);
480 		return (err);
481 	}
482 
483 	/* create TX buffer list */
484 	err = arn_buflist_setup(devinfo, sc, &sc->sc_txbuf_list, &bf, &ds,
485 	    ATH_TXBUF, DDI_DMA_STREAMING, sc->tx_dmabuf_size);
486 	if (err != DDI_SUCCESS) {
487 		arn_desc_free(sc);
488 		return (err);
489 	}
490 
491 	/* create beacon buffer list */
492 #ifdef ARN_IBSS
493 	err = arn_buflist_setup(devinfo, sc, &sc->sc_bcbuf_list, &bf, &ds,
494 	    ATH_BCBUF, DDI_DMA_STREAMING);
495 	if (err != DDI_SUCCESS) {
496 		arn_desc_free(sc);
497 		return (err);
498 	}
499 #endif
500 
501 	return (DDI_SUCCESS);
502 }
503 
504 static struct ath_rate_table *
505 /* LINTED E_STATIC_UNUSED */
506 arn_get_ratetable(struct arn_softc *sc, uint32_t mode)
507 {
508 	struct ath_rate_table *rate_table = NULL;
509 
510 	switch (mode) {
511 	case IEEE80211_MODE_11A:
512 		rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
513 		break;
514 	case IEEE80211_MODE_11B:
515 		rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
516 		break;
517 	case IEEE80211_MODE_11G:
518 		rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
519 		break;
520 #ifdef ARB_11N
521 	case IEEE80211_MODE_11NA_HT20:
522 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
523 		break;
524 	case IEEE80211_MODE_11NG_HT20:
525 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
526 		break;
527 	case IEEE80211_MODE_11NA_HT40PLUS:
528 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
529 		break;
530 	case IEEE80211_MODE_11NA_HT40MINUS:
531 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
532 		break;
533 	case IEEE80211_MODE_11NG_HT40PLUS:
534 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
535 		break;
536 	case IEEE80211_MODE_11NG_HT40MINUS:
537 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
538 		break;
539 #endif
540 	default:
541 		ARN_DBG((ARN_DBG_FATAL, "arn: arn_get_ratetable(): "
542 		    "invalid mode %u\n", mode));
543 		return (NULL);
544 	}
545 
546 	return (rate_table);
547 
548 }
549 
/*
 * Switch the driver's current rate table to the one for 'mode'.
 *
 * asc_rixmap maps a dot11 rate code (masked with IEEE80211_RATE_VAL)
 * back to its index in the h/w rate table; unused entries are poisoned
 * with 0xff first.  Also caches the table/mode and selects the
 * protection rate index.
 */
static void
arn_setcurmode(struct arn_softc *sc, enum wireless_mode mode)
{
	struct ath_rate_table *rt;
	int i;

	/* invalidate the whole rate-code -> table-index map first */
	for (i = 0; i < sizeof (sc->asc_rixmap); i++)
		sc->asc_rixmap[i] = 0xff;

	rt = sc->hw_rate_table[mode];
	ASSERT(rt != NULL);

	for (i = 0; i < rt->rate_cnt; i++)
		sc->asc_rixmap[rt->info[i].dot11rate &
		    IEEE80211_RATE_VAL] = (uint8_t)i; /* LINT */

	sc->sc_currates = rt;
	sc->sc_curmode = mode;

	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 * XXX select protection rate index from rate table.
	 */
	sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
}
576 
577 static enum wireless_mode
578 arn_chan2mode(struct ath9k_channel *chan)
579 {
580 	if (chan->chanmode == CHANNEL_A)
581 		return (ATH9K_MODE_11A);
582 	else if (chan->chanmode == CHANNEL_G)
583 		return (ATH9K_MODE_11G);
584 	else if (chan->chanmode == CHANNEL_B)
585 		return (ATH9K_MODE_11B);
586 	else if (chan->chanmode == CHANNEL_A_HT20)
587 		return (ATH9K_MODE_11NA_HT20);
588 	else if (chan->chanmode == CHANNEL_G_HT20)
589 		return (ATH9K_MODE_11NG_HT20);
590 	else if (chan->chanmode == CHANNEL_A_HT40PLUS)
591 		return (ATH9K_MODE_11NA_HT40PLUS);
592 	else if (chan->chanmode == CHANNEL_A_HT40MINUS)
593 		return (ATH9K_MODE_11NA_HT40MINUS);
594 	else if (chan->chanmode == CHANNEL_G_HT40PLUS)
595 		return (ATH9K_MODE_11NG_HT40PLUS);
596 	else if (chan->chanmode == CHANNEL_G_HT40MINUS)
597 		return (ATH9K_MODE_11NG_HT40MINUS);
598 
599 	return (ATH9K_MODE_11B);
600 }
601 
/*
 * Push the configured transmit power limit to the hardware when it
 * differs from the cached value, then read the capability back so
 * sc_curtxpow reflects what the h/w actually accepted (the chip may
 * clamp the requested limit).
 */
static void
arn_update_txpow(struct arn_softc *sc)
{
	struct ath_hal 	*ah = sc->sc_ah;
	uint32_t txpow;

	if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
		(void) ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
		/* read back in case value is clamped */
		(void) ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
		sc->sc_curtxpow = (uint32_t)txpow;
	}
}
615 
/*
 * Convert the 802.11n D2.0 "Minimum MPDU Start Spacing" code (0-7)
 * into microseconds:
 *   0 -> no restriction (0), 1/2/3 -> 1 us, 4 -> 2 us, 5 -> 4 us,
 *   6 -> 8 us, 7 -> 16 us; anything else -> 0.
 * Codes 1-3 (1/4, 1/2 and 1 us) all collapse to 1 us because the
 * lower-layer calculations only have microsecond precision.
 */
uint8_t
parse_mpdudensity(uint8_t mpdudensity)
{
	static const uint8_t spacing_us[8] = { 0, 1, 1, 1, 2, 4, 8, 16 };

	if (mpdudensity >= 8)
		return (0);
	return (spacing_us[mpdudensity]);
}
653 
654 static void
655 arn_setup_rates(struct arn_softc *sc, uint32_t mode)
656 {
657 	int i, maxrates;
658 	struct ath_rate_table *rate_table = NULL;
659 	struct ieee80211_rateset *rateset;
660 	ieee80211com_t *ic = (ieee80211com_t *)sc;
661 
662 	/* rate_table = arn_get_ratetable(sc, mode); */
663 	switch (mode) {
664 	case IEEE80211_MODE_11A:
665 		rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
666 		break;
667 	case IEEE80211_MODE_11B:
668 		rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
669 		break;
670 	case IEEE80211_MODE_11G:
671 		rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
672 		break;
673 #ifdef ARN_11N
674 	case IEEE80211_MODE_11NA_HT20:
675 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
676 		break;
677 	case IEEE80211_MODE_11NG_HT20:
678 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
679 		break;
680 	case IEEE80211_MODE_11NA_HT40PLUS:
681 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
682 		break;
683 	case IEEE80211_MODE_11NA_HT40MINUS:
684 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
685 		break;
686 	case IEEE80211_MODE_11NG_HT40PLUS:
687 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
688 		break;
689 	case IEEE80211_MODE_11NG_HT40MINUS:
690 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
691 		break;
692 #endif
693 	default:
694 		ARN_DBG((ARN_DBG_RATE, "arn: arn_get_ratetable(): "
695 		    "invalid mode %u\n", mode));
696 		break;
697 	}
698 	if (rate_table == NULL)
699 		return;
700 	if (rate_table->rate_cnt > ATH_RATE_MAX) {
701 		ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
702 		    "rate table too small (%u > %u)\n",
703 		    rate_table->rate_cnt, IEEE80211_RATE_MAXSIZE));
704 		maxrates = ATH_RATE_MAX;
705 	} else
706 		maxrates = rate_table->rate_cnt;
707 
708 	ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
709 	    "maxrates is %d\n", maxrates));
710 
711 	rateset = &ic->ic_sup_rates[mode];
712 	for (i = 0; i < maxrates; i++) {
713 		rateset->ir_rates[i] = rate_table->info[i].dot11rate;
714 		ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
715 		    "%d\n", rate_table->info[i].dot11rate));
716 	}
717 	rateset->ir_nrates = (uint8_t)maxrates; /* ??? */
718 }
719 
/*
 * Build ic_sup_channels from the HAL's regulatory channel list.
 *
 * ath9k_regd_init_channels() fills ah->ah_channels; each HAL channel
 * is converted to a net80211 channel index with ath9k_hw_mhz2ieee()
 * and merged into ic_sup_channels (overlapping channels, e.g. 11b and
 * 11g on the same frequency, OR their flags together).  Seeing any 11g
 * channel also enables short-preamble/short-slot capabilities.
 *
 * Returns 0 on success, EINVAL if the regulatory init fails.
 */
static int
arn_setup_channels(struct arn_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	int nchan, i, index;
	uint8_t regclassids[ATH_REGCLASSIDS_MAX];
	uint32_t nregclass = 0;
	struct ath9k_channel *c;

	/* Fill in ah->ah_channels */
	if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (uint32_t *)&nchan,
	    regclassids, ATH_REGCLASSIDS_MAX, &nregclass, CTRY_DEFAULT,
	    B_FALSE, 1)) {
		uint32_t rd = ah->ah_currentRD;
		ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
		    "unable to collect channel list; "
		    "regdomain likely %u country code %u\n",
		    rd, CTRY_DEFAULT));
		return (EINVAL);
	}

	ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
	    "number of channel is %d\n", nchan));

	for (i = 0; i < nchan; i++) {
		c = &ah->ah_channels[i];
		uint32_t flags;
		index = ath9k_hw_mhz2ieee(ah, c->channel, c->channelFlags);

		/*
		 * NOTE(review): this accepts index == IEEE80211_CHAN_MAX;
		 * verify ic_sup_channels really has IEEE80211_CHAN_MAX+1
		 * entries, otherwise this should be ">=".
		 */
		if (index > IEEE80211_CHAN_MAX) {
			ARN_DBG((ARN_DBG_CHANNEL,
			    "arn: arn_setup_channels(): "
			    "bad hal channel %d (%u/%x) ignored\n",
			    index, c->channel, c->channelFlags));
			continue;
		}
		/* NB: flags are known to be compatible */
		if (index < 0) {
			/*
			 * can't handle frequency <2400MHz (negative
			 * channels) right now
			 */
			ARN_DBG((ARN_DBG_CHANNEL,
			    "arn: arn_setup_channels(): "
			    "hal channel %d (%u/%x) "
			    "cannot be handled, ignored\n",
			    index, c->channel, c->channelFlags));
			continue;
		}

		/*
		 * Calculate net80211 flags; most are compatible
		 * but some need massaging.  Note the static turbo
		 * conversion can be removed once net80211 is updated
		 * to understand static vs. dynamic turbo.
		 */

		flags = c->channelFlags & (CHANNEL_ALL | CHANNEL_PASSIVE);

		if (ic->ic_sup_channels[index].ich_freq == 0) {
			ic->ic_sup_channels[index].ich_freq = c->channel;
			ic->ic_sup_channels[index].ich_flags = flags;
		} else {
			/* channels overlap; e.g. 11g and 11b */
			ic->ic_sup_channels[index].ich_flags |= flags;
		}
		if ((c->channelFlags & CHANNEL_G) == CHANNEL_G) {
			sc->sc_have11g = 1;
			ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
			    IEEE80211_C_SHSLOT;	/* short slot time */
		}
	}

	return (0);
}
796 
797 uint32_t
798 arn_chan2flags(ieee80211com_t *isc, struct ieee80211_channel *chan)
799 {
800 	uint32_t channel_mode;
801 	switch (ieee80211_chan2mode(isc, chan)) {
802 	case IEEE80211_MODE_11NA:
803 		if (chan->ich_flags & IEEE80211_CHAN_HT40U)
804 			channel_mode = CHANNEL_A_HT40PLUS;
805 		else if (chan->ich_flags & IEEE80211_CHAN_HT40D)
806 			channel_mode = CHANNEL_A_HT40MINUS;
807 		else
808 			channel_mode = CHANNEL_A_HT20;
809 		break;
810 	case IEEE80211_MODE_11NG:
811 		if (chan->ich_flags & IEEE80211_CHAN_HT40U)
812 			channel_mode = CHANNEL_G_HT40PLUS;
813 		else if (chan->ich_flags & IEEE80211_CHAN_HT40D)
814 			channel_mode = CHANNEL_G_HT40MINUS;
815 		else
816 			channel_mode = CHANNEL_G_HT20;
817 		break;
818 	case IEEE80211_MODE_TURBO_G:
819 	case IEEE80211_MODE_STURBO_A:
820 	case IEEE80211_MODE_TURBO_A:
821 		channel_mode = 0;
822 		break;
823 	case IEEE80211_MODE_11A:
824 		channel_mode = CHANNEL_A;
825 		break;
826 	case IEEE80211_MODE_11G:
827 		channel_mode = CHANNEL_B;
828 		break;
829 	case IEEE80211_MODE_11B:
830 		channel_mode = CHANNEL_G;
831 		break;
832 	case IEEE80211_MODE_FH:
833 		channel_mode = 0;
834 		break;
835 	default:
836 		break;
837 	}
838 
839 	return (channel_mode);
840 }
841 
842 /*
843  * Update internal state after a channel change.
844  */
845 void
846 arn_chan_change(struct arn_softc *sc, struct ieee80211_channel *chan)
847 {
848 	struct ieee80211com *ic = &sc->sc_isc;
849 	enum ieee80211_phymode mode;
850 	enum wireless_mode wlmode;
851 
852 	/*
853 	 * Change channels and update the h/w rate map
854 	 * if we're switching; e.g. 11a to 11b/g.
855 	 */
856 	mode = ieee80211_chan2mode(ic, chan);
857 	switch (mode) {
858 	case IEEE80211_MODE_11A:
859 		wlmode = ATH9K_MODE_11A;
860 		break;
861 	case IEEE80211_MODE_11B:
862 		wlmode = ATH9K_MODE_11B;
863 		break;
864 	case IEEE80211_MODE_11G:
865 		wlmode = ATH9K_MODE_11B;
866 		break;
867 	default:
868 		break;
869 	}
870 	if (wlmode != sc->sc_curmode)
871 		arn_setcurmode(sc, wlmode);
872 
873 }
874 
/*
 * Set/change channels.  If the channel is really being changed, it's done
 * by resetting the chip.  To accomplish this we must first cleanup any
 * pending DMA, then restart stuff.
 *
 * Returns 0 on success, EIO if the device is gone or the h/w reset or
 * receive restart fails.
 */
static int
arn_set_channel(struct arn_softc *sc, struct ath9k_channel *hchan)
{
	struct ath_hal *ah = sc->sc_ah;
	ieee80211com_t *ic = &sc->sc_isc;
	boolean_t fastcc = B_TRUE;	/* fast channel change, if allowed */
	boolean_t  stopped;
	struct ieee80211_channel chan;
	enum wireless_mode curmode;

	if (sc->sc_flags & SC_OP_INVALID)
		return (EIO);

	/* only reset when the channel or chainmask actually changes */
	if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
	    hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
	    (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
	    (sc->sc_flags & SC_OP_FULL_RESET)) {
		int status;

		/*
		 * This is only performed if the channel settings have
		 * actually changed.
		 *
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		(void) ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
		arn_draintxq(sc, B_FALSE);	/* clear pending tx frames */
		stopped = arn_stoprecv(sc);	/* turn off frame recv */

		/*
		 * XXX: do not flush receive queue here. We don't want
		 * to flush data frames already in queue because of
		 * changing channel.
		 */

		/* rx not cleanly stopped, or full reset: no fast change */
		if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
			fastcc = B_FALSE;

		ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_set_channel(): "
		    "(%u MHz) -> (%u MHz), cflags:%x, chanwidth: %d\n",
		    sc->sc_ah->ah_curchan->channel,
		    hchan->channel, hchan->channelFlags, sc->tx_chan_width));

		if (!ath9k_hw_reset(ah, hchan, sc->tx_chan_width,
		    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
		    sc->sc_ht_extprotspacing, fastcc, &status)) {
			ARN_DBG((ARN_DBG_FATAL, "arn: arn_set_channel(): "
			    "unable to reset channel %u (%uMhz) "
			    "flags 0x%x hal status %u\n",
			    ath9k_hw_mhz2ieee(ah, hchan->channel,
			    hchan->channelFlags),
			    hchan->channel, hchan->channelFlags, status));
			return (EIO);
		}

		sc->sc_curchan = *hchan;

		sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
		sc->sc_flags &= ~SC_OP_FULL_RESET;

		if (arn_startrecv(sc) != 0) {
			arn_problem("arn: arn_set_channel(): "
			    "unable to restart recv logic\n");
			return (EIO);
		}

		/*
		 * NOTE(review): 'chan' is a stack local, so ic_ibss_chan
		 * dangles once this function returns.  It should point
		 * into ic->ic_sup_channels instead — confirm with the
		 * net80211 users of ic_ibss_chan before relying on it.
		 */
		chan.ich_freq = hchan->channel;
		chan.ich_flags = hchan->channelFlags;
		ic->ic_ibss_chan = &chan;

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		curmode = arn_chan2mode(hchan);
		if (curmode != sc->sc_curmode)
			arn_setcurmode(sc, arn_chan2mode(hchan));

		arn_update_txpow(sc);

		(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}

	return (0);
}
968 
969 /*
970  *  This routine performs the periodic noise floor calibration function
971  *  that is used to adjust and optimize the chip performance.  This
972  *  takes environmental changes (location, temperature) into account.
973  *  When the task is complete, it reschedules itself depending on the
974  *  appropriate interval that was calculated.
975  */
976 static void
977 arn_ani_calibrate(void *arg)
978 
979 {
980 	ieee80211com_t *ic = (ieee80211com_t *)arg;
981 	struct arn_softc *sc = (struct arn_softc *)ic;
982 	struct ath_hal *ah = sc->sc_ah;
983 	boolean_t longcal = B_FALSE;
984 	boolean_t shortcal = B_FALSE;
985 	boolean_t aniflag = B_FALSE;
986 	unsigned int timestamp = drv_hztousec(ddi_get_lbolt())/1000;
987 	uint32_t cal_interval;
988 
989 	/*
990 	 * don't calibrate when we're scanning.
991 	 * we are most likely not on our home channel.
992 	 */
993 	if (ic->ic_state != IEEE80211_S_RUN)
994 		goto settimer;
995 
996 	/* Long calibration runs independently of short calibration. */
997 	if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
998 		longcal = B_TRUE;
999 		ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1000 		    "%s: longcal @%lu\n", __func__, drv_hztousec));
1001 		sc->sc_ani.sc_longcal_timer = timestamp;
1002 	}
1003 
1004 	/* Short calibration applies only while sc_caldone is FALSE */
1005 	if (!sc->sc_ani.sc_caldone) {
1006 		if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
1007 		    ATH_SHORT_CALINTERVAL) {
1008 			shortcal = B_TRUE;
1009 			ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1010 			    "%s: shortcal @%lu\n",
1011 			    __func__, drv_hztousec));
1012 			sc->sc_ani.sc_shortcal_timer = timestamp;
1013 			sc->sc_ani.sc_resetcal_timer = timestamp;
1014 		}
1015 	} else {
1016 		if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
1017 		    ATH_RESTART_CALINTERVAL) {
1018 			ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
1019 						&sc->sc_ani.sc_caldone);
1020 			if (sc->sc_ani.sc_caldone)
1021 				sc->sc_ani.sc_resetcal_timer = timestamp;
1022 		}
1023 	}
1024 
1025 	/* Verify whether we must check ANI */
1026 	if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
1027 	    ATH_ANI_POLLINTERVAL) {
1028 		aniflag = B_TRUE;
1029 		sc->sc_ani.sc_checkani_timer = timestamp;
1030 	}
1031 
1032 	/* Skip all processing if there's nothing to do. */
1033 	if (longcal || shortcal || aniflag) {
1034 		/* Call ANI routine if necessary */
1035 		if (aniflag)
1036 			ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
1037 			    ah->ah_curchan);
1038 
1039 		/* Perform calibration if necessary */
1040 		if (longcal || shortcal) {
1041 			boolean_t iscaldone = B_FALSE;
1042 
1043 			if (ath9k_hw_calibrate(ah, ah->ah_curchan,
1044 			    sc->sc_rx_chainmask, longcal, &iscaldone)) {
1045 				if (longcal)
1046 					sc->sc_ani.sc_noise_floor =
1047 					    ath9k_hw_getchan_noise(ah,
1048 					    ah->ah_curchan);
1049 
1050 				ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1051 				    "%s: calibrate chan %u/%x nf: %d\n",
1052 				    __func__,
1053 				    ah->ah_curchan->channel,
1054 				    ah->ah_curchan->channelFlags,
1055 				    sc->sc_ani.sc_noise_floor));
1056 			} else {
1057 				ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1058 				    "%s: calibrate chan %u/%x failed\n",
1059 				    __func__,
1060 				    ah->ah_curchan->channel,
1061 				    ah->ah_curchan->channelFlags));
1062 			}
1063 			sc->sc_ani.sc_caldone = iscaldone;
1064 		}
1065 	}
1066 
1067 settimer:
1068 	/*
1069 	 * Set timer interval based on previous results.
1070 	 * The interval must be the shortest necessary to satisfy ANI,
1071 	 * short calibration and long calibration.
1072 	 */
1073 	cal_interval = ATH_LONG_CALINTERVAL;
1074 	if (sc->sc_ah->ah_config.enable_ani)
1075 		cal_interval =
1076 		    min(cal_interval, (uint32_t)ATH_ANI_POLLINTERVAL);
1077 
1078 	if (!sc->sc_ani.sc_caldone)
1079 		cal_interval = min(cal_interval,
1080 		    (uint32_t)ATH_SHORT_CALINTERVAL);
1081 
1082 	sc->sc_scan_timer = 0;
1083 	sc->sc_scan_timer = timeout(arn_ani_calibrate, (void *)sc,
1084 	    drv_usectohz(cal_interval * 1000));
1085 }
1086 
1087 static void
1088 arn_stop_caltimer(struct arn_softc *sc)
1089 {
1090 	timeout_id_t tmp_id = 0;
1091 
1092 	while ((sc->sc_cal_timer != 0) && (tmp_id != sc->sc_cal_timer)) {
1093 		tmp_id = sc->sc_cal_timer;
1094 		(void) untimeout(tmp_id);
1095 	}
1096 	sc->sc_cal_timer = 0;
1097 }
1098 
/*
 * Primary hardware interrupt handler.
 *
 * Reads and acknowledges the pseudo-ISR from the HAL, masks it against
 * the interrupt set we actually enabled (sc_imask) and dispatches each
 * cause.  Heavy lifting is deferred: rx goes to a softint, tx completion
 * and beacon miss go to the driver taskq.  Fatal conditions (FATAL,
 * RXORN) trigger a full chip reset via arn_reset().
 *
 * Returns DDI_INTR_CLAIMED when the interrupt was ours, otherwise
 * DDI_INTR_UNCLAIMED (the IRQ line may be shared with other devices).
 */
static uint_t
arn_isr(caddr_t arg)
{
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	struct arn_softc *sc = (struct arn_softc *)arg;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	ieee80211com_t *ic = (ieee80211com_t *)sc;

	ARN_LOCK(sc);

	if (sc->sc_flags & SC_OP_INVALID) {
		/*
		 * The hardware is not ready/present, don't
		 * touch anything. Note this can happen early
		 * on if the IRQ is shared.
		 */
		ARN_UNLOCK(sc);
		return (DDI_INTR_UNCLAIMED);
	}
	if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
		ARN_UNLOCK(sc);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Figure out the reason(s) for the interrupt. Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	(void) ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */

	status &= sc->sc_imask; /* discard unasked-for bits */

	/*
	 * If there are no status bits set, then this interrupt was not
	 * for me (should have been caught above).
	 */
	if (!status) {
		ARN_UNLOCK(sc);
		return (DDI_INTR_UNCLAIMED);
	}

	sc->sc_intrstatus = status;

	if (status & ATH9K_INT_FATAL) {
		/* need a chip reset */
		ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
		    "ATH9K_INT_FATAL\n"));
		goto reset;
	} else if (status & ATH9K_INT_RXORN) {
		/* need a chip reset */
		ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
		    "ATH9K_INT_RXORN\n"));
		goto reset;
	} else {
		if (status & ATH9K_INT_RXEOL) {
			/*
			 * NB: the hardware should re-read the link when
			 * RXE bit is written, but it doesn't work
			 * at least on older hardware revs.
			 */
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_RXEOL\n"));
			sc->sc_rxlink = NULL;
		}
		if (status & ATH9K_INT_TXURN) {
			/* bump tx trigger level */
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_TXURN\n"));
			(void) ath9k_hw_updatetxtriglevel(ah, B_TRUE);
		}
		/* XXX: optimize this */
		if (status & ATH9K_INT_RX) {
			/* Defer rx processing to the soft interrupt. */
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_RX\n"));
			sc->sc_rx_pend = 1;
			ddi_trigger_softintr(sc->sc_softint_id);
		}
		if (status & ATH9K_INT_TX) {
			/* Defer tx completion processing to the taskq. */
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_TX\n"));
			if (ddi_taskq_dispatch(sc->sc_tq,
			    arn_tx_int_proc, sc, DDI_NOSLEEP) !=
			    DDI_SUCCESS) {
				arn_problem("arn: arn_isr(): "
				    "No memory for tx taskq\n");
				}
			}
#ifdef ARN_ATH9K_INT_MIB
		if (status & ATH9K_INT_MIB) {
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to
			 * fire.
			 */
			(void) ath9k_hw_set_interrupts(ah, 0);
			/*
			 * Let the hal handle the event. We assume
			 * it will clear whatever condition caused
			 * the interrupt.
			 */
			ath9k_hw_procmibevent(ah, &sc->sc_halstats);
			(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_MIB\n"));
		}
#endif

#ifdef ARN_ATH9K_INT_TIM_TIMER
		if (status & ATH9K_INT_TIM_TIMER) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_TIM_TIMER\n"));
			if (!(ah->ah_caps.hw_caps &
			    ATH9K_HW_CAP_AUTOSLEEP)) {
				/*
				 * Clear RxAbort bit so that we can
				 * receive frames
				 */
				ath9k_hw_setrxabort(ah, 0);
				goto reset;
			}
		}
#endif

		if (status & ATH9K_INT_BMISS) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_BMISS\n"));
#ifdef ARN_HW_BEACON_MISS_HANDLE
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "handle beacon mmiss by H/W mechanism\n"));
			if (ddi_taskq_dispatch(sc->sc_tq, arn_bmiss_proc,
			    sc, DDI_NOSLEEP) != DDI_SUCCESS) {
				arn_problem("arn: arn_isr(): "
				    "No memory available for bmiss taskq\n");
			}
#else
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "handle beacon mmiss by S/W mechanism\n"));
#endif /* ARN_HW_BEACON_MISS_HANDLE */
		}

		/* Remaining causes are informational; drop the lock now. */
		ARN_UNLOCK(sc);

#ifdef ARN_ATH9K_INT_CST
		/* carrier sense timeout */
		if (status & ATH9K_INT_CST) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_CST\n"));
			return (DDI_INTR_CLAIMED);
		}
#endif

		if (status & ATH9K_INT_SWBA) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_SWBA\n"));
			/* This will occur only in Host-AP or Ad-Hoc mode */
			return (DDI_INTR_CLAIMED);
		}
	}

	return (DDI_INTR_CLAIMED);
reset:
	/* NB: reached with ARN_LOCK still held; arn_reset() runs locked. */
	ARN_DBG((ARN_DBG_INTERRUPT, "Rset for fatal err\n"));
	(void) arn_reset(ic);
	ARN_UNLOCK(sc);
	return (DDI_INTR_CLAIMED);
}
1268 
1269 static int
1270 arn_get_channel(struct arn_softc *sc, struct ieee80211_channel *chan)
1271 {
1272 	int i;
1273 
1274 	for (i = 0; i < sc->sc_ah->ah_nchan; i++) {
1275 		if (sc->sc_ah->ah_channels[i].channel == chan->ich_freq)
1276 			return (i);
1277 	}
1278 
1279 	return (-1);
1280 }
1281 
/*
 * Perform a full chip reset:  quiesce the hardware (interrupts off,
 * tx drained, rx stopped), reset it on the current channel, then
 * restart the receive engine, refresh mode/txpower state, re-enable
 * beacons if they were running, and restore the interrupt mask.
 *
 * Returns 0 on success or EIO if the HAL reset failed.  Note that on
 * HAL-reset failure we still attempt to restart rx and re-enable
 * interrupts, so the caller gets a functional (if degraded) device.
 *
 * NOTE(review): arn_isr() invokes this with ARN_LOCK held; presumably
 * all callers must do the same -- confirm before changing locking.
 */
int
arn_reset(ieee80211com_t *ic)
{
	struct arn_softc *sc = (struct arn_softc *)ic;
	struct ath_hal *ah = sc->sc_ah;
	int status;	/* HAL status code reported on reset failure */
	int error = 0;

	/* Quiesce: no interrupts, no tx in flight, rx stopped. */
	(void) ath9k_hw_set_interrupts(ah, 0);
	arn_draintxq(sc, 0);
	(void) arn_stoprecv(sc);

	if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan, sc->tx_chan_width,
	    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
	    sc->sc_ht_extprotspacing, B_FALSE, &status)) {
		ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
		    "unable to reset hardware; hal status %u\n", status));
		error = EIO;
	}

	if (arn_startrecv(sc) != 0)
		ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
		    "unable to start recv logic\n"));

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	arn_setcurmode(sc, arn_chan2mode(sc->sc_ah->ah_curchan));

	arn_update_txpow(sc);

	if (sc->sc_flags & SC_OP_BEACONS)
		arn_beacon_config(sc);	/* restart beacons */

	(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);

	return (error);
}
1322 
1323 int
1324 arn_get_hal_qnum(uint16_t queue, struct arn_softc *sc)
1325 {
1326 	int qnum;
1327 
1328 	switch (queue) {
1329 	case WME_AC_VO:
1330 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1331 		break;
1332 	case WME_AC_VI:
1333 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1334 		break;
1335 	case WME_AC_BE:
1336 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1337 		break;
1338 	case WME_AC_BK:
1339 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1340 		break;
1341 	default:
1342 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1343 		break;
1344 	}
1345 
1346 	return (qnum);
1347 }
1348 
/* Map of MAC/BB silicon revision codes (AR_SREV_VERSION_*) to names. */
static struct {
	uint32_t version;
	const char *name;
} ath_mac_bb_names[] = {
	{ AR_SREV_VERSION_5416_PCI,	"5416" },
	{ AR_SREV_VERSION_5416_PCIE,	"5418" },
	{ AR_SREV_VERSION_9100,		"9100" },
	{ AR_SREV_VERSION_9160,		"9160" },
	{ AR_SREV_VERSION_9280,		"9280" },
	{ AR_SREV_VERSION_9285,		"9285" }
};
1360 
/* Map of RF radio revision codes (AR_RAD*_SREV_MAJOR) to names. */
static struct {
	uint16_t version;
	const char *name;
} ath_rf_names[] = {
	{ 0,				"5133" },
	{ AR_RAD5133_SREV_MAJOR,	"5133" },
	{ AR_RAD5122_SREV_MAJOR,	"5122" },
	{ AR_RAD2133_SREV_MAJOR,	"2133" },
	{ AR_RAD2122_SREV_MAJOR,	"2122" }
};
1371 
1372 /*
1373  * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
1374  */
1375 
1376 static const char *
1377 arn_mac_bb_name(uint32_t mac_bb_version)
1378 {
1379 	int i;
1380 
1381 	for (i = 0; i < ARRAY_SIZE(ath_mac_bb_names); i++) {
1382 		if (ath_mac_bb_names[i].version == mac_bb_version) {
1383 			return (ath_mac_bb_names[i].name);
1384 		}
1385 	}
1386 
1387 	return ("????");
1388 }
1389 
1390 /*
1391  * Return the RF name. "????" is returned if the RF is unknown.
1392  */
1393 
1394 static const char *
1395 arn_rf_name(uint16_t rf_version)
1396 {
1397 	int i;
1398 
1399 	for (i = 0; i < ARRAY_SIZE(ath_rf_names); i++) {
1400 		if (ath_rf_names[i].version == rf_version) {
1401 			return (ath_rf_names[i].name);
1402 		}
1403 	}
1404 
1405 	return ("????");
1406 }
1407 
1408 static void
1409 arn_next_scan(void *arg)
1410 {
1411 	ieee80211com_t *ic = arg;
1412 	struct arn_softc *sc = (struct arn_softc *)ic;
1413 
1414 	sc->sc_scan_timer = 0;
1415 	if (ic->ic_state == IEEE80211_S_SCAN) {
1416 		sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
1417 		    drv_usectohz(arn_dwelltime * 1000));
1418 		ieee80211_next_scan(ic);
1419 	}
1420 }
1421 
1422 static void
1423 arn_stop_scantimer(struct arn_softc *sc)
1424 {
1425 	timeout_id_t tmp_id = 0;
1426 
1427 	while ((sc->sc_scan_timer != 0) && (tmp_id != sc->sc_scan_timer)) {
1428 		tmp_id = sc->sc_scan_timer;
1429 		(void) untimeout(tmp_id);
1430 	}
1431 	sc->sc_scan_timer = 0;
1432 }
1433 
1434 static int32_t
1435 arn_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1436 {
1437 	struct arn_softc *sc = (struct arn_softc *)ic;
1438 	struct ath_hal *ah = sc->sc_ah;
1439 	struct ieee80211_node *in;
1440 	int32_t i, error;
1441 	uint8_t *bssid;
1442 	uint32_t rfilt;
1443 	enum ieee80211_state ostate;
1444 	struct ath9k_channel *channel;
1445 	int pos;
1446 
1447 	/* Should set up & init LED here */
1448 
1449 	if (sc->sc_flags & SC_OP_INVALID)
1450 		return (0);
1451 
1452 	ostate = ic->ic_state;
1453 	ARN_DBG((ARN_DBG_INIT, "arn: arn_newstate(): "
1454 	    "%x -> %x!\n", ostate, nstate));
1455 
1456 	ARN_LOCK(sc);
1457 
1458 	if (nstate != IEEE80211_S_SCAN)
1459 		arn_stop_scantimer(sc);
1460 	if (nstate != IEEE80211_S_RUN)
1461 		arn_stop_caltimer(sc);
1462 
1463 	/* Should set LED here */
1464 
1465 	if (nstate == IEEE80211_S_INIT) {
1466 		sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1467 		/*
1468 		 * Disable interrupts.
1469 		 */
1470 		(void) ath9k_hw_set_interrupts
1471 		    (ah, sc->sc_imask &~ ATH9K_INT_GLOBAL);
1472 
1473 #ifdef ARN_IBSS
1474 		if (ic->ic_opmode == IEEE80211_M_IBSS) {
1475 			(void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
1476 			arn_beacon_return(sc);
1477 		}
1478 #endif
1479 		ARN_UNLOCK(sc);
1480 		ieee80211_stop_watchdog(ic);
1481 		goto done;
1482 	}
1483 	in = ic->ic_bss;
1484 
1485 	pos = arn_get_channel(sc, ic->ic_curchan);
1486 
1487 	if (pos == -1) {
1488 		ARN_DBG((ARN_DBG_FATAL, "arn: "
1489 		    "%s: Invalid channel\n", __func__));
1490 		error = EINVAL;
1491 		ARN_UNLOCK(sc);
1492 		goto bad;
1493 	}
1494 
1495 	if (in->in_htcap & IEEE80211_HTCAP_CHWIDTH40) {
1496 		arn_update_chainmask(sc);
1497 		sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
1498 	} else
1499 		sc->tx_chan_width = ATH9K_HT_MACMODE_20;
1500 
1501 	sc->sc_ah->ah_channels[pos].chanmode =
1502 	    arn_chan2flags(ic, ic->ic_curchan);
1503 	channel = &sc->sc_ah->ah_channels[pos];
1504 	if (channel == NULL) {
1505 		arn_problem("arn_newstate(): channel == NULL");
1506 		ARN_UNLOCK(sc);
1507 		goto bad;
1508 	}
1509 	error = arn_set_channel(sc, channel);
1510 	if (error != 0) {
1511 		if (nstate != IEEE80211_S_SCAN) {
1512 			ARN_UNLOCK(sc);
1513 			ieee80211_reset_chan(ic);
1514 			goto bad;
1515 		}
1516 	}
1517 
1518 	/*
1519 	 * Get the receive filter according to the
1520 	 * operating mode and state
1521 	 */
1522 	rfilt = arn_calcrxfilter(sc);
1523 
1524 	if (nstate == IEEE80211_S_SCAN)
1525 		bssid = ic->ic_macaddr;
1526 	else
1527 		bssid = in->in_bssid;
1528 
1529 	ath9k_hw_setrxfilter(ah, rfilt);
1530 
1531 	if (nstate == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS)
1532 		ath9k_hw_write_associd(ah, bssid, in->in_associd);
1533 	else
1534 		ath9k_hw_write_associd(ah, bssid, 0);
1535 
1536 	/* Check for WLAN_CAPABILITY_PRIVACY ? */
1537 	if (ic->ic_flags & IEEE80211_F_PRIVACY) {
1538 		for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1539 			if (ath9k_hw_keyisvalid(ah, (uint16_t)i))
1540 				(void) ath9k_hw_keysetmac(ah, (uint16_t)i,
1541 				    bssid);
1542 		}
1543 	}
1544 
1545 	if (nstate == IEEE80211_S_RUN) {
1546 		switch (ic->ic_opmode) {
1547 #ifdef ARN_IBSS
1548 		case IEEE80211_M_IBSS:
1549 			/*
1550 			 * Allocate and setup the beacon frame.
1551 			 * Stop any previous beacon DMA.
1552 			 */
1553 			(void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
1554 			arn_beacon_return(sc);
1555 			error = arn_beacon_alloc(sc, in);
1556 			if (error != 0) {
1557 				ARN_UNLOCK(sc);
1558 				goto bad;
1559 			}
1560 			/*
1561 			 * If joining an adhoc network defer beacon timer
1562 			 * configuration to the next beacon frame so we
1563 			 * have a current TSF to use.  Otherwise we're
1564 			 * starting an ibss/bss so there's no need to delay.
1565 			 */
1566 			if (ic->ic_opmode == IEEE80211_M_IBSS &&
1567 			    ic->ic_bss->in_tstamp.tsf != 0) {
1568 				sc->sc_bsync = 1;
1569 			} else {
1570 				arn_beacon_config(sc);
1571 			}
1572 			break;
1573 #endif /* ARN_IBSS */
1574 		case IEEE80211_M_STA:
1575 			if (ostate != IEEE80211_S_RUN) {
1576 				/*
1577 				 * Defer beacon timer configuration to the next
1578 				 * beacon frame so we have a current TSF to use.
1579 				 * Any TSF collected when scanning is likely old
1580 				 */
1581 #ifdef ARN_IBSS
1582 				sc->sc_bsync = 1;
1583 #else
1584 				/* Configure the beacon and sleep timers. */
1585 				arn_beacon_config(sc);
1586 				/* Reset rssi stats */
1587 				sc->sc_halstats.ns_avgbrssi =
1588 				    ATH_RSSI_DUMMY_MARKER;
1589 				sc->sc_halstats.ns_avgrssi =
1590 				    ATH_RSSI_DUMMY_MARKER;
1591 				sc->sc_halstats.ns_avgtxrssi =
1592 				    ATH_RSSI_DUMMY_MARKER;
1593 				sc->sc_halstats.ns_avgtxrate =
1594 				    ATH_RATE_DUMMY_MARKER;
1595 /* end */
1596 
1597 #endif /* ARN_IBSS */
1598 			}
1599 			break;
1600 		default:
1601 			break;
1602 		}
1603 	} else {
1604 		sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1605 		(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
1606 	}
1607 
1608 	/*
1609 	 * Reset the rate control state.
1610 	 */
1611 	arn_rate_ctl_reset(sc, nstate);
1612 
1613 	ARN_UNLOCK(sc);
1614 done:
1615 	/*
1616 	 * Invoke the parent method to complete the work.
1617 	 */
1618 	error = sc->sc_newstate(ic, nstate, arg);
1619 
1620 	/*
1621 	 * Finally, start any timers.
1622 	 */
1623 	if (nstate == IEEE80211_S_RUN) {
1624 		ieee80211_start_watchdog(ic, 1);
1625 		ASSERT(sc->sc_cal_timer == 0);
1626 		sc->sc_cal_timer = timeout(arn_ani_calibrate, (void *)sc,
1627 		    drv_usectohz(100 * 1000));
1628 	} else if ((nstate == IEEE80211_S_SCAN) && (ostate != nstate)) {
1629 		/* start ap/neighbor scan timer */
1630 		/* ASSERT(sc->sc_scan_timer == 0); */
1631 		if (sc->sc_scan_timer != 0) {
1632 			(void) untimeout(sc->sc_scan_timer);
1633 			sc->sc_scan_timer = 0;
1634 		}
1635 		sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
1636 		    drv_usectohz(arn_dwelltime * 1000));
1637 	}
1638 
1639 bad:
1640 	return (error);
1641 }
1642 
1643 static void
1644 arn_watchdog(void *arg)
1645 {
1646 	struct arn_softc *sc = arg;
1647 	ieee80211com_t *ic = &sc->sc_isc;
1648 	int ntimer = 0;
1649 
1650 	ARN_LOCK(sc);
1651 	ic->ic_watchdog_timer = 0;
1652 	if (sc->sc_flags & SC_OP_INVALID) {
1653 		ARN_UNLOCK(sc);
1654 		return;
1655 	}
1656 
1657 	if (ic->ic_state == IEEE80211_S_RUN) {
1658 		/*
1659 		 * Start the background rate control thread if we
1660 		 * are not configured to use a fixed xmit rate.
1661 		 */
1662 #ifdef ARN_LEGACY_RC
1663 		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1664 			sc->sc_stats.ast_rate_calls ++;
1665 			if (ic->ic_opmode == IEEE80211_M_STA)
1666 				arn_rate_ctl(ic, ic->ic_bss);
1667 			else
1668 				ieee80211_iterate_nodes(&ic->ic_sta,
1669 				    arn_rate_ctl, sc);
1670 		}
1671 #endif /* ARN_LEGACY_RC */
1672 
1673 #ifdef ARN_HW_BEACON_MISS_HANDLE
1674 	/* nothing to do here */
1675 #else
1676 	/* currently set 10 seconds as beacon miss threshold */
1677 	if (ic->ic_beaconmiss++ > 100) {
1678 		ARN_DBG((ARN_DBG_BEACON, "arn_watchdog():"
1679 		    "Beacon missed for 10 seconds, run"
1680 		    "ieee80211_new_state(ic, IEEE80211_S_INIT, -1)\n"));
1681 		ARN_UNLOCK(sc);
1682 		(void) ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1683 		return;
1684 	}
1685 #endif /* ARN_HW_BEACON_MISS_HANDLE */
1686 
1687 		ntimer = 1;
1688 	}
1689 	ARN_UNLOCK(sc);
1690 
1691 	ieee80211_watchdog(ic);
1692 	if (ntimer != 0)
1693 		ieee80211_start_watchdog(ic, ntimer);
1694 }
1695 
1696 /* ARGSUSED */
1697 static struct ieee80211_node *
1698 arn_node_alloc(ieee80211com_t *ic)
1699 {
1700 	struct ath_node *an;
1701 #ifdef ARN_TX_AGGREGATION
1702 	struct arn_softc *sc = (struct arn_softc *)ic;
1703 #endif
1704 
1705 	an = kmem_zalloc(sizeof (struct ath_node), KM_SLEEP);
1706 
1707 	/* legacy rate control */
1708 #ifdef ARN_LEGACY_RC
1709 	arn_rate_update(sc, &an->an_node, 0);
1710 #endif
1711 
1712 #ifdef ARN_TX_AGGREGATION
1713 	if (sc->sc_flags & SC_OP_TXAGGR) {
1714 		arn_tx_node_init(sc, an);
1715 	}
1716 #endif /* ARN_TX_AGGREGATION */
1717 
1718 	an->last_rssi = ATH_RSSI_DUMMY_MARKER;
1719 
1720 	return ((an != NULL) ? &an->an_node : NULL);
1721 }
1722 
1723 static void
1724 arn_node_free(struct ieee80211_node *in)
1725 {
1726 	ieee80211com_t *ic = in->in_ic;
1727 	struct arn_softc *sc = (struct arn_softc *)ic;
1728 	struct ath_buf *bf;
1729 	struct ath_txq *txq;
1730 	int32_t i;
1731 
1732 #ifdef ARN_TX_AGGREGATION
1733 	if (sc->sc_flags & SC_OP_TXAGGR)
1734 		arn_tx_node_cleanup(sc, in);
1735 #endif /* TX_AGGREGATION */
1736 
1737 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1738 		if (ARN_TXQ_SETUP(sc, i)) {
1739 			txq = &sc->sc_txq[i];
1740 			mutex_enter(&txq->axq_lock);
1741 			bf = list_head(&txq->axq_list);
1742 			while (bf != NULL) {
1743 				if (bf->bf_in == in) {
1744 					bf->bf_in = NULL;
1745 				}
1746 				bf = list_next(&txq->axq_list, bf);
1747 			}
1748 			mutex_exit(&txq->axq_lock);
1749 		}
1750 	}
1751 
1752 	ic->ic_node_cleanup(in);
1753 
1754 	if (in->in_wpa_ie != NULL)
1755 		ieee80211_free(in->in_wpa_ie);
1756 
1757 	if (in->in_wme_ie != NULL)
1758 		ieee80211_free(in->in_wme_ie);
1759 
1760 	if (in->in_htcap_ie != NULL)
1761 		ieee80211_free(in->in_htcap_ie);
1762 
1763 	kmem_free(in, sizeof (struct ath_node));
1764 }
1765 
1766 /*
1767  * Allocate tx/rx key slots for TKIP.  We allocate one slot for
1768  * each key. MIC is right after the decrypt/encrypt key.
1769  */
/*
 * Non-splitmic TKIP allocator: find a slot index 'keyix' such that
 * both keyix (cipher key) and keyix+64 (combined MIC key) are free,
 * mark them used and return both tx and rx indices as 'keyix'.
 *
 * sc_keymap is a bitmap of used key-cache slots; only the first
 * quarter of it is scanned so that keyix+64 stays in range.
 *
 * Returns 1 on success with *txkeyix/*rxkeyix set, 0 if no pair is
 * available.
 */
static uint16_t
arn_key_alloc_pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
    ieee80211_keyix *rxkeyix)
{
	uint16_t i, keyix;

	ASSERT(!sc->sc_splitmic);
	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
		uint8_t b = sc->sc_keymap[i];
		if (b == 0xff)
			continue;
		/* Walk the 8 slots covered by this bitmap byte. */
		for (keyix = i * NBBY; keyix < (i + 1) * NBBY;
		    keyix++, b >>= 1) {
			if ((b & 1) || is_set(keyix+64, sc->sc_keymap)) {
				/* full pair unavailable */
				continue;
			}
			set_bit(keyix, sc->sc_keymap);
			set_bit(keyix+64, sc->sc_keymap);
			ARN_DBG((ARN_DBG_KEYCACHE,
			    "arn_key_alloc_pair(): key pair %u,%u\n",
			    keyix, keyix+64));
			*txkeyix = *rxkeyix = keyix;
			return (1);
		}
	}
	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_pair():"
	    " out of pair space\n"));

	return (0);
}
1801 
1802 /*
1803  * Allocate tx/rx key slots for TKIP.  We allocate two slots for
1804  * each key, one for decrypt/encrypt and the other for the MIC.
1805  */
/*
 * Splitmic TKIP allocator: find a slot index 'keyix' such that all of
 * keyix (tx key), keyix+32 (rx key), keyix+64 (tx MIC) and
 * keyix+32+64 (rx MIC) are free, mark the four used and return the
 * base index for both tx and rx.
 *
 * NOTE(review): the 'goto again' jumps back INTO the while loop body
 * to advance past occupied slots -- intentionally preserved; do not
 * "clean up" without re-verifying the scan semantics.
 *
 * Returns 1 on success with *txkeyix/*rxkeyix set, 0 if no quad of
 * slots is available.
 */
static int
arn_key_alloc_2pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
    ieee80211_keyix *rxkeyix)
{
	uint16_t i, keyix;

	ASSERT(sc->sc_splitmic);
	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
		uint8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i*NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
			if (is_set(keyix+32, sc->sc_keymap) ||
			    is_set(keyix+64, sc->sc_keymap) ||
			    is_set(keyix+32+64, sc->sc_keymap)) {
				/* full pair unavailable */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				goto again;
			}
			set_bit(keyix, sc->sc_keymap);
			set_bit(keyix+64, sc->sc_keymap);
			set_bit(keyix+32, sc->sc_keymap);
			set_bit(keyix+32+64, sc->sc_keymap);
			ARN_DBG((ARN_DBG_KEYCACHE,
			    "arn_key_alloc_2pair(): key pair %u,%u %u,%u\n",
			    keyix, keyix+64,
			    keyix+32, keyix+32+64));
			*txkeyix = *rxkeyix = keyix;
			return (1);
		}
	}
	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_2pair(): "
	    " out of pair space\n"));

	return (0);
}
1853 /*
1854  * Allocate a single key cache slot.
1855  */
1856 static int
1857 arn_key_alloc_single(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1858     ieee80211_keyix *rxkeyix)
1859 {
1860 	uint16_t i, keyix;
1861 
1862 	/* try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
1863 	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap); i++) {
1864 		uint8_t b = sc->sc_keymap[i];
1865 
1866 		if (b != 0xff) {
1867 			/*
1868 			 * One or more slots are free.
1869 			 */
1870 			keyix = i*NBBY;
1871 			while (b & 1)
1872 				keyix++, b >>= 1;
1873 			set_bit(keyix, sc->sc_keymap);
1874 			ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_single(): "
1875 			    "key %u\n", keyix));
1876 			*txkeyix = *rxkeyix = keyix;
1877 			return (1);
1878 		}
1879 	}
1880 	return (0);
1881 }
1882 
1883 /*
1884  * Allocate one or more key cache slots for a unicast key.  The
1885  * key itself is needed only to identify the cipher.  For hardware
1886  * TKIP with split cipher+MIC keys we allocate two key cache slot
1887  * pairs so that we can setup separate TX and RX MIC keys.  Note
1888  * that the MIC key for a TKIP key at slot i is assumed by the
1889  * hardware to be at slot i+64.  This limits TKIP keys to the first
1890  * 64 entries.
1891  */
1892 /* ARGSUSED */
1893 int
1894 arn_key_alloc(ieee80211com_t *ic, const struct ieee80211_key *k,
1895     ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1896 {
1897 	struct arn_softc *sc = (struct arn_softc *)ic;
1898 
1899 	/*
1900 	 * We allocate two pair for TKIP when using the h/w to do
1901 	 * the MIC.  For everything else, including software crypto,
1902 	 * we allocate a single entry.  Note that s/w crypto requires
1903 	 * a pass-through slot on the 5211 and 5212.  The 5210 does
1904 	 * not support pass-through cache entries and we map all
1905 	 * those requests to slot 0.
1906 	 */
1907 	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
1908 		return (arn_key_alloc_single(sc, keyix, rxkeyix));
1909 	} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
1910 	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1911 		if (sc->sc_splitmic)
1912 			return (arn_key_alloc_2pair(sc, keyix, rxkeyix));
1913 		else
1914 			return (arn_key_alloc_pair(sc, keyix, rxkeyix));
1915 	} else {
1916 		return (arn_key_alloc_single(sc, keyix, rxkeyix));
1917 	}
1918 }
1919 
1920 /*
1921  * Delete an entry in the key cache allocated by ath_key_alloc.
1922  */
/*
 * net80211 key-delete hook: reset the hardware key-cache entry (and
 * its companion slots for hardware TKIP) and release the slots in the
 * sc_keymap allocation bitmap.
 *
 * Slot layout (see the allocators above): base keyix holds the cipher
 * key; +64 holds the MIC (or, with splitmic, the TX MIC); with
 * splitmic, +32 holds the RX key and +32+64 the RX MIC.
 *
 * Always returns 1 (success).
 */
int
arn_key_delete(ieee80211com_t *ic, const struct ieee80211_key *k)
{
	struct arn_softc *sc = (struct arn_softc *)ic;
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	ieee80211_keyix keyix = k->wk_keyix;

	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_delete():"
	    " delete key %u ic_cipher=0x%x\n", keyix, cip->ic_cipher));

	(void) ath9k_hw_keyreset(ah, keyix);
	/*
	 * Handle split tx/rx keying required for TKIP with h/w MIC.
	 */
	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
		(void) ath9k_hw_keyreset(ah, keyix+32);		/* RX key */

	if (keyix >= IEEE80211_WEP_NKID) {
		/*
		 * Don't touch keymap entries for global keys so
		 * they are never considered for dynamic allocation.
		 */
		clr_bit(keyix, sc->sc_keymap);
		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
			/*
			 * If splitmic is true +64 is TX key MIC,
			 * else +64 is RX key + RX key MIC.
			 */
			clr_bit(keyix+64, sc->sc_keymap);
			if (sc->sc_splitmic) {
				/* Rx key */
				clr_bit(keyix+32, sc->sc_keymap);
				/* RX key MIC */
				clr_bit(keyix+32+64, sc->sc_keymap);
			}
		}
	}
	return (1);
}
1965 
1966 /*
1967  * Set a TKIP key into the hardware.  This handles the
1968  * potential distribution of key state to multiple key
1969  * cache slots for TKIP.
1970  */
1971 static int
1972 arn_keyset_tkip(struct arn_softc *sc, const struct ieee80211_key *k,
1973     struct ath9k_keyval *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
1974 {
1975 	uint8_t *key_rxmic = NULL;
1976 	uint8_t *key_txmic = NULL;
1977 	uint8_t  *key = (uint8_t *)&(k->wk_key[0]);
1978 	struct ath_hal *ah = sc->sc_ah;
1979 
1980 	key_txmic = key + 16;
1981 	key_rxmic = key + 24;
1982 
1983 	if (mac == NULL) {
1984 		/* Group key installation */
1985 		(void) memcpy(hk->kv_mic,  key_rxmic, sizeof (hk->kv_mic));
1986 		return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
1987 		    mac, B_FALSE));
1988 	}
1989 	if (!sc->sc_splitmic) {
1990 		/*
1991 		 * data key goes at first index,
1992 		 * the hal handles the MIC keys at index+64.
1993 		 */
1994 		(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
1995 		(void) memcpy(hk->kv_txmic, key_txmic, sizeof (hk->kv_txmic));
1996 		return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
1997 		    mac, B_FALSE));
1998 	}
1999 	/*
2000 	 * TX key goes at first index, RX key at +32.
2001 	 * The hal handles the MIC keys at index+64.
2002 	 */
2003 	(void) memcpy(hk->kv_mic, key_txmic, sizeof (hk->kv_mic));
2004 	if (!(ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, NULL,
2005 	    B_FALSE))) {
2006 		/* Txmic entry failed. No need to proceed further */
2007 		ARN_DBG((ARN_DBG_KEYCACHE,
2008 		    "%s Setting TX MIC Key Failed\n", __func__));
2009 		return (0);
2010 	}
2011 
2012 	(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
2013 
2014 	/* XXX delete tx key on failure? */
2015 	return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, mac, B_FALSE));
2016 
2017 }
2018 
2019 int
2020 arn_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
2021     const uint8_t mac[IEEE80211_ADDR_LEN])
2022 {
2023 	struct arn_softc *sc = (struct arn_softc *)ic;
2024 	const struct ieee80211_cipher *cip = k->wk_cipher;
2025 	struct ath9k_keyval hk;
2026 
2027 	/* cipher table */
2028 	static const uint8_t ciphermap[] = {
2029 		ATH9K_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
2030 		ATH9K_CIPHER_TKIP,		/* IEEE80211_CIPHER_TKIP */
2031 		ATH9K_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
2032 		ATH9K_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
2033 		ATH9K_CIPHER_CKIP,		/* IEEE80211_CIPHER_CKIP */
2034 		ATH9K_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
2035 	};
2036 
2037 	bzero(&hk, sizeof (hk));
2038 
2039 	/*
2040 	 * Software crypto uses a "clear key" so non-crypto
2041 	 * state kept in the key cache are maintainedd so that
2042 	 * rx frames have an entry to match.
2043 	 */
2044 	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
2045 		ASSERT(cip->ic_cipher < 6);
2046 		hk.kv_type = ciphermap[cip->ic_cipher];
2047 		hk.kv_len = k->wk_keylen;
2048 		bcopy(k->wk_key, hk.kv_val, k->wk_keylen);
2049 	} else {
2050 		hk.kv_type = ATH9K_CIPHER_CLR;
2051 	}
2052 
2053 	if (hk.kv_type == ATH9K_CIPHER_TKIP &&
2054 	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2055 		return (arn_keyset_tkip(sc, k, &hk, mac));
2056 	} else {
2057 		return (ath9k_hw_set_keycache_entry(sc->sc_ah,
2058 		    k->wk_keyix, &hk, mac, B_FALSE));
2059 	}
2060 }
2061 
2062 /*
2063  * Enable/Disable short slot timing
2064  */
2065 void
2066 arn_set_shortslot(ieee80211com_t *ic, int onoff)
2067 {
2068 	struct ath_hal *ah = ((struct arn_softc *)ic)->sc_ah;
2069 
2070 	if (onoff)
2071 		(void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
2072 	else
2073 		(void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_20);
2074 }
2075 
/*
 * Bring the interface up: power up the chip (SERDES), perform the
 * initial hardware reset on the current channel, start the receive
 * engine, build the interrupt mask from the hardware capabilities and
 * configured operating mode, and enable interrupts.
 *
 * Returns 0 on success or an errno value (EINVAL for an unknown
 * channel, EIO for HAL reset / rx-start failure).
 *
 * Caller must hold ARN_LOCK (asserted below).
 */
static int
arn_open(struct arn_softc *sc)
{
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ieee80211_channel *curchan = ic->ic_curchan;
	struct ath9k_channel *init_channel;
	int error = 0, pos, status;

	ARN_LOCK_ASSERT(sc);

	pos = arn_get_channel(sc, curchan);
	if (pos == -1) {
		ARN_DBG((ARN_DBG_FATAL, "arn: "
		    "%s: Invalid channel\n", __func__));
		error = EINVAL;
		goto error;
	}

	sc->tx_chan_width = ATH9K_HT_MACMODE_20;

	/* Pick the legacy channel mode matching the current PHY mode. */
	if (sc->sc_curmode == ATH9K_MODE_11A) {
		sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_A;
	} else {
		sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_G;
	}

	init_channel = &sc->sc_ah->ah_channels[pos];

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(sc->sc_ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.	On return the hardware is known to
	 * be powered up and with interrupts disabled.	This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	if (!ath9k_hw_reset(sc->sc_ah, init_channel,
	    sc->tx_chan_width, sc->sc_tx_chainmask,
	    sc->sc_rx_chainmask, sc->sc_ht_extprotspacing,
	    B_FALSE, &status)) {
		ARN_DBG((ARN_DBG_FATAL, "arn: "
		    "%s: unable to reset hardware; hal status %u "
		    "(freq %u flags 0x%x)\n", __func__, status,
		    init_channel->channel, init_channel->channelFlags));

		error = EIO;
		goto error;
	}

	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	arn_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (arn_startrecv(sc) != 0) {
		ARN_DBG((ARN_DBG_INIT, "arn: "
		    "%s: unable to start recv logic\n", __func__));
		error = EIO;
		goto error;
	}

	/* Setup our intr mask. */
	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX |
	    ATH9K_INT_RXEOL | ATH9K_INT_RXORN |
	    ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
#ifdef ARN_ATH9K_HW_CAP_GTT
	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->sc_imask |= ATH9K_INT_GTT;
#endif

#ifdef ARN_ATH9K_HW_CAP_GTT
	/*
	 * NOTE(review): this guard repeats ARN_ATH9K_HW_CAP_GTT, but the
	 * body tests CAP_HT and enables ATH9K_INT_CST (cf. the
	 * ARN_ATH9K_INT_CST guard in arn_isr()).  Looks like a
	 * copy/paste of the block above -- confirm the intended macro.
	 */
	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->sc_imask |= ATH9K_INT_CST;
#endif

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
#ifdef ARN_ATH9K_INT_MIB
	if (ath9k_hw_phycounters(sc->sc_ah) &&
	    ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
	    (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
		sc->sc_imask |= ATH9K_INT_MIB;
#endif
	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set.  For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
#ifdef ARN_ATH9K_INT_TIM
	if ((sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	    !sc->sc_config.swBeaconProcess)
		sc->sc_imask |= ATH9K_INT_TIM;
#endif
	if (arn_chan2mode(init_channel) != sc->sc_curmode)
		arn_setcurmode(sc, arn_chan2mode(init_channel));
	ARN_DBG((ARN_DBG_INIT, "arn: "
	    "%s: current mode after arn_setcurmode is %d\n",
	    __func__, sc->sc_curmode));

	sc->sc_isrunning = 1;

	/* Disable BMISS interrupt when we're not associated */
	sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
	(void) ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);

	return (0);

error:
	return (error);
}
2200 
/*
 * Bring the interface down: push net80211 back to the INIT state,
 * stop the watchdog, mask all interrupts and quiesce the tx/rx
 * engines.  Must be called with the softc lock held; the lock is
 * held again on return (it is dropped temporarily around the
 * net80211 calls below).
 */
static void
arn_close(struct arn_softc *sc)
{
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ath_hal *ah = sc->sc_ah;

	ARN_LOCK_ASSERT(sc);

	/* Nothing to do if the interface was never brought up. */
	if (!sc->sc_isrunning)
		return;

	/*
	 * Shutdown the hardware and driver
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	/*
	 * NOTE(review): the softc lock is dropped here, presumably
	 * because the net80211 calls may re-enter driver entry points
	 * that take it — confirm against the net80211 framework.
	 */
	ARN_UNLOCK(sc);
	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
	ieee80211_stop_watchdog(ic);
	ARN_LOCK(sc);

	/*
	 * make sure h/w will not generate any interrupt
	 * before setting the invalid flag.
	 */
	(void) ath9k_hw_set_interrupts(ah, 0);

	/* Only touch the hardware if it is still present/usable. */
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		arn_draintxq(sc, 0);
		(void) arn_stoprecv(sc);
		(void) ath9k_hw_phy_disable(ah);
	} else {
		sc->sc_rxlink = NULL;
	}

	sc->sc_isrunning = 0;
}
2238 
2239 /*
2240  * MAC callback functions
2241  */
/*
 * MAC-layer statistics callback: report driver-maintained and
 * net80211-maintained counters to the MAC layer.  Returns 0 on
 * success, ENOTSUP for statistics this driver does not keep.
 */
static int
arn_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	struct arn_softc *sc = arg;
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ieee80211_node *in;
	struct ieee80211_rateset *rs;

	ARN_LOCK(sc);
	switch (stat) {
	case MAC_STAT_IFSPEED:
		/*
		 * Current tx rate of the bss node, converted from the
		 * 500kbps units of the rate set to bits per second.
		 * NOTE(review): assumes ic_bss is non-NULL here —
		 * confirm the framework guarantees that.
		 */
		in = ic->ic_bss;
		rs = &in->in_rates;
		*val = (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) / 2 *
		    1000000ull;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = sc->sc_stats.ast_tx_nobuf +
		    sc->sc_stats.ast_tx_nobufmgt;
		break;
	case MAC_STAT_IERRORS:
		*val = sc->sc_stats.ast_rx_tooshort;
		break;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
		break;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
		break;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
		break;
	case MAC_STAT_OERRORS:		/* same accounting for both */
	case WIFI_STAT_TX_FAILED:
		*val = sc->sc_stats.ast_tx_fifoerr +
		    sc->sc_stats.ast_tx_xretries +
		    sc->sc_stats.ast_tx_discard;
		break;
	case WIFI_STAT_TX_RETRANS:
		*val = sc->sc_stats.ast_tx_xretries;
		break;
	case WIFI_STAT_FCS_ERRORS:
		*val = sc->sc_stats.ast_rx_crcerr;
		break;
	case WIFI_STAT_WEP_ERRORS:
		*val = sc->sc_stats.ast_rx_badcrypt;
		break;
	/* The remaining WiFi stats are kept by the net80211 layer. */
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		ARN_UNLOCK(sc);
		return (ieee80211_stat(ic, stat, val));
	default:
		ARN_UNLOCK(sc);
		return (ENOTSUP);
	}
	ARN_UNLOCK(sc);

	return (0);
}
2310 
/*
 * MAC-layer start callback: (re)initialize the hardware via
 * arn_open() and clear the "invalid" flag so other entry points may
 * touch it.  Returns 0 on success or the errno from arn_open().
 */
int
arn_m_start(void *arg)
{
	struct arn_softc *sc = arg;
	int err = 0;

	ARN_LOCK(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */

	arn_close(sc);

	if ((err = arn_open(sc)) != 0) {
		ARN_UNLOCK(sc);
		return (err);
	}

	/* H/W is ready now */
	sc->sc_flags &= ~SC_OP_INVALID;

	ARN_UNLOCK(sc);

	return (0);
}
2338 
2339 static void
2340 arn_m_stop(void *arg)
2341 {
2342 	struct arn_softc *sc = arg;
2343 
2344 	ARN_LOCK(sc);
2345 	arn_close(sc);
2346 
2347 	/* disable HAL and put h/w to sleep */
2348 	(void) ath9k_hw_disable(sc->sc_ah);
2349 	ath9k_hw_configpcipowersave(sc->sc_ah, 1);
2350 
2351 	/* XXX: hardware will not be ready in suspend state */
2352 	sc->sc_flags |= SC_OP_INVALID;
2353 	ARN_UNLOCK(sc);
2354 }
2355 
2356 static int
2357 arn_m_promisc(void *arg, boolean_t on)
2358 {
2359 	struct arn_softc *sc = arg;
2360 	struct ath_hal *ah = sc->sc_ah;
2361 	uint32_t rfilt;
2362 
2363 	ARN_LOCK(sc);
2364 
2365 	rfilt = ath9k_hw_getrxfilter(ah);
2366 	if (on)
2367 		rfilt |= ATH9K_RX_FILTER_PROM;
2368 	else
2369 		rfilt &= ~ATH9K_RX_FILTER_PROM;
2370 	sc->sc_promisc = on;
2371 	ath9k_hw_setrxfilter(ah, rfilt);
2372 
2373 	ARN_UNLOCK(sc);
2374 
2375 	return (0);
2376 }
2377 
/*
 * MAC-layer multicast callback: enable or disable reception of a
 * multicast address.  The address is folded into a 6-bit hash index
 * selecting one bit of the two 32-bit hardware multicast filter
 * words; a per-bucket reference count keeps the bit set while any
 * enabled address still hashes to it.  Always succeeds.
 */
static int
arn_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	struct arn_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	uint32_t val, index, bit;
	uint8_t pos;
	uint32_t *mfilt = sc->sc_mcast_hash;

	ARN_LOCK(sc);

	/* calculate XOR of eight 6bit values */
	val = ARN_LE_READ_32(mca + 0);
	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	/*
	 * NOTE(review): this 4-byte read at offset 3 covers bytes
	 * 3..6, one past the 6-byte MAC address.  This matches the
	 * legacy ath driver hash; it assumes the caller's buffer is
	 * large enough (or the extra byte is harmless) — confirm.
	 */
	val = ARN_LE_READ_32(mca + 3);
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	pos &= 0x3f;
	/* Bucket 'pos' maps to bit (pos % 32) of filter word (pos / 32). */
	index = pos / 32;
	bit = 1 << (pos % 32);

	if (add) {	/* enable multicast */
		sc->sc_mcast_refs[pos]++;
		mfilt[index] |= bit;
	} else {	/* disable multicast */
		if (--sc->sc_mcast_refs[pos] == 0)
			mfilt[index] &= ~bit;
	}
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);

	ARN_UNLOCK(sc);
	return (0);
}
2410 
/*
 * MAC-layer unicast-address callback: record the new MAC address,
 * program it into the hardware and reset the chip so it takes
 * effect.  Always returns 0 (the reset result is ignored).
 */
static int
arn_m_unicst(void *arg, const uint8_t *macaddr)
{
	struct arn_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	ieee80211com_t *ic = (ieee80211com_t *)sc;

	/* NOTE(review): debug tag "ath_gld_saddr" is a stale legacy name. */
	ARN_DBG((ARN_DBG_XMIT, "ath: ath_gld_saddr(): "
	    "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
	    macaddr[0], macaddr[1], macaddr[2],
	    macaddr[3], macaddr[4], macaddr[5]));

	ARN_LOCK(sc);
	IEEE80211_ADDR_COPY(sc->sc_isc.ic_macaddr, macaddr);
	(void) ath9k_hw_setmac(ah, sc->sc_isc.ic_macaddr);
	/* Full chip reset so the new address is picked up everywhere. */
	(void) arn_reset(ic);
	ARN_UNLOCK(sc);
	return (0);
}
2430 
2431 static mblk_t *
2432 arn_m_tx(void *arg, mblk_t *mp)
2433 {
2434 	struct arn_softc *sc = arg;
2435 	int error = 0;
2436 	mblk_t *next;
2437 	ieee80211com_t *ic = (ieee80211com_t *)sc;
2438 
2439 	/*
2440 	 * No data frames go out unless we're associated; this
2441 	 * should not happen as the 802.11 layer does not enable
2442 	 * the xmit queue until we enter the RUN state.
2443 	 */
2444 	if (ic->ic_state != IEEE80211_S_RUN) {
2445 		ARN_DBG((ARN_DBG_XMIT, "arn: arn_m_tx(): "
2446 		    "discard, state %u\n", ic->ic_state));
2447 		sc->sc_stats.ast_tx_discard++;
2448 		freemsgchain(mp);
2449 		return (NULL);
2450 	}
2451 
2452 	while (mp != NULL) {
2453 		next = mp->b_next;
2454 		mp->b_next = NULL;
2455 		error = arn_tx(ic, mp, IEEE80211_FC0_TYPE_DATA);
2456 		if (error != 0) {
2457 			mp->b_next = next;
2458 			if (error == ENOMEM) {
2459 				break;
2460 			} else {
2461 				freemsgchain(mp);
2462 				return (NULL);
2463 			}
2464 		}
2465 		mp = next;
2466 	}
2467 
2468 	return (mp);
2469 }
2470 
2471 static void
2472 arn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2473 {
2474 	struct arn_softc *sc = arg;
2475 	int32_t err;
2476 
2477 	err = ieee80211_ioctl(&sc->sc_isc, wq, mp);
2478 
2479 	ARN_LOCK(sc);
2480 	if (err == ENETRESET) {
2481 		if (!(sc->sc_flags & SC_OP_INVALID)) {
2482 			ARN_UNLOCK(sc);
2483 
2484 			(void) arn_m_start(sc);
2485 
2486 			(void) ieee80211_new_state(&sc->sc_isc,
2487 			    IEEE80211_S_SCAN, -1);
2488 			ARN_LOCK(sc);
2489 		}
2490 	}
2491 	ARN_UNLOCK(sc);
2492 }
2493 
2494 static int
2495 arn_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2496     uint_t wldp_length, const void *wldp_buf)
2497 {
2498 	struct arn_softc *sc = arg;
2499 	int	err;
2500 
2501 	err = ieee80211_setprop(&sc->sc_isc, pr_name, wldp_pr_num,
2502 	    wldp_length, wldp_buf);
2503 
2504 	ARN_LOCK(sc);
2505 
2506 	if (err == ENETRESET) {
2507 		if (!(sc->sc_flags & SC_OP_INVALID)) {
2508 			ARN_UNLOCK(sc);
2509 			(void) arn_m_start(sc);
2510 			(void) ieee80211_new_state(&sc->sc_isc,
2511 			    IEEE80211_S_SCAN, -1);
2512 			ARN_LOCK(sc);
2513 		}
2514 		err = 0;
2515 	}
2516 
2517 	ARN_UNLOCK(sc);
2518 
2519 	return (err);
2520 }
2521 
2522 /* ARGSUSED */
2523 static int
2524 arn_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2525     uint_t wldp_length, void *wldp_buf)
2526 {
2527 	struct arn_softc *sc = arg;
2528 	int	err = 0;
2529 
2530 	err = ieee80211_getprop(&sc->sc_isc, pr_name, wldp_pr_num,
2531 	    wldp_length, wldp_buf);
2532 
2533 	return (err);
2534 }
2535 
/*
 * MAC-layer property-info callback: property metadata is provided
 * entirely by the net80211 layer.
 */
static void
arn_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
    mac_prop_info_handle_t prh)
{
	struct arn_softc *sc = arg;

	ieee80211_propinfo(&sc->sc_isc, pr_name, wldp_pr_num, prh);
}
2544 
/*
 * Determine the PCI cache line size and record it, in bytes, in
 * sc_cachelsz.  (The config-space register holds the size in 4-byte
 * words; a zero register is forced to a sane default first.)
 */
static void
arn_pci_config_cachesize(struct arn_softc *sc)
{
	uint8_t csz;	/* cache line size in 4-byte words */

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	csz = pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ);
	if (csz == 0) {
		/*
		 * We must have this setup properly for rx buffer
		 * DMA to work so force a reasonable value here if it
		 * comes up zero.
		 */
		csz = ATH_DEF_CACHE_BYTES / sizeof (uint32_t);
		pci_config_put8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ,
		    csz);
	}
	/* Convert from 4-byte words to bytes. */
	sc->sc_cachelsz = csz << 2;
}
2568 
2569 static int
2570 arn_pci_setup(struct arn_softc *sc)
2571 {
2572 	uint16_t command;
2573 
2574 	/*
2575 	 * Enable memory mapping and bus mastering
2576 	 */
2577 	ASSERT(sc != NULL);
2578 	command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
2579 	command	|= PCI_COMM_MAE | PCI_COMM_ME;
2580 	pci_config_put16(sc->sc_cfg_handle, PCI_CONF_COMM, command);
2581 	command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
2582 	if ((command & PCI_COMM_MAE) == 0) {
2583 		arn_problem("arn: arn_pci_setup(): "
2584 		    "failed to enable memory mapping\n");
2585 		return (EIO);
2586 	}
2587 	if ((command & PCI_COMM_ME) == 0) {
2588 		arn_problem("arn: arn_pci_setup(): "
2589 		    "failed to enable bus mastering\n");
2590 		return (EIO);
2591 	}
2592 	ARN_DBG((ARN_DBG_INIT, "arn: arn_pci_setup(): "
2593 	    "set command reg to 0x%x \n", command));
2594 
2595 	return (0);
2596 }
2597 
2598 static void
2599 arn_get_hw_encap(struct arn_softc *sc)
2600 {
2601 	ieee80211com_t *ic;
2602 	struct ath_hal *ah;
2603 
2604 	ic = (ieee80211com_t *)sc;
2605 	ah = sc->sc_ah;
2606 
2607 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2608 	    ATH9K_CIPHER_AES_CCM, NULL))
2609 		ic->ic_caps |= IEEE80211_C_AES_CCM;
2610 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2611 	    ATH9K_CIPHER_AES_OCB, NULL))
2612 		ic->ic_caps |= IEEE80211_C_AES;
2613 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2614 	    ATH9K_CIPHER_TKIP, NULL))
2615 		ic->ic_caps |= IEEE80211_C_TKIP;
2616 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2617 	    ATH9K_CIPHER_WEP, NULL))
2618 		ic->ic_caps |= IEEE80211_C_WEP;
2619 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2620 	    ATH9K_CIPHER_MIC, NULL))
2621 		ic->ic_caps |= IEEE80211_C_TKIPMIC;
2622 }
2623 
/*
 * Populate the driver's 802.11n (HT) configuration: capability
 * flags, A-MPDU parameters, and the supported rx MCS mask derived
 * from the hardware's rx chainmask (one MCS byte per spatial
 * stream).
 */
static void
arn_setup_ht_cap(struct arn_softc *sc)
{
#define	ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3	/* 2 ^ 16 */
#define	ATH9K_HT_CAP_MPDUDENSITY_8 0x6		/* 8 usec */

	/* LINTED E_FUNC_SET_NOT_USED */
	uint8_t tx_streams;	/* computed but currently unused */
	uint8_t rx_streams;

	arn_ht_conf *ht_info = &sc->sc_ht_conf;

	ht_info->ht_supported = B_TRUE;

	/* Todo: IEEE80211_HTCAP_SMPS */
	ht_info->cap = IEEE80211_HTCAP_CHWIDTH40|
	    IEEE80211_HTCAP_SHORTGI40 |
	    IEEE80211_HTCAP_DSSSCCK40;

	ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
	ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;

	/* set up supported mcs set */
	(void) memset(&ht_info->rx_mcs_mask, 0, sizeof (ht_info->rx_mcs_mask));
	/*
	 * A chainmask with a single bit set (power of two) means one
	 * stream; anything else is treated as two streams.
	 */
	tx_streams =
	    !(sc->sc_ah->ah_caps.tx_chainmask &
	    (sc->sc_ah->ah_caps.tx_chainmask - 1)) ? 1 : 2;
	rx_streams =
	    !(sc->sc_ah->ah_caps.rx_chainmask &
	    (sc->sc_ah->ah_caps.rx_chainmask - 1)) ? 1 : 2;

	/* MCS 0-7 always; MCS 8-15 only with a second rx stream. */
	ht_info->rx_mcs_mask[0] = 0xff;
	if (rx_streams >= 2)
		ht_info->rx_mcs_mask[1] = 0xff;
}
2659 
2660 /* xxx should be used for ht rate set negotiating ? */
/* xxx should be used for ht rate set negotiating ? */
/*
 * Rebuild the global 11n rate set from the rx MCS bitmask: every set
 * bit in rx_mcs_mask becomes one MCS entry (rate value == MCS index)
 * in ieee80211_rateset_11n.
 */
static void
arn_overwrite_11n_rateset(struct arn_softc *sc)
{
	uint8_t *ht_rs = sc->sc_ht_conf.rx_mcs_mask;
	int mcs_idx, mcs_count = 0;
	int i, j;

	(void) memset(&ieee80211_rateset_11n, 0,
	    sizeof (ieee80211_rateset_11n));
	/*
	 * NOTE(review): assumes rx_mcs_mask holds at least 10 bytes
	 * (the 802.11n supported-MCS bitmap) — confirm its declared
	 * size in the softc.
	 */
	for (i = 0; i < 10; i++) {
		for (j = 0; j < 8; j++) {
			if (ht_rs[i] & (1 << j)) {
				mcs_idx = i * 8 + j;
				/*
				 * NOTE(review): this break only exits
				 * the inner loop; the outer loop keeps
				 * scanning higher bytes, which is
				 * harmless since every later index is
				 * also out of range.
				 */
				if (mcs_idx >= IEEE80211_HTRATE_MAXSIZE) {
					break;
				}

				ieee80211_rateset_11n.rs_rates[mcs_idx] =
				    (uint8_t)mcs_idx;
				mcs_count++;
			}
		}
	}

	ieee80211_rateset_11n.rs_nrates = (uint8_t)mcs_count;

	ARN_DBG((ARN_DBG_RATE, "arn_overwrite_11n_rateset(): "
	    "MCS rate set supported by this station is as follows:\n"));

	for (i = 0; i < ieee80211_rateset_11n.rs_nrates; i++) {
		ARN_DBG((ARN_DBG_RATE, "MCS rate %d is %d\n",
		    i, ieee80211_rateset_11n.rs_rates[i]));
	}

}
2696 
2697 /*
2698  * Update WME parameters for a transmit queue.
2699  */
2700 static int
2701 arn_tx_queue_update(struct arn_softc *sc, int ac)
2702 {
2703 #define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2704 #define	ATH_TXOP_TO_US(v)		(v<<5)
2705 	ieee80211com_t *ic = (ieee80211com_t *)sc;
2706 	struct ath_txq *txq;
2707 	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
2708 	struct ath_hal *ah = sc->sc_ah;
2709 	struct ath9k_tx_queue_info qi;
2710 
2711 	txq = &sc->sc_txq[arn_get_hal_qnum(ac, sc)];
2712 	(void) ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi);
2713 
2714 	/*
2715 	 * TXQ_FLAG_TXOKINT_ENABLE = 0x0001
2716 	 * TXQ_FLAG_TXERRINT_ENABLE = 0x0001
2717 	 * TXQ_FLAG_TXDESCINT_ENABLE = 0x0002
2718 	 * TXQ_FLAG_TXEOLINT_ENABLE = 0x0004
2719 	 * TXQ_FLAG_TXURNINT_ENABLE = 0x0008
2720 	 * TXQ_FLAG_BACKOFF_DISABLE = 0x0010
2721 	 * TXQ_FLAG_COMPRESSION_ENABLE = 0x0020
2722 	 * TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040
2723 	 * TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080
2724 	 */
2725 
2726 	/* xxx should update these flags here? */
2727 #if 0
2728 	qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
2729 	    TXQ_FLAG_TXERRINT_ENABLE |
2730 	    TXQ_FLAG_TXDESCINT_ENABLE |
2731 	    TXQ_FLAG_TXURNINT_ENABLE;
2732 #endif
2733 
2734 	qi.tqi_aifs = wmep->wmep_aifsn;
2735 	qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2736 	qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2737 	qi.tqi_readyTime = 0;
2738 	qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
2739 
2740 	ARN_DBG((ARN_DBG_INIT,
2741 	    "%s:"
2742 	    "Q%u"
2743 	    "qflags 0x%x"
2744 	    "aifs %u"
2745 	    "cwmin %u"
2746 	    "cwmax %u"
2747 	    "burstTime %u\n",
2748 	    __func__,
2749 	    txq->axq_qnum,
2750 	    qi.tqi_qflags,
2751 	    qi.tqi_aifs,
2752 	    qi.tqi_cwmin,
2753 	    qi.tqi_cwmax,
2754 	    qi.tqi_burstTime));
2755 
2756 	if (!ath9k_hw_set_txq_props(ah, txq->axq_qnum, &qi)) {
2757 		arn_problem("unable to update hardware queue "
2758 		    "parameters for %s traffic!\n",
2759 		    ieee80211_wme_acnames[ac]);
2760 		return (0);
2761 	} else {
2762 		/* push to H/W */
2763 		(void) ath9k_hw_resettxqueue(ah, txq->axq_qnum);
2764 		return (1);
2765 	}
2766 
2767 #undef ATH_TXOP_TO_US
2768 #undef ATH_EXPONENT_TO_VALUE
2769 }
2770 
/*
 * net80211 WME-update hook: reprogram all four access-category
 * hardware queues.  Returns 0 if every queue update succeeded, EIO
 * if any of them failed.
 */
static int
arn_wme_update(ieee80211com_t *ic)
{
	struct arn_softc *sc = (struct arn_softc *)ic;

	/* updating all four AC queues; any single failure yields EIO */
	return (!arn_tx_queue_update(sc, WME_AC_BE) ||
	    !arn_tx_queue_update(sc, WME_AC_BK) ||
	    !arn_tx_queue_update(sc, WME_AC_VI) ||
	    !arn_tx_queue_update(sc, WME_AC_VO) ? EIO : 0);
}
2783 
2784 /*
2785  * Update tx/rx chainmask. For legacy association,
2786  * hard code chainmask to 1x1, for 11n association, use
2787  * the chainmask configuration.
2788  */
2789 void
2790 arn_update_chainmask(struct arn_softc *sc)
2791 {
2792 	boolean_t is_ht = B_FALSE;
2793 	sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
2794 
2795 	is_ht = sc->sc_ht_conf.ht_supported;
2796 	if (is_ht) {
2797 		sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
2798 		sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
2799 	} else {
2800 		sc->sc_tx_chainmask = 1;
2801 		sc->sc_rx_chainmask = 1;
2802 	}
2803 
2804 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2805 	    "tx_chainmask = %d, rx_chainmask = %d\n",
2806 	    sc->sc_tx_chainmask, sc->sc_rx_chainmask));
2807 }
2808 
2809 static int
2810 arn_resume(dev_info_t *devinfo)
2811 {
2812 	struct arn_softc *sc;
2813 	int ret = DDI_SUCCESS;
2814 
2815 	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
2816 	if (sc == NULL) {
2817 		ARN_DBG((ARN_DBG_INIT, "ath: ath_resume(): "
2818 		    "failed to get soft state\n"));
2819 		return (DDI_FAILURE);
2820 	}
2821 
2822 	ARN_LOCK(sc);
2823 	/*
2824 	 * Set up config space command register(s). Refuse
2825 	 * to resume on failure.
2826 	 */
2827 	if (arn_pci_setup(sc) != 0) {
2828 		ARN_DBG((ARN_DBG_INIT, "ath: ath_resume(): "
2829 		    "ath_pci_setup() failed\n"));
2830 		ARN_UNLOCK(sc);
2831 		return (DDI_FAILURE);
2832 	}
2833 
2834 	if (!(sc->sc_flags & SC_OP_INVALID))
2835 		ret = arn_open(sc);
2836 	ARN_UNLOCK(sc);
2837 
2838 	return (ret);
2839 }
2840 
2841 static int
2842 arn_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2843 {
2844 	struct arn_softc *sc;
2845 	int		instance;
2846 	int		status;
2847 	int32_t		err;
2848 	uint16_t	vendor_id;
2849 	uint16_t	device_id;
2850 	uint32_t	i;
2851 	uint32_t	val;
2852 	char		strbuf[32];
2853 	ieee80211com_t *ic;
2854 	struct ath_hal *ah;
2855 	wifi_data_t wd = { 0 };
2856 	mac_register_t *macp;
2857 
2858 	switch (cmd) {
2859 	case DDI_ATTACH:
2860 		break;
2861 	case DDI_RESUME:
2862 		return (arn_resume(devinfo));
2863 	default:
2864 		return (DDI_FAILURE);
2865 	}
2866 
2867 	instance = ddi_get_instance(devinfo);
2868 	if (ddi_soft_state_zalloc(arn_soft_state_p, instance) != DDI_SUCCESS) {
2869 		ARN_DBG((ARN_DBG_ATTACH, "arn: "
2870 		    "%s: Unable to alloc softstate\n", __func__));
2871 		return (DDI_FAILURE);
2872 	}
2873 
2874 	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
2875 	ic = (ieee80211com_t *)sc;
2876 	sc->sc_dev = devinfo;
2877 
2878 	mutex_init(&sc->sc_genlock, NULL, MUTEX_DRIVER, NULL);
2879 	mutex_init(&sc->sc_serial_rw, NULL, MUTEX_DRIVER, NULL);
2880 	mutex_init(&sc->sc_txbuflock, NULL, MUTEX_DRIVER, NULL);
2881 	mutex_init(&sc->sc_rxbuflock, NULL, MUTEX_DRIVER, NULL);
2882 	mutex_init(&sc->sc_resched_lock, NULL, MUTEX_DRIVER, NULL);
2883 #ifdef ARN_IBSS
2884 	mutex_init(&sc->sc_bcbuflock, NULL, MUTEX_DRIVER, NULL);
2885 #endif
2886 
2887 	sc->sc_flags |= SC_OP_INVALID;
2888 
2889 	err = pci_config_setup(devinfo, &sc->sc_cfg_handle);
2890 	if (err != DDI_SUCCESS) {
2891 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2892 		    "pci_config_setup() failed"));
2893 		goto attach_fail0;
2894 	}
2895 
2896 	if (arn_pci_setup(sc) != 0)
2897 		goto attach_fail1;
2898 
2899 	/* Cache line size set up */
2900 	arn_pci_config_cachesize(sc);
2901 
2902 	vendor_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_VENID);
2903 	device_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_DEVID);
2904 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): vendor 0x%x, "
2905 	    "device id 0x%x, cache size %d\n",
2906 	    vendor_id, device_id,
2907 	    pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ)));
2908 
2909 	pci_config_put8(sc->sc_cfg_handle, PCI_CONF_LATENCY_TIMER, 0xa8);
2910 	val = pci_config_get32(sc->sc_cfg_handle, 0x40);
2911 	if ((val & 0x0000ff00) != 0)
2912 		pci_config_put32(sc->sc_cfg_handle, 0x40, val & 0xffff00ff);
2913 
2914 	err = ddi_regs_map_setup(devinfo, 1,
2915 	    &sc->mem, 0, 0, &arn_reg_accattr, &sc->sc_io_handle);
2916 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2917 	    "regs map1 = %x err=%d\n", sc->mem, err));
2918 	if (err != DDI_SUCCESS) {
2919 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2920 		    "ddi_regs_map_setup() failed"));
2921 		goto attach_fail1;
2922 	}
2923 
2924 	ah = ath9k_hw_attach(device_id, sc, sc->mem, &status);
2925 	if (ah == NULL) {
2926 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2927 		    "unable to attach hw: H/W status %u\n",
2928 		    status));
2929 		goto attach_fail2;
2930 	}
2931 	sc->sc_ah = ah;
2932 
2933 	ath9k_hw_getmac(ah, ic->ic_macaddr);
2934 
2935 	/* Get the hardware key cache size. */
2936 	sc->sc_keymax = ah->ah_caps.keycache_size;
2937 	if (sc->sc_keymax > ATH_KEYMAX) {
2938 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2939 		    "Warning, using only %u entries in %u key cache\n",
2940 		    ATH_KEYMAX, sc->sc_keymax));
2941 		sc->sc_keymax = ATH_KEYMAX;
2942 	}
2943 
2944 	/*
2945 	 * Reset the key cache since some parts do not
2946 	 * reset the contents on initial power up.
2947 	 */
2948 	for (i = 0; i < sc->sc_keymax; i++)
2949 		(void) ath9k_hw_keyreset(ah, (uint16_t)i);
2950 	/*
2951 	 * Mark key cache slots associated with global keys
2952 	 * as in use.  If we knew TKIP was not to be used we
2953 	 * could leave the +32, +64, and +32+64 slots free.
2954 	 * XXX only for splitmic.
2955 	 */
2956 	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
2957 		set_bit(i, sc->sc_keymap);
2958 		set_bit(i + 32, sc->sc_keymap);
2959 		set_bit(i + 64, sc->sc_keymap);
2960 		set_bit(i + 32 + 64, sc->sc_keymap);
2961 	}
2962 
2963 	/* Collect the channel list using the default country code */
2964 	err = arn_setup_channels(sc);
2965 	if (err == EINVAL) {
2966 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2967 		    "ERR:arn_setup_channels\n"));
2968 		goto attach_fail3;
2969 	}
2970 
2971 	/* default to STA mode */
2972 	sc->sc_ah->ah_opmode = ATH9K_M_STA;
2973 
2974 	/* Setup rate tables */
2975 	arn_rate_attach(sc);
2976 	arn_setup_rates(sc, IEEE80211_MODE_11A);
2977 	arn_setup_rates(sc, IEEE80211_MODE_11B);
2978 	arn_setup_rates(sc, IEEE80211_MODE_11G);
2979 
2980 	/* Setup current mode here */
2981 	arn_setcurmode(sc, ATH9K_MODE_11G);
2982 
2983 	/* 802.11g features */
2984 	if (sc->sc_have11g)
2985 		ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
2986 		    IEEE80211_C_SHSLOT;		/* short slot time */
2987 
2988 	/* Temp workaround */
2989 	sc->sc_mrretry = 1;
2990 	sc->sc_config.ath_aggr_prot = 0;
2991 
2992 	/* Setup tx/rx descriptors */
2993 	err = arn_desc_alloc(devinfo, sc);
2994 	if (err != DDI_SUCCESS) {
2995 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2996 		    "failed to allocate descriptors: %d\n", err));
2997 		goto attach_fail3;
2998 	}
2999 
3000 	if ((sc->sc_tq = ddi_taskq_create(devinfo, "ath_taskq", 1,
3001 	    TASKQ_DEFAULTPRI, 0)) == NULL) {
3002 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3003 		    "ERR:ddi_taskq_create\n"));
3004 		goto attach_fail4;
3005 	}
3006 
3007 	/*
3008 	 * Allocate hardware transmit queues: one queue for
3009 	 * beacon frames and one data queue for each QoS
3010 	 * priority.  Note that the hal handles reseting
3011 	 * these queues at the needed time.
3012 	 */
3013 #ifdef ARN_IBSS
3014 	sc->sc_beaconq = arn_beaconq_setup(ah);
3015 	if (sc->sc_beaconq == (-1)) {
3016 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3017 		    "unable to setup a beacon xmit queue\n"));
3018 		goto attach_fail4;
3019 	}
3020 #endif
3021 #ifdef ARN_HOSTAP
3022 	sc->sc_cabq = arn_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
3023 	if (sc->sc_cabq == NULL) {
3024 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3025 		    "unable to setup CAB xmit queue\n"));
3026 		goto attach_fail4;
3027 	}
3028 
3029 	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
3030 	ath_cabq_update(sc);
3031 #endif
3032 
3033 	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
3034 		sc->sc_haltype2q[i] = -1;
3035 
3036 	/* Setup data queues */
3037 	/* NB: ensure BK queue is the lowest priority h/w queue */
3038 	if (!arn_tx_setup(sc, ATH9K_WME_AC_BK)) {
3039 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3040 		    "unable to setup xmit queue for BK traffic\n"));
3041 		goto attach_fail4;
3042 	}
3043 	if (!arn_tx_setup(sc, ATH9K_WME_AC_BE)) {
3044 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3045 		    "unable to setup xmit queue for BE traffic\n"));
3046 		goto attach_fail4;
3047 	}
3048 	if (!arn_tx_setup(sc, ATH9K_WME_AC_VI)) {
3049 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3050 		    "unable to setup xmit queue for VI traffic\n"));
3051 		goto attach_fail4;
3052 	}
3053 	if (!arn_tx_setup(sc, ATH9K_WME_AC_VO)) {
3054 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3055 		    "unable to setup xmit queue for VO traffic\n"));
3056 		goto attach_fail4;
3057 	}
3058 
3059 	/*
3060 	 * Initializes the noise floor to a reasonable default value.
3061 	 * Later on this will be updated during ANI processing.
3062 	 */
3063 
3064 	sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;
3065 
3066 
3067 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3068 	    ATH9K_CIPHER_TKIP, NULL)) {
3069 		/*
3070 		 * Whether we should enable h/w TKIP MIC.
3071 		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
3072 		 * report WMM capable, so it's always safe to turn on
3073 		 * TKIP MIC in this case.
3074 		 */
3075 		(void) ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
3076 		    0, 1, NULL);
3077 	}
3078 
3079 	/* Get cipher releated capability information */
3080 	arn_get_hw_encap(sc);
3081 
3082 	/*
3083 	 * Check whether the separate key cache entries
3084 	 * are required to handle both tx+rx MIC keys.
3085 	 * With split mic keys the number of stations is limited
3086 	 * to 27 otherwise 59.
3087 	 */
3088 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3089 	    ATH9K_CIPHER_TKIP, NULL) &&
3090 	    ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3091 	    ATH9K_CIPHER_MIC, NULL) &&
3092 	    ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
3093 	    0, NULL))
3094 		sc->sc_splitmic = 1;
3095 
3096 	/* turn on mcast key search if possible */
3097 	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
3098 		(void) ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
3099 		    1, NULL);
3100 
3101 	sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
3102 	sc->sc_config.txpowlimit_override = 0;
3103 
3104 	/* 11n Capabilities */
3105 	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
3106 		sc->sc_flags |= SC_OP_TXAGGR;
3107 		sc->sc_flags |= SC_OP_RXAGGR;
3108 		arn_setup_ht_cap(sc);
3109 		arn_overwrite_11n_rateset(sc);
3110 	}
3111 
3112 	sc->sc_tx_chainmask = 1;
3113 	sc->sc_rx_chainmask = 1;
3114 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3115 	    "tx_chainmask = %d, rx_chainmask = %d\n",
3116 	    sc->sc_tx_chainmask, sc->sc_rx_chainmask));
3117 
3118 	/* arn_update_chainmask(sc); */
3119 
3120 	(void) ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, B_TRUE, NULL);
3121 	sc->sc_defant = ath9k_hw_getdefantenna(ah);
3122 
3123 	ath9k_hw_getmac(ah, sc->sc_myaddr);
3124 	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
3125 		ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
3126 		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
3127 		(void) ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
3128 	}
3129 
3130 	/* set default value to short slot time */
3131 	sc->sc_slottime = ATH9K_SLOT_TIME_9;
3132 	(void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
3133 
3134 	/* initialize beacon slots */
3135 	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
3136 		sc->sc_bslot[i] = ATH_IF_ID_ANY;
3137 
3138 	/* Save MISC configurations */
3139 	sc->sc_config.swBeaconProcess = 1;
3140 
3141 	/* Support QoS/WME */
3142 	ic->ic_caps |= IEEE80211_C_WME;
3143 	ic->ic_wme.wme_update = arn_wme_update;
3144 
3145 	/* Support 802.11n/HT */
3146 	if (sc->sc_ht_conf.ht_supported) {
3147 		ic->ic_htcaps =
3148 		    IEEE80211_HTCAP_CHWIDTH40 |
3149 		    IEEE80211_HTCAP_SHORTGI40 |
3150 		    IEEE80211_HTCAP_DSSSCCK40 |
3151 		    IEEE80211_HTCAP_MAXAMSDU_7935 |
3152 		    IEEE80211_HTC_HT |
3153 		    IEEE80211_HTC_AMSDU |
3154 		    IEEE80211_HTCAP_RXSTBC_2STREAM;
3155 
3156 #ifdef ARN_TX_AGGREGATION
3157 	ic->ic_htcaps |= IEEE80211_HTC_AMPDU;
3158 #endif
3159 	}
3160 
3161 	/* Header padding requested by driver */
3162 	ic->ic_flags |= IEEE80211_F_DATAPAD;
3163 	/* Support WPA/WPA2 */
3164 	ic->ic_caps |= IEEE80211_C_WPA;
3165 #if 0
3166 	ic->ic_caps |= IEEE80211_C_TXFRAG; /* handle tx frags */
3167 	ic->ic_caps |= IEEE80211_C_BGSCAN; /* capable of bg scanning */
3168 #endif
3169 	ic->ic_phytype = IEEE80211_T_HT;
3170 	ic->ic_opmode = IEEE80211_M_STA;
3171 	ic->ic_state = IEEE80211_S_INIT;
3172 	ic->ic_maxrssi = ARN_MAX_RSSI;
3173 	ic->ic_set_shortslot = arn_set_shortslot;
3174 	ic->ic_xmit = arn_tx;
3175 	ieee80211_attach(ic);
3176 
3177 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3178 	    "ic->ic_curchan->ich_freq: %d\n", ic->ic_curchan->ich_freq));
3179 
3180 	/* different instance has different WPA door */
3181 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
3182 	    ddi_driver_name(devinfo),
3183 	    ddi_get_instance(devinfo));
3184 
3185 	if (sc->sc_ht_conf.ht_supported) {
3186 		sc->sc_recv_action = ic->ic_recv_action;
3187 		ic->ic_recv_action = arn_ampdu_recv_action;
3188 		// sc->sc_send_action = ic->ic_send_action;
3189 		// ic->ic_send_action = arn_ampdu_send_action;
3190 
3191 		ic->ic_ampdu_rxmax = sc->sc_ht_conf.ampdu_factor;
3192 		ic->ic_ampdu_density = sc->sc_ht_conf.ampdu_density;
3193 		ic->ic_ampdu_limit = ic->ic_ampdu_rxmax;
3194 	}
3195 
3196 	/* Override 80211 default routines */
3197 	sc->sc_newstate = ic->ic_newstate;
3198 	ic->ic_newstate = arn_newstate;
3199 #ifdef ARN_IBSS
3200 	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
3201 	ic->ic_recv_mgmt = arn_recv_mgmt;
3202 #endif
3203 	ic->ic_watchdog = arn_watchdog;
3204 	ic->ic_node_alloc = arn_node_alloc;
3205 	ic->ic_node_free = arn_node_free;
3206 	ic->ic_crypto.cs_key_alloc = arn_key_alloc;
3207 	ic->ic_crypto.cs_key_delete = arn_key_delete;
3208 	ic->ic_crypto.cs_key_set = arn_key_set;
3209 
3210 	ieee80211_media_init(ic);
3211 
3212 	/*
3213 	 * initialize default tx key
3214 	 */
3215 	ic->ic_def_txkey = 0;
3216 
3217 	sc->sc_rx_pend = 0;
3218 	(void) ath9k_hw_set_interrupts(sc->sc_ah, 0);
3219 	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW,
3220 	    &sc->sc_softint_id, NULL, 0, arn_softint_handler, (caddr_t)sc);
3221 	if (err != DDI_SUCCESS) {
3222 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3223 		    "ddi_add_softintr() failed....\n"));
3224 		goto attach_fail5;
3225 	}
3226 
3227 	if (ddi_get_iblock_cookie(devinfo, 0, &sc->sc_iblock)
3228 	    != DDI_SUCCESS) {
3229 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3230 		    "Can not get iblock cookie for INT\n"));
3231 		goto attach_fail6;
3232 	}
3233 
3234 	if (ddi_add_intr(devinfo, 0, NULL, NULL, arn_isr,
3235 	    (caddr_t)sc) != DDI_SUCCESS) {
3236 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3237 		    "Can not set intr for ARN driver\n"));
3238 		goto attach_fail6;
3239 	}
3240 
3241 	/*
3242 	 * Provide initial settings for the WiFi plugin; whenever this
3243 	 * information changes, we need to call mac_plugindata_update()
3244 	 */
3245 	wd.wd_opmode = ic->ic_opmode;
3246 	wd.wd_secalloc = WIFI_SEC_NONE;
3247 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid);
3248 
3249 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3250 	    "IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid)"
3251 	    "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
3252 	    wd.wd_bssid[0], wd.wd_bssid[1], wd.wd_bssid[2],
3253 	    wd.wd_bssid[3], wd.wd_bssid[4], wd.wd_bssid[5]));
3254 
3255 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
3256 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3257 		    "MAC version mismatch\n"));
3258 		goto attach_fail7;
3259 	}
3260 
3261 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
3262 	macp->m_driver		= sc;
3263 	macp->m_dip		= devinfo;
3264 	macp->m_src_addr	= ic->ic_macaddr;
3265 	macp->m_callbacks	= &arn_m_callbacks;
3266 	macp->m_min_sdu		= 0;
3267 	macp->m_max_sdu		= IEEE80211_MTU;
3268 	macp->m_pdata		= &wd;
3269 	macp->m_pdata_size	= sizeof (wd);
3270 
3271 	err = mac_register(macp, &ic->ic_mach);
3272 	mac_free(macp);
3273 	if (err != 0) {
3274 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3275 		    "mac_register err %x\n", err));
3276 		goto attach_fail7;
3277 	}
3278 
3279 	/* Create minor node of type DDI_NT_NET_WIFI */
3280 	(void) snprintf(strbuf, sizeof (strbuf), "%s%d",
3281 	    ARN_NODENAME, instance);
3282 	err = ddi_create_minor_node(devinfo, strbuf, S_IFCHR,
3283 	    instance + 1, DDI_NT_NET_WIFI, 0);
3284 	if (err != DDI_SUCCESS)
3285 		ARN_DBG((ARN_DBG_ATTACH, "WARN: arn: arn_attach(): "
3286 		    "Create minor node failed - %d\n", err));
3287 
3288 	/* Notify link is down now */
3289 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
3290 
3291 	sc->sc_promisc = B_FALSE;
3292 	bzero(sc->sc_mcast_refs, sizeof (sc->sc_mcast_refs));
3293 	bzero(sc->sc_mcast_hash, sizeof (sc->sc_mcast_hash));
3294 
3295 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3296 	    "Atheros AR%s MAC/BB Rev:%x "
3297 	    "AR%s RF Rev:%x: mem=0x%lx\n",
3298 	    arn_mac_bb_name(ah->ah_macVersion),
3299 	    ah->ah_macRev,
3300 	    arn_rf_name((ah->ah_analog5GhzRev & AR_RADIO_SREV_MAJOR)),
3301 	    ah->ah_phyRev,
3302 	    (unsigned long)sc->mem));
3303 
3304 	/* XXX: hardware will not be ready until arn_open() being called */
3305 	sc->sc_flags |= SC_OP_INVALID;
3306 	sc->sc_isrunning = 0;
3307 
3308 	return (DDI_SUCCESS);
3309 
3310 attach_fail7:
3311 	ddi_remove_intr(devinfo, 0, sc->sc_iblock);
3312 attach_fail6:
3313 	ddi_remove_softintr(sc->sc_softint_id);
3314 attach_fail5:
3315 	(void) ieee80211_detach(ic);
3316 attach_fail4:
3317 	arn_desc_free(sc);
3318 	if (sc->sc_tq)
3319 		ddi_taskq_destroy(sc->sc_tq);
3320 attach_fail3:
3321 	ath9k_hw_detach(ah);
3322 attach_fail2:
3323 	ddi_regs_map_free(&sc->sc_io_handle);
3324 attach_fail1:
3325 	pci_config_teardown(&sc->sc_cfg_handle);
3326 attach_fail0:
3327 	sc->sc_flags |= SC_OP_INVALID;
3328 	/* cleanup tx queues */
3329 	mutex_destroy(&sc->sc_txbuflock);
3330 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3331 		if (ARN_TXQ_SETUP(sc, i)) {
3332 			/* arn_tx_cleanupq(asc, &asc->sc_txq[i]); */
3333 			mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
3334 		}
3335 	}
3336 	mutex_destroy(&sc->sc_rxbuflock);
3337 	mutex_destroy(&sc->sc_serial_rw);
3338 	mutex_destroy(&sc->sc_genlock);
3339 	mutex_destroy(&sc->sc_resched_lock);
3340 #ifdef ARN_IBSS
3341 	mutex_destroy(&sc->sc_bcbuflock);
3342 #endif
3343 
3344 	ddi_soft_state_free(arn_soft_state_p, instance);
3345 
3346 	return (DDI_FAILURE);
3347 
3348 }
3349 
3350 /*
3351  * Suspend transmit/receive for powerdown
3352  */
3353 static int
3354 arn_suspend(struct arn_softc *sc)
3355 {
3356 	ARN_LOCK(sc);
3357 	arn_close(sc);
3358 	ARN_UNLOCK(sc);
3359 
3360 	return (DDI_SUCCESS);
3361 }
3362 
/*
 * detach(9E) entry point.
 *
 * DDI_SUSPEND is delegated to arn_suspend(); only DDI_DETACH is
 * handled beyond that.  The detach path unwinds everything set up in
 * arn_attach(): MAC registration, timers, interrupts, the net80211
 * layer, DMA descriptors, tx queues, the hal, register mappings and
 * all driver locks, then frees the per-instance soft state.  The
 * teardown ordering constraints are documented inline below.
 */
static int32_t
arn_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	struct arn_softc *sc;
	int i;

	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
	ASSERT(sc != NULL);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (arn_suspend(sc));

	default:
		return (DDI_FAILURE);
	}

	if (mac_disable(sc->sc_isc.ic_mach) != 0)
		return (DDI_FAILURE);

	arn_stop_scantimer(sc);
	arn_stop_caltimer(sc);

	/* disable interrupts */
	(void) ath9k_hw_set_interrupts(sc->sc_ah, 0);

	/*
	 * Unregister from the MAC layer subsystem
	 */
	(void) mac_unregister(sc->sc_isc.ic_mach);

	/* free interrupt resources */
	ddi_remove_intr(devinfo, 0, sc->sc_iblock);
	ddi_remove_softintr(sc->sc_softint_id);

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 */
	ieee80211_detach(&sc->sc_isc);

	arn_desc_free(sc);

	ddi_taskq_destroy(sc->sc_tq);

	/* wake the chip first so hal teardown can reach the hardware */
	if (!(sc->sc_flags & SC_OP_INVALID))
		(void) ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);

	/* cleanup tx queues */
	mutex_destroy(&sc->sc_txbuflock);
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ARN_TXQ_SETUP(sc, i)) {
			arn_tx_cleanupq(sc, &sc->sc_txq[i]);
			mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
		}
	}

	ath9k_hw_detach(sc->sc_ah);

	/* free io handle */
	ddi_regs_map_free(&sc->sc_io_handle);
	pci_config_teardown(&sc->sc_cfg_handle);

	/* destroy locks */
	mutex_destroy(&sc->sc_genlock);
	mutex_destroy(&sc->sc_serial_rw);
	mutex_destroy(&sc->sc_rxbuflock);
	mutex_destroy(&sc->sc_resched_lock);
#ifdef ARN_IBSS
	mutex_destroy(&sc->sc_bcbuflock);
#endif

	ddi_remove_minor_node(devinfo, NULL);
	ddi_soft_state_free(arn_soft_state_p, ddi_get_instance(devinfo));

	return (DDI_SUCCESS);
}
3450 
3451 /*
3452  * quiesce(9E) entry point.
3453  *
3454  * This function is called when the system is single-threaded at high
3455  * PIL with preemption disabled. Therefore, this function must not be
3456  * blocked.
3457  *
3458  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
3459  * DDI_FAILURE indicates an error condition and should almost never happen.
3460  */
3461 static int32_t
3462 arn_quiesce(dev_info_t *devinfo)
3463 {
3464 	struct arn_softc *sc;
3465 	int i;
3466 	struct ath_hal *ah;
3467 
3468 	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
3469 
3470 	if (sc == NULL || (ah = sc->sc_ah) == NULL)
3471 		return (DDI_FAILURE);
3472 
3473 	/*
3474 	 * Disable interrupts
3475 	 */
3476 	(void) ath9k_hw_set_interrupts(ah, 0);
3477 
3478 	/*
3479 	 * Disable TX HW
3480 	 */
3481 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3482 		if (ARN_TXQ_SETUP(sc, i))
3483 			(void) ath9k_hw_stoptxdma(ah, sc->sc_txq[i].axq_qnum);
3484 	}
3485 
3486 	/*
3487 	 * Disable RX HW
3488 	 */
3489 	ath9k_hw_stoppcurecv(ah);
3490 	ath9k_hw_setrxfilter(ah, 0);
3491 	(void) ath9k_hw_stopdmarecv(ah);
3492 	drv_usecwait(3000);
3493 
3494 	/*
3495 	 * Power down HW
3496 	 */
3497 	(void) ath9k_hw_phy_disable(ah);
3498 
3499 	return (DDI_SUCCESS);
3500 }
3501 
/*
 * Loadable-module linkage boilerplate: dev_ops for the instance
 * lifecycle (attach/detach, with arn_quiesce() enabling fast reboot),
 * wrapped in a modldrv/modlinkage pair consumed by _init()/_fini()
 * below.
 */
DDI_DEFINE_STREAM_OPS(arn_dev_ops, nulldev, nulldev, arn_attach, arn_detach,
    nodev, NULL, D_MP, NULL, arn_quiesce);

static struct modldrv arn_modldrv = {
	&mod_driverops, /* Type of module.  This one is a driver */
	"arn-Atheros 9000 series driver:2.0", /* short description */
	&arn_dev_ops /* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&arn_modldrv, NULL
};
3514 
/*
 * _info(9E): report module information via the mod framework.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
3520 
3521 int
3522 _init(void)
3523 {
3524 	int status;
3525 
3526 	status = ddi_soft_state_init
3527 	    (&arn_soft_state_p, sizeof (struct arn_softc), 1);
3528 	if (status != 0)
3529 		return (status);
3530 
3531 	mutex_init(&arn_loglock, NULL, MUTEX_DRIVER, NULL);
3532 	mac_init_ops(&arn_dev_ops, "arn");
3533 	status = mod_install(&modlinkage);
3534 	if (status != 0) {
3535 		mac_fini_ops(&arn_dev_ops);
3536 		mutex_destroy(&arn_loglock);
3537 		ddi_soft_state_fini(&arn_soft_state_p);
3538 	}
3539 
3540 	return (status);
3541 }
3542 
3543 int
3544 _fini(void)
3545 {
3546 	int status;
3547 
3548 	status = mod_remove(&modlinkage);
3549 	if (status == 0) {
3550 		mac_fini_ops(&arn_dev_ops);
3551 		mutex_destroy(&arn_loglock);
3552 		ddi_soft_state_fini(&arn_soft_state_p);
3553 	}
3554 	return (status);
3555 }
3556