xref: /titanic_52/usr/src/uts/common/io/arn/arn_main.c (revision a4aeef46cda1835da2b19f8f62b4526de6521e6c)
1 /*
2  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2008 Atheros Communications Inc.
8  *
9  * Permission to use, copy, modify, and/or distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 #include <sys/param.h>
23 #include <sys/types.h>
24 #include <sys/signal.h>
25 #include <sys/stream.h>
26 #include <sys/termio.h>
27 #include <sys/errno.h>
28 #include <sys/file.h>
29 #include <sys/cmn_err.h>
30 #include <sys/stropts.h>
31 #include <sys/strsubr.h>
32 #include <sys/strtty.h>
33 #include <sys/kbio.h>
34 #include <sys/cred.h>
35 #include <sys/stat.h>
36 #include <sys/consdev.h>
37 #include <sys/kmem.h>
38 #include <sys/modctl.h>
39 #include <sys/ddi.h>
40 #include <sys/sunddi.h>
41 #include <sys/pci.h>
42 #include <sys/errno.h>
43 #include <sys/mac_provider.h>
44 #include <sys/dlpi.h>
45 #include <sys/ethernet.h>
46 #include <sys/list.h>
47 #include <sys/byteorder.h>
48 #include <sys/strsun.h>
49 #include <sys/policy.h>
50 #include <inet/common.h>
51 #include <inet/nd.h>
52 #include <inet/mi.h>
53 #include <inet/wifi_ioctl.h>
54 #include <sys/mac_wifi.h>
55 #include <sys/net80211.h>
56 #include <sys/net80211_proto.h>
57 #include <sys/net80211_ht.h>
58 
59 
60 #include "arn_ath9k.h"
61 #include "arn_core.h"
62 #include "arn_reg.h"
63 #include "arn_hw.h"
64 
65 #define	ARN_MAX_RSSI	45	/* max rssi */
66 
67 /*
68  * Default 11n reates supported by this station.
69  */
70 extern struct ieee80211_htrateset ieee80211_rateset_11n;
71 
72 /*
73  * PIO access attributes for registers
74  */
75 static ddi_device_acc_attr_t arn_reg_accattr = {
76 	DDI_DEVICE_ATTR_V0,
77 	DDI_STRUCTURE_LE_ACC,
78 	DDI_STRICTORDER_ACC,
79 	DDI_DEFAULT_ACC
80 };
81 
82 /*
83  * DMA access attributes for descriptors: NOT to be byte swapped.
84  */
85 static ddi_device_acc_attr_t arn_desc_accattr = {
86 	DDI_DEVICE_ATTR_V0,
87 	DDI_STRUCTURE_LE_ACC,
88 	DDI_STRICTORDER_ACC,
89 	DDI_DEFAULT_ACC
90 };
91 
92 /*
93  * Describes the chip's DMA engine
94  */
95 static ddi_dma_attr_t arn_dma_attr = {
96 	DMA_ATTR_V0,	/* version number */
97 	0,				/* low address */
98 	0xffffffffU,	/* high address */
99 	0x3ffffU,		/* counter register max */
100 	1,				/* alignment */
101 	0xFFF,			/* burst sizes */
102 	1,				/* minimum transfer size */
103 	0x3ffffU,		/* max transfer size */
104 	0xffffffffU,	/* address register max */
105 	1,				/* no scatter-gather */
106 	1,				/* granularity of device */
107 	0,				/* DMA flags */
108 };
109 
110 static ddi_dma_attr_t arn_desc_dma_attr = {
111 	DMA_ATTR_V0,	/* version number */
112 	0,				/* low address */
113 	0xffffffffU,	/* high address */
114 	0xffffffffU,	/* counter register max */
115 	0x1000,			/* alignment */
116 	0xFFF,			/* burst sizes */
117 	1,				/* minimum transfer size */
118 	0xffffffffU,	/* max transfer size */
119 	0xffffffffU,	/* address register max */
120 	1,				/* no scatter-gather */
121 	1,				/* granularity of device */
122 	0,				/* DMA flags */
123 };
124 
125 #define	ATH_DEF_CACHE_BYTES	32 /* default cache line size */
126 
127 static kmutex_t arn_loglock;
128 static void *arn_soft_state_p = NULL;
129 static int arn_dwelltime = 200; /* scan interval */
130 
131 static int	arn_m_stat(void *,  uint_t, uint64_t *);
132 static int	arn_m_start(void *);
133 static void	arn_m_stop(void *);
134 static int	arn_m_promisc(void *, boolean_t);
135 static int	arn_m_multicst(void *, boolean_t, const uint8_t *);
136 static int	arn_m_unicst(void *, const uint8_t *);
137 static mblk_t	*arn_m_tx(void *, mblk_t *);
138 static void	arn_m_ioctl(void *, queue_t *, mblk_t *);
139 static int	arn_m_setprop(void *, const char *, mac_prop_id_t,
140     uint_t, const void *);
141 static int	arn_m_getprop(void *, const char *, mac_prop_id_t,
142     uint_t, uint_t, void *, uint_t *);
143 
144 /* MAC Callcack Functions */
145 static mac_callbacks_t arn_m_callbacks = {
146 	MC_IOCTL | MC_SETPROP | MC_GETPROP,
147 	arn_m_stat,
148 	arn_m_start,
149 	arn_m_stop,
150 	arn_m_promisc,
151 	arn_m_multicst,
152 	arn_m_unicst,
153 	arn_m_tx,
154 	arn_m_ioctl,
155 	NULL,
156 	NULL,
157 	NULL,
158 	arn_m_setprop,
159 	arn_m_getprop
160 };
161 
162 /*
163  * ARN_DBG_HW
164  * ARN_DBG_REG_IO
165  * ARN_DBG_QUEUE
166  * ARN_DBG_EEPROM
167  * ARN_DBG_XMIT
168  * ARN_DBG_RECV
169  * ARN_DBG_CALIBRATE
170  * ARN_DBG_CHANNEL
171  * ARN_DBG_INTERRUPT
172  * ARN_DBG_REGULATORY
173  * ARN_DBG_ANI
174  * ARN_DBG_POWER_MGMT
175  * ARN_DBG_KEYCACHE
176  * ARN_DBG_BEACON
177  * ARN_DBG_RATE
178  * ARN_DBG_INIT
179  * ARN_DBG_ATTACH
180  * ARN_DBG_DEATCH
181  * ARN_DBG_AGGR
182  * ARN_DBG_RESET
183  * ARN_DBG_FATAL
184  * ARN_DBG_ANY
185  * ARN_DBG_ALL
186  */
187 uint32_t arn_dbg_mask = 0;
188 
189 /*
190  * Exception/warning cases not leading to panic.
191  */
192 void
193 arn_problem(const int8_t *fmt, ...)
194 {
195 	va_list args;
196 
197 	mutex_enter(&arn_loglock);
198 
199 	va_start(args, fmt);
200 	vcmn_err(CE_WARN, fmt, args);
201 	va_end(args);
202 
203 	mutex_exit(&arn_loglock);
204 }
205 
206 /*
207  * Normal log information independent of debug.
208  */
209 void
210 arn_log(const int8_t *fmt, ...)
211 {
212 	va_list args;
213 
214 	mutex_enter(&arn_loglock);
215 
216 	va_start(args, fmt);
217 	vcmn_err(CE_CONT, fmt, args);
218 	va_end(args);
219 
220 	mutex_exit(&arn_loglock);
221 }
222 
223 void
224 arn_dbg(uint32_t dbg_flags, const int8_t *fmt, ...)
225 {
226 	va_list args;
227 
228 	if (dbg_flags & arn_dbg_mask) {
229 		mutex_enter(&arn_loglock);
230 		va_start(args, fmt);
231 		vcmn_err(CE_CONT, fmt, args);
232 		va_end(args);
233 		mutex_exit(&arn_loglock);
234 	}
235 }
236 
237 /*
238  * Read and write, they both share the same lock. We do this to serialize
239  * reads and writes on Atheros 802.11n PCI devices only. This is required
240  * as the FIFO on these devices can only accept sanely 2 requests. After
241  * that the device goes bananas. Serializing the reads/writes prevents this
242  * from happening.
243  */
244 void
245 arn_iowrite32(struct ath_hal *ah, uint32_t reg_offset, uint32_t val)
246 {
247 	struct arn_softc *sc = ah->ah_sc;
248 	if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
249 		mutex_enter(&sc->sc_serial_rw);
250 		ddi_put32(sc->sc_io_handle,
251 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
252 		mutex_exit(&sc->sc_serial_rw);
253 	} else {
254 		ddi_put32(sc->sc_io_handle,
255 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
256 	}
257 }
258 
259 unsigned int
260 arn_ioread32(struct ath_hal *ah, uint32_t reg_offset)
261 {
262 	uint32_t val;
263 	struct arn_softc *sc = ah->ah_sc;
264 	if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
265 		mutex_enter(&sc->sc_serial_rw);
266 		val = ddi_get32(sc->sc_io_handle,
267 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
268 		mutex_exit(&sc->sc_serial_rw);
269 	} else {
270 		val = ddi_get32(sc->sc_io_handle,
271 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
272 	}
273 
274 	return (val);
275 }
276 
277 /*
278  * Allocate an area of memory and a DMA handle for accessing it
279  */
280 static int
281 arn_alloc_dma_mem(dev_info_t *devinfo, ddi_dma_attr_t *dma_attr, size_t memsize,
282     ddi_device_acc_attr_t *attr_p, uint_t alloc_flags,
283     uint_t bind_flags, dma_area_t *dma_p)
284 {
285 	int err;
286 
287 	/*
288 	 * Allocate handle
289 	 */
290 	err = ddi_dma_alloc_handle(devinfo, dma_attr,
291 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
292 	if (err != DDI_SUCCESS)
293 		return (DDI_FAILURE);
294 
295 	/*
296 	 * Allocate memory
297 	 */
298 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
299 	    alloc_flags, DDI_DMA_SLEEP, NULL, &dma_p->mem_va,
300 	    &dma_p->alength, &dma_p->acc_hdl);
301 	if (err != DDI_SUCCESS)
302 		return (DDI_FAILURE);
303 
304 	/*
305 	 * Bind the two together
306 	 */
307 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
308 	    dma_p->mem_va, dma_p->alength, bind_flags,
309 	    DDI_DMA_SLEEP, NULL, &dma_p->cookie, &dma_p->ncookies);
310 	if (err != DDI_DMA_MAPPED)
311 		return (DDI_FAILURE);
312 
313 	dma_p->nslots = ~0U;
314 	dma_p->size = ~0U;
315 	dma_p->token = ~0U;
316 	dma_p->offset = 0;
317 	return (DDI_SUCCESS);
318 }
319 
320 /*
321  * Free one allocated area of DMAable memory
322  */
323 static void
324 arn_free_dma_mem(dma_area_t *dma_p)
325 {
326 	if (dma_p->dma_hdl != NULL) {
327 		(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
328 		if (dma_p->acc_hdl != NULL) {
329 			ddi_dma_mem_free(&dma_p->acc_hdl);
330 			dma_p->acc_hdl = NULL;
331 		}
332 		ddi_dma_free_handle(&dma_p->dma_hdl);
333 		dma_p->ncookies = 0;
334 		dma_p->dma_hdl = NULL;
335 	}
336 }
337 
338 /*
339  * Initialize tx, rx. or beacon buffer list. Allocate DMA memory for
340  * each buffer.
341  */
342 static int
343 arn_buflist_setup(dev_info_t *devinfo,
344     struct arn_softc *sc,
345     list_t *bflist,
346     struct ath_buf **pbf,
347     struct ath_desc **pds,
348     int nbuf,
349     uint_t dmabflags,
350     uint32_t buflen)
351 {
352 	int i, err;
353 	struct ath_buf *bf = *pbf;
354 	struct ath_desc *ds = *pds;
355 
356 	list_create(bflist, sizeof (struct ath_buf),
357 	    offsetof(struct ath_buf, bf_node));
358 	for (i = 0; i < nbuf; i++, bf++, ds++) {
359 		bf->bf_desc = ds;
360 		bf->bf_daddr = sc->sc_desc_dma.cookie.dmac_address +
361 		    ((uintptr_t)ds - (uintptr_t)sc->sc_desc);
362 		list_insert_tail(bflist, bf);
363 
364 		/* alloc DMA memory */
365 		err = arn_alloc_dma_mem(devinfo, &arn_dma_attr,
366 		    buflen, &arn_desc_accattr, DDI_DMA_STREAMING,
367 		    dmabflags, &bf->bf_dma);
368 		if (err != DDI_SUCCESS)
369 			return (err);
370 	}
371 	*pbf = bf;
372 	*pds = ds;
373 
374 	return (DDI_SUCCESS);
375 }
376 
377 /*
378  * Destroy tx, rx or beacon buffer list. Free DMA memory.
379  */
380 static void
381 arn_buflist_cleanup(list_t *buflist)
382 {
383 	struct ath_buf *bf;
384 
385 	if (!buflist)
386 		return;
387 
388 	bf = list_head(buflist);
389 	while (bf != NULL) {
390 		if (bf->bf_m != NULL) {
391 			freemsg(bf->bf_m);
392 			bf->bf_m = NULL;
393 		}
394 		/* Free DMA buffer */
395 		arn_free_dma_mem(&bf->bf_dma);
396 		if (bf->bf_in != NULL) {
397 			ieee80211_free_node(bf->bf_in);
398 			bf->bf_in = NULL;
399 		}
400 		list_remove(buflist, bf);
401 		bf = list_head(buflist);
402 	}
403 	list_destroy(buflist);
404 }
405 
406 static void
407 arn_desc_free(struct arn_softc *sc)
408 {
409 	arn_buflist_cleanup(&sc->sc_txbuf_list);
410 	arn_buflist_cleanup(&sc->sc_rxbuf_list);
411 #ifdef ARN_IBSS
412 	arn_buflist_cleanup(&sc->sc_bcbuf_list);
413 #endif
414 
415 	/* Free descriptor DMA buffer */
416 	arn_free_dma_mem(&sc->sc_desc_dma);
417 
418 	kmem_free((void *)sc->sc_vbufptr, sc->sc_vbuflen);
419 	sc->sc_vbufptr = NULL;
420 }
421 
422 static int
423 arn_desc_alloc(dev_info_t *devinfo, struct arn_softc *sc)
424 {
425 	int err;
426 	size_t size;
427 	struct ath_desc *ds;
428 	struct ath_buf *bf;
429 
430 #ifdef ARN_IBSS
431 	size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF);
432 #else
433 	size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF);
434 #endif
435 
436 	err = arn_alloc_dma_mem(devinfo, &arn_desc_dma_attr, size,
437 	    &arn_desc_accattr, DDI_DMA_CONSISTENT,
438 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &sc->sc_desc_dma);
439 
440 	/* virtual address of the first descriptor */
441 	sc->sc_desc = (struct ath_desc *)sc->sc_desc_dma.mem_va;
442 
443 	ds = sc->sc_desc;
444 	ARN_DBG((ARN_DBG_INIT, "arn: arn_desc_alloc(): DMA map: "
445 	    "%p (%d) -> %p\n",
446 	    sc->sc_desc, sc->sc_desc_dma.alength,
447 	    sc->sc_desc_dma.cookie.dmac_address));
448 
449 	/* allocate data structures to describe TX/RX DMA buffers */
450 #ifdef ARN_IBSS
451 	sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF +
452 	    ATH_BCBUF);
453 #else
454 	sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF);
455 #endif
456 	bf = (struct ath_buf *)kmem_zalloc(sc->sc_vbuflen, KM_SLEEP);
457 	sc->sc_vbufptr = bf;
458 
459 	/* DMA buffer size for each TX/RX packet */
460 #ifdef ARN_TX_AGGREGRATION
461 	sc->tx_dmabuf_size =
462 	    roundup((IEEE80211_MAX_MPDU_LEN + 3840 * 2),
463 	    min(sc->sc_cachelsz, (uint16_t)64));
464 #else
465 	sc->tx_dmabuf_size =
466 	    roundup(IEEE80211_MAX_MPDU_LEN, min(sc->sc_cachelsz, (uint16_t)64));
467 #endif
468 	sc->rx_dmabuf_size =
469 	    roundup(IEEE80211_MAX_MPDU_LEN, min(sc->sc_cachelsz, (uint16_t)64));
470 
471 	/* create RX buffer list */
472 	err = arn_buflist_setup(devinfo, sc, &sc->sc_rxbuf_list, &bf, &ds,
473 	    ATH_RXBUF, DDI_DMA_READ | DDI_DMA_STREAMING, sc->rx_dmabuf_size);
474 	if (err != DDI_SUCCESS) {
475 		arn_desc_free(sc);
476 		return (err);
477 	}
478 
479 	/* create TX buffer list */
480 	err = arn_buflist_setup(devinfo, sc, &sc->sc_txbuf_list, &bf, &ds,
481 	    ATH_TXBUF, DDI_DMA_STREAMING, sc->tx_dmabuf_size);
482 	if (err != DDI_SUCCESS) {
483 		arn_desc_free(sc);
484 		return (err);
485 	}
486 
487 	/* create beacon buffer list */
488 #ifdef ARN_IBSS
489 	err = arn_buflist_setup(devinfo, sc, &sc->sc_bcbuf_list, &bf, &ds,
490 	    ATH_BCBUF, DDI_DMA_STREAMING);
491 	if (err != DDI_SUCCESS) {
492 		arn_desc_free(sc);
493 		return (err);
494 	}
495 #endif
496 
497 	return (DDI_SUCCESS);
498 }
499 
500 static struct ath_rate_table *
501 /* LINTED E_STATIC_UNUSED */
502 arn_get_ratetable(struct arn_softc *sc, uint32_t mode)
503 {
504 	struct ath_rate_table *rate_table = NULL;
505 
506 	switch (mode) {
507 	case IEEE80211_MODE_11A:
508 		rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
509 		break;
510 	case IEEE80211_MODE_11B:
511 		rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
512 		break;
513 	case IEEE80211_MODE_11G:
514 		rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
515 		break;
516 #ifdef ARB_11N
517 	case IEEE80211_MODE_11NA_HT20:
518 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
519 		break;
520 	case IEEE80211_MODE_11NG_HT20:
521 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
522 		break;
523 	case IEEE80211_MODE_11NA_HT40PLUS:
524 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
525 		break;
526 	case IEEE80211_MODE_11NA_HT40MINUS:
527 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
528 		break;
529 	case IEEE80211_MODE_11NG_HT40PLUS:
530 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
531 		break;
532 	case IEEE80211_MODE_11NG_HT40MINUS:
533 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
534 		break;
535 #endif
536 	default:
537 		ARN_DBG((ARN_DBG_FATAL, "arn: arn_get_ratetable(): "
538 		    "invalid mode %u\n", mode));
539 		return (NULL);
540 	}
541 
542 	return (rate_table);
543 
544 }
545 
546 static void
547 arn_setcurmode(struct arn_softc *sc, enum wireless_mode mode)
548 {
549 	struct ath_rate_table *rt;
550 	int i;
551 
552 	for (i = 0; i < sizeof (sc->asc_rixmap); i++)
553 		sc->asc_rixmap[i] = 0xff;
554 
555 	rt = sc->hw_rate_table[mode];
556 	ASSERT(rt != NULL);
557 
558 	for (i = 0; i < rt->rate_cnt; i++)
559 		sc->asc_rixmap[rt->info[i].dot11rate &
560 		    IEEE80211_RATE_VAL] = (uint8_t)i; /* LINT */
561 
562 	sc->sc_currates = rt;
563 	sc->sc_curmode = mode;
564 
565 	/*
566 	 * All protection frames are transmited at 2Mb/s for
567 	 * 11g, otherwise at 1Mb/s.
568 	 * XXX select protection rate index from rate table.
569 	 */
570 	sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
571 }
572 
573 static enum wireless_mode
574 arn_chan2mode(struct ath9k_channel *chan)
575 {
576 	if (chan->chanmode == CHANNEL_A)
577 		return (ATH9K_MODE_11A);
578 	else if (chan->chanmode == CHANNEL_G)
579 		return (ATH9K_MODE_11G);
580 	else if (chan->chanmode == CHANNEL_B)
581 		return (ATH9K_MODE_11B);
582 	else if (chan->chanmode == CHANNEL_A_HT20)
583 		return (ATH9K_MODE_11NA_HT20);
584 	else if (chan->chanmode == CHANNEL_G_HT20)
585 		return (ATH9K_MODE_11NG_HT20);
586 	else if (chan->chanmode == CHANNEL_A_HT40PLUS)
587 		return (ATH9K_MODE_11NA_HT40PLUS);
588 	else if (chan->chanmode == CHANNEL_A_HT40MINUS)
589 		return (ATH9K_MODE_11NA_HT40MINUS);
590 	else if (chan->chanmode == CHANNEL_G_HT40PLUS)
591 		return (ATH9K_MODE_11NG_HT40PLUS);
592 	else if (chan->chanmode == CHANNEL_G_HT40MINUS)
593 		return (ATH9K_MODE_11NG_HT40MINUS);
594 
595 	return (ATH9K_MODE_11B);
596 }
597 
598 static void
599 arn_update_txpow(struct arn_softc *sc)
600 {
601 	struct ath_hal 	*ah = sc->sc_ah;
602 	uint32_t txpow;
603 
604 	if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
605 		(void) ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
606 		/* read back in case value is clamped */
607 		(void) ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
608 		sc->sc_curtxpow = (uint32_t)txpow;
609 	}
610 }
611 
612 uint8_t
613 parse_mpdudensity(uint8_t mpdudensity)
614 {
615 	/*
616 	 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
617 	 *   0 for no restriction
618 	 *   1 for 1/4 us
619 	 *   2 for 1/2 us
620 	 *   3 for 1 us
621 	 *   4 for 2 us
622 	 *   5 for 4 us
623 	 *   6 for 8 us
624 	 *   7 for 16 us
625 	 */
626 	switch (mpdudensity) {
627 	case 0:
628 		return (0);
629 	case 1:
630 	case 2:
631 	case 3:
632 		/*
633 		 * Our lower layer calculations limit our
634 		 * precision to 1 microsecond
635 		 */
636 		return (1);
637 	case 4:
638 		return (2);
639 	case 5:
640 		return (4);
641 	case 6:
642 		return (8);
643 	case 7:
644 		return (16);
645 	default:
646 		return (0);
647 	}
648 }
649 
650 static void
651 arn_setup_rates(struct arn_softc *sc, uint32_t mode)
652 {
653 	int i, maxrates;
654 	struct ath_rate_table *rate_table = NULL;
655 	struct ieee80211_rateset *rateset;
656 	ieee80211com_t *ic = (ieee80211com_t *)sc;
657 
658 	/* rate_table = arn_get_ratetable(sc, mode); */
659 	switch (mode) {
660 	case IEEE80211_MODE_11A:
661 		rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
662 		break;
663 	case IEEE80211_MODE_11B:
664 		rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
665 		break;
666 	case IEEE80211_MODE_11G:
667 		rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
668 		break;
669 #ifdef ARN_11N
670 	case IEEE80211_MODE_11NA_HT20:
671 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
672 		break;
673 	case IEEE80211_MODE_11NG_HT20:
674 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
675 		break;
676 	case IEEE80211_MODE_11NA_HT40PLUS:
677 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
678 		break;
679 	case IEEE80211_MODE_11NA_HT40MINUS:
680 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
681 		break;
682 	case IEEE80211_MODE_11NG_HT40PLUS:
683 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
684 		break;
685 	case IEEE80211_MODE_11NG_HT40MINUS:
686 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
687 		break;
688 #endif
689 	default:
690 		ARN_DBG((ARN_DBG_RATE, "arn: arn_get_ratetable(): "
691 		    "invalid mode %u\n", mode));
692 		break;
693 	}
694 	if (rate_table == NULL)
695 		return;
696 	if (rate_table->rate_cnt > ATH_RATE_MAX) {
697 		ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
698 		    "rate table too small (%u > %u)\n",
699 		    rate_table->rate_cnt, IEEE80211_RATE_MAXSIZE));
700 		maxrates = ATH_RATE_MAX;
701 	} else
702 		maxrates = rate_table->rate_cnt;
703 
704 	ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
705 	    "maxrates is %d\n", maxrates));
706 
707 	rateset = &ic->ic_sup_rates[mode];
708 	for (i = 0; i < maxrates; i++) {
709 		rateset->ir_rates[i] = rate_table->info[i].dot11rate;
710 		ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
711 		    "%d\n", rate_table->info[i].dot11rate));
712 	}
713 	rateset->ir_nrates = (uint8_t)maxrates; /* ??? */
714 }
715 
716 static int
717 arn_setup_channels(struct arn_softc *sc)
718 {
719 	struct ath_hal *ah = sc->sc_ah;
720 	ieee80211com_t *ic = (ieee80211com_t *)sc;
721 	int nchan, i, index;
722 	uint8_t regclassids[ATH_REGCLASSIDS_MAX];
723 	uint32_t nregclass = 0;
724 	struct ath9k_channel *c;
725 
726 	/* Fill in ah->ah_channels */
727 	if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (uint32_t *)&nchan,
728 	    regclassids, ATH_REGCLASSIDS_MAX, &nregclass, CTRY_DEFAULT,
729 	    B_FALSE, 1)) {
730 		uint32_t rd = ah->ah_currentRD;
731 		ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
732 		    "unable to collect channel list; "
733 		    "regdomain likely %u country code %u\n",
734 		    rd, CTRY_DEFAULT));
735 		return (EINVAL);
736 	}
737 
738 	ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
739 	    "number of channel is %d\n", nchan));
740 
741 	for (i = 0; i < nchan; i++) {
742 		c = &ah->ah_channels[i];
743 		uint32_t flags;
744 		index = ath9k_hw_mhz2ieee(ah, c->channel, c->channelFlags);
745 
746 		if (index > IEEE80211_CHAN_MAX) {
747 			ARN_DBG((ARN_DBG_CHANNEL,
748 			    "arn: arn_setup_channels(): "
749 			    "bad hal channel %d (%u/%x) ignored\n",
750 			    index, c->channel, c->channelFlags));
751 			continue;
752 		}
753 		/* NB: flags are known to be compatible */
754 		if (index < 0) {
755 			/*
756 			 * can't handle frequency <2400MHz (negative
757 			 * channels) right now
758 			 */
759 			ARN_DBG((ARN_DBG_CHANNEL,
760 			    "arn: arn_setup_channels(): "
761 			    "hal channel %d (%u/%x) "
762 			    "cannot be handled, ignored\n",
763 			    index, c->channel, c->channelFlags));
764 			continue;
765 		}
766 
767 		/*
768 		 * Calculate net80211 flags; most are compatible
769 		 * but some need massaging.  Note the static turbo
770 		 * conversion can be removed once net80211 is updated
771 		 * to understand static vs. dynamic turbo.
772 		 */
773 
774 		flags = c->channelFlags & (CHANNEL_ALL | CHANNEL_PASSIVE);
775 
776 		if (ic->ic_sup_channels[index].ich_freq == 0) {
777 			ic->ic_sup_channels[index].ich_freq = c->channel;
778 			ic->ic_sup_channels[index].ich_flags = flags;
779 		} else {
780 			/* channels overlap; e.g. 11g and 11b */
781 			ic->ic_sup_channels[index].ich_flags |= flags;
782 		}
783 		if ((c->channelFlags & CHANNEL_G) == CHANNEL_G) {
784 			sc->sc_have11g = 1;
785 			ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
786 			    IEEE80211_C_SHSLOT;	/* short slot time */
787 		}
788 	}
789 
790 	return (0);
791 }
792 
793 uint32_t
794 arn_chan2flags(ieee80211com_t *isc, struct ieee80211_channel *chan)
795 {
796 	uint32_t channel_mode;
797 	switch (ieee80211_chan2mode(isc, chan)) {
798 	case IEEE80211_MODE_11NA:
799 		if (chan->ich_flags & IEEE80211_CHAN_HT40U)
800 			channel_mode = CHANNEL_A_HT40PLUS;
801 		else if (chan->ich_flags & IEEE80211_CHAN_HT40D)
802 			channel_mode = CHANNEL_A_HT40MINUS;
803 		else
804 			channel_mode = CHANNEL_A_HT20;
805 		break;
806 	case IEEE80211_MODE_11NG:
807 		if (chan->ich_flags & IEEE80211_CHAN_HT40U)
808 			channel_mode = CHANNEL_G_HT40PLUS;
809 		else if (chan->ich_flags & IEEE80211_CHAN_HT40D)
810 			channel_mode = CHANNEL_G_HT40MINUS;
811 		else
812 			channel_mode = CHANNEL_G_HT20;
813 		break;
814 	case IEEE80211_MODE_TURBO_G:
815 	case IEEE80211_MODE_STURBO_A:
816 	case IEEE80211_MODE_TURBO_A:
817 		channel_mode = 0;
818 		break;
819 	case IEEE80211_MODE_11A:
820 		channel_mode = CHANNEL_A;
821 		break;
822 	case IEEE80211_MODE_11G:
823 		channel_mode = CHANNEL_B;
824 		break;
825 	case IEEE80211_MODE_11B:
826 		channel_mode = CHANNEL_G;
827 		break;
828 	case IEEE80211_MODE_FH:
829 		channel_mode = 0;
830 		break;
831 	default:
832 		break;
833 	}
834 
835 	return (channel_mode);
836 }
837 
838 /*
839  * Update internal state after a channel change.
840  */
841 void
842 arn_chan_change(struct arn_softc *sc, struct ieee80211_channel *chan)
843 {
844 	struct ieee80211com *ic = &sc->sc_isc;
845 	enum ieee80211_phymode mode;
846 	enum wireless_mode wlmode;
847 
848 	/*
849 	 * Change channels and update the h/w rate map
850 	 * if we're switching; e.g. 11a to 11b/g.
851 	 */
852 	mode = ieee80211_chan2mode(ic, chan);
853 	switch (mode) {
854 	case IEEE80211_MODE_11A:
855 		wlmode = ATH9K_MODE_11A;
856 		break;
857 	case IEEE80211_MODE_11B:
858 		wlmode = ATH9K_MODE_11B;
859 		break;
860 	case IEEE80211_MODE_11G:
861 		wlmode = ATH9K_MODE_11B;
862 		break;
863 	default:
864 		break;
865 	}
866 	if (wlmode != sc->sc_curmode)
867 		arn_setcurmode(sc, wlmode);
868 
869 }
870 
871 /*
872  * Set/change channels.  If the channel is really being changed, it's done
873  * by reseting the chip.  To accomplish this we must first cleanup any pending
874  * DMA, then restart stuff.
875  */
876 static int
877 arn_set_channel(struct arn_softc *sc, struct ath9k_channel *hchan)
878 {
879 	struct ath_hal *ah = sc->sc_ah;
880 	ieee80211com_t *ic = &sc->sc_isc;
881 	boolean_t fastcc = B_TRUE;
882 	boolean_t  stopped;
883 	struct ieee80211_channel chan;
884 	enum wireless_mode curmode;
885 
886 	if (sc->sc_flags & SC_OP_INVALID)
887 		return (EIO);
888 
889 	if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
890 	    hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
891 	    (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
892 	    (sc->sc_flags & SC_OP_FULL_RESET)) {
893 		int status;
894 
895 		/*
896 		 * This is only performed if the channel settings have
897 		 * actually changed.
898 		 *
899 		 * To switch channels clear any pending DMA operations;
900 		 * wait long enough for the RX fifo to drain, reset the
901 		 * hardware at the new frequency, and then re-enable
902 		 * the relevant bits of the h/w.
903 		 */
904 		(void) ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
905 		arn_draintxq(sc, B_FALSE);	/* clear pending tx frames */
906 		stopped = arn_stoprecv(sc);	/* turn off frame recv */
907 
908 		/*
909 		 * XXX: do not flush receive queue here. We don't want
910 		 * to flush data frames already in queue because of
911 		 * changing channel.
912 		 */
913 
914 		if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
915 			fastcc = B_FALSE;
916 
917 		ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_set_channel(): "
918 		    "(%u MHz) -> (%u MHz), cflags:%x, chanwidth: %d\n",
919 		    sc->sc_ah->ah_curchan->channel,
920 		    hchan->channel, hchan->channelFlags, sc->tx_chan_width));
921 
922 		if (!ath9k_hw_reset(ah, hchan, sc->tx_chan_width,
923 		    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
924 		    sc->sc_ht_extprotspacing, fastcc, &status)) {
925 			ARN_DBG((ARN_DBG_FATAL, "arn: arn_set_channel(): "
926 			    "unable to reset channel %u (%uMhz) "
927 			    "flags 0x%x hal status %u\n",
928 			    ath9k_hw_mhz2ieee(ah, hchan->channel,
929 			    hchan->channelFlags),
930 			    hchan->channel, hchan->channelFlags, status));
931 			return (EIO);
932 		}
933 
934 		sc->sc_curchan = *hchan;
935 
936 		sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
937 		sc->sc_flags &= ~SC_OP_FULL_RESET;
938 
939 		if (arn_startrecv(sc) != 0) {
940 			arn_problem("arn: arn_set_channel(): "
941 			    "unable to restart recv logic\n");
942 			return (EIO);
943 		}
944 
945 		chan.ich_freq = hchan->channel;
946 		chan.ich_flags = hchan->channelFlags;
947 		ic->ic_ibss_chan = &chan;
948 
949 		/*
950 		 * Change channels and update the h/w rate map
951 		 * if we're switching; e.g. 11a to 11b/g.
952 		 */
953 		curmode = arn_chan2mode(hchan);
954 		if (curmode != sc->sc_curmode)
955 			arn_setcurmode(sc, arn_chan2mode(hchan));
956 
957 		arn_update_txpow(sc);
958 
959 		(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
960 	}
961 
962 	return (0);
963 }
964 
965 /*
966  *  This routine performs the periodic noise floor calibration function
967  *  that is used to adjust and optimize the chip performance.  This
968  *  takes environmental changes (location, temperature) into account.
969  *  When the task is complete, it reschedules itself depending on the
970  *  appropriate interval that was calculated.
971  */
972 static void
973 arn_ani_calibrate(void *arg)
974 
975 {
976 	ieee80211com_t *ic = (ieee80211com_t *)arg;
977 	struct arn_softc *sc = (struct arn_softc *)ic;
978 	struct ath_hal *ah = sc->sc_ah;
979 	boolean_t longcal = B_FALSE;
980 	boolean_t shortcal = B_FALSE;
981 	boolean_t aniflag = B_FALSE;
982 	unsigned int timestamp = drv_hztousec(ddi_get_lbolt())/1000;
983 	uint32_t cal_interval;
984 
985 	/*
986 	 * don't calibrate when we're scanning.
987 	 * we are most likely not on our home channel.
988 	 */
989 	if (ic->ic_state != IEEE80211_S_RUN)
990 		goto settimer;
991 
992 	/* Long calibration runs independently of short calibration. */
993 	if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
994 		longcal = B_TRUE;
995 		ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
996 		    "%s: longcal @%lu\n", __func__, drv_hztousec));
997 		sc->sc_ani.sc_longcal_timer = timestamp;
998 	}
999 
1000 	/* Short calibration applies only while sc_caldone is FALSE */
1001 	if (!sc->sc_ani.sc_caldone) {
1002 		if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
1003 		    ATH_SHORT_CALINTERVAL) {
1004 			shortcal = B_TRUE;
1005 			ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1006 			    "%s: shortcal @%lu\n",
1007 			    __func__, drv_hztousec));
1008 			sc->sc_ani.sc_shortcal_timer = timestamp;
1009 			sc->sc_ani.sc_resetcal_timer = timestamp;
1010 		}
1011 	} else {
1012 		if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
1013 		    ATH_RESTART_CALINTERVAL) {
1014 			ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
1015 						&sc->sc_ani.sc_caldone);
1016 			if (sc->sc_ani.sc_caldone)
1017 				sc->sc_ani.sc_resetcal_timer = timestamp;
1018 		}
1019 	}
1020 
1021 	/* Verify whether we must check ANI */
1022 	if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
1023 	    ATH_ANI_POLLINTERVAL) {
1024 		aniflag = B_TRUE;
1025 		sc->sc_ani.sc_checkani_timer = timestamp;
1026 	}
1027 
1028 	/* Skip all processing if there's nothing to do. */
1029 	if (longcal || shortcal || aniflag) {
1030 		/* Call ANI routine if necessary */
1031 		if (aniflag)
1032 			ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
1033 			    ah->ah_curchan);
1034 
1035 		/* Perform calibration if necessary */
1036 		if (longcal || shortcal) {
1037 			boolean_t iscaldone = B_FALSE;
1038 
1039 			if (ath9k_hw_calibrate(ah, ah->ah_curchan,
1040 			    sc->sc_rx_chainmask, longcal, &iscaldone)) {
1041 				if (longcal)
1042 					sc->sc_ani.sc_noise_floor =
1043 					    ath9k_hw_getchan_noise(ah,
1044 					    ah->ah_curchan);
1045 
1046 				ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1047 				    "%s: calibrate chan %u/%x nf: %d\n",
1048 				    __func__,
1049 				    ah->ah_curchan->channel,
1050 				    ah->ah_curchan->channelFlags,
1051 				    sc->sc_ani.sc_noise_floor));
1052 			} else {
1053 				ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1054 				    "%s: calibrate chan %u/%x failed\n",
1055 				    __func__,
1056 				    ah->ah_curchan->channel,
1057 				    ah->ah_curchan->channelFlags));
1058 			}
1059 			sc->sc_ani.sc_caldone = iscaldone;
1060 		}
1061 	}
1062 
1063 settimer:
1064 	/*
1065 	 * Set timer interval based on previous results.
1066 	 * The interval must be the shortest necessary to satisfy ANI,
1067 	 * short calibration and long calibration.
1068 	 */
1069 	cal_interval = ATH_LONG_CALINTERVAL;
1070 	if (sc->sc_ah->ah_config.enable_ani)
1071 		cal_interval =
1072 		    min(cal_interval, (uint32_t)ATH_ANI_POLLINTERVAL);
1073 
1074 	if (!sc->sc_ani.sc_caldone)
1075 		cal_interval = min(cal_interval,
1076 		    (uint32_t)ATH_SHORT_CALINTERVAL);
1077 
1078 	sc->sc_scan_timer = 0;
1079 	sc->sc_scan_timer = timeout(arn_ani_calibrate, (void *)sc,
1080 	    drv_usectohz(cal_interval * 1000));
1081 }
1082 
1083 static void
1084 arn_stop_caltimer(struct arn_softc *sc)
1085 {
1086 	timeout_id_t tmp_id = 0;
1087 
1088 	while ((sc->sc_cal_timer != 0) && (tmp_id != sc->sc_cal_timer)) {
1089 		tmp_id = sc->sc_cal_timer;
1090 		(void) untimeout(tmp_id);
1091 	}
1092 	sc->sc_cal_timer = 0;
1093 }
1094 
1095 static uint_t
1096 arn_isr(caddr_t arg)
1097 {
1098 	/* LINTED E_BAD_PTR_CAST_ALIGN */
1099 	struct arn_softc *sc = (struct arn_softc *)arg;
1100 	struct ath_hal *ah = sc->sc_ah;
1101 	enum ath9k_int status;
1102 	ieee80211com_t *ic = (ieee80211com_t *)sc;
1103 
1104 	ARN_LOCK(sc);
1105 
1106 	if (sc->sc_flags & SC_OP_INVALID) {
1107 		/*
1108 		 * The hardware is not ready/present, don't
1109 		 * touch anything. Note this can happen early
1110 		 * on if the IRQ is shared.
1111 		 */
1112 		ARN_UNLOCK(sc);
1113 		return (DDI_INTR_UNCLAIMED);
1114 	}
1115 	if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
1116 		ARN_UNLOCK(sc);
1117 		return (DDI_INTR_UNCLAIMED);
1118 	}
1119 
1120 	/*
1121 	 * Figure out the reason(s) for the interrupt. Note
1122 	 * that the hal returns a pseudo-ISR that may include
1123 	 * bits we haven't explicitly enabled so we mask the
1124 	 * value to insure we only process bits we requested.
1125 	 */
1126 	(void) ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
1127 
1128 	status &= sc->sc_imask; /* discard unasked-for bits */
1129 
1130 	/*
1131 	 * If there are no status bits set, then this interrupt was not
1132 	 * for me (should have been caught above).
1133 	 */
1134 	if (!status) {
1135 		ARN_UNLOCK(sc);
1136 		return (DDI_INTR_UNCLAIMED);
1137 	}
1138 
1139 	sc->sc_intrstatus = status;
1140 
1141 	if (status & ATH9K_INT_FATAL) {
1142 		/* need a chip reset */
1143 		ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1144 		    "ATH9K_INT_FATAL\n"));
1145 		goto reset;
1146 	} else if (status & ATH9K_INT_RXORN) {
1147 		/* need a chip reset */
1148 		ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1149 		    "ATH9K_INT_RXORN\n"));
1150 		goto reset;
1151 	} else {
1152 		if (status & ATH9K_INT_RXEOL) {
1153 			/*
1154 			 * NB: the hardware should re-read the link when
1155 			 * RXE bit is written, but it doesn't work
1156 			 * at least on older hardware revs.
1157 			 */
1158 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1159 			    "ATH9K_INT_RXEOL\n"));
1160 			sc->sc_rxlink = NULL;
1161 		}
1162 		if (status & ATH9K_INT_TXURN) {
1163 			/* bump tx trigger level */
1164 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1165 			    "ATH9K_INT_TXURN\n"));
1166 			(void) ath9k_hw_updatetxtriglevel(ah, B_TRUE);
1167 		}
1168 		/* XXX: optimize this */
1169 		if (status & ATH9K_INT_RX) {
1170 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1171 			    "ATH9K_INT_RX\n"));
1172 			sc->sc_rx_pend = 1;
1173 			ddi_trigger_softintr(sc->sc_softint_id);
1174 		}
1175 		if (status & ATH9K_INT_TX) {
1176 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1177 			    "ATH9K_INT_TX\n"));
1178 			if (ddi_taskq_dispatch(sc->sc_tq,
1179 			    arn_tx_int_proc, sc, DDI_NOSLEEP) !=
1180 			    DDI_SUCCESS) {
1181 				arn_problem("arn: arn_isr(): "
1182 				    "No memory for tx taskq\n");
1183 				}
1184 			}
1185 #ifdef ARN_ATH9K_INT_MIB
1186 		if (status & ATH9K_INT_MIB) {
1187 			/*
1188 			 * Disable interrupts until we service the MIB
1189 			 * interrupt; otherwise it will continue to
1190 			 * fire.
1191 			 */
1192 			(void) ath9k_hw_set_interrupts(ah, 0);
1193 			/*
1194 			 * Let the hal handle the event. We assume
1195 			 * it will clear whatever condition caused
1196 			 * the interrupt.
1197 			 */
1198 			ath9k_hw_procmibevent(ah, &sc->sc_halstats);
1199 			(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
1200 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1201 			    "ATH9K_INT_MIB\n"));
1202 		}
1203 #endif
1204 
1205 #ifdef ARN_ATH9K_INT_TIM_TIMER
1206 		if (status & ATH9K_INT_TIM_TIMER) {
1207 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1208 			    "ATH9K_INT_TIM_TIMER\n"));
1209 			if (!(ah->ah_caps.hw_caps &
1210 			    ATH9K_HW_CAP_AUTOSLEEP)) {
1211 				/*
1212 				 * Clear RxAbort bit so that we can
1213 				 * receive frames
1214 				 */
1215 				ath9k_hw_setrxabort(ah, 0);
1216 				goto reset;
1217 			}
1218 		}
1219 #endif
1220 
1221 		if (status & ATH9K_INT_BMISS) {
1222 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1223 			    "ATH9K_INT_BMISS\n"));
1224 #ifdef ARN_HW_BEACON_MISS_HANDLE
1225 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1226 			    "handle beacon mmiss by H/W mechanism\n"));
1227 			if (ddi_taskq_dispatch(sc->sc_tq, arn_bmiss_proc,
1228 			    sc, DDI_NOSLEEP) != DDI_SUCCESS) {
1229 				arn_problem("arn: arn_isr(): "
1230 				    "No memory available for bmiss taskq\n");
1231 			}
1232 #else
1233 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1234 			    "handle beacon mmiss by S/W mechanism\n"));
1235 #endif /* ARN_HW_BEACON_MISS_HANDLE */
1236 		}
1237 
1238 		ARN_UNLOCK(sc);
1239 
1240 #ifdef ARN_ATH9K_INT_CST
1241 		/* carrier sense timeout */
1242 		if (status & ATH9K_INT_CST) {
1243 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1244 			    "ATH9K_INT_CST\n"));
1245 			return (DDI_INTR_CLAIMED);
1246 		}
1247 #endif
1248 
1249 		if (status & ATH9K_INT_SWBA) {
1250 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1251 			    "ATH9K_INT_SWBA\n"));
1252 			/* This will occur only in Host-AP or Ad-Hoc mode */
1253 			return (DDI_INTR_CLAIMED);
1254 		}
1255 	}
1256 
1257 	return (DDI_INTR_CLAIMED);
1258 reset:
1259 	ARN_DBG((ARN_DBG_INTERRUPT, "Rset for fatal err\n"));
1260 	(void) arn_reset(ic);
1261 	ARN_UNLOCK(sc);
1262 	return (DDI_INTR_CLAIMED);
1263 }
1264 
1265 static int
1266 arn_get_channel(struct arn_softc *sc, struct ieee80211_channel *chan)
1267 {
1268 	int i;
1269 
1270 	for (i = 0; i < sc->sc_ah->ah_nchan; i++) {
1271 		if (sc->sc_ah->ah_channels[i].channel == chan->ich_freq)
1272 			return (i);
1273 	}
1274 
1275 	return (-1);
1276 }
1277 
1278 int
1279 arn_reset(ieee80211com_t *ic)
1280 {
1281 	struct arn_softc *sc = (struct arn_softc *)ic;
1282 	struct ath_hal *ah = sc->sc_ah;
1283 	int status;
1284 	int error = 0;
1285 
1286 	(void) ath9k_hw_set_interrupts(ah, 0);
1287 	arn_draintxq(sc, 0);
1288 	(void) arn_stoprecv(sc);
1289 
1290 	if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan, sc->tx_chan_width,
1291 	    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1292 	    sc->sc_ht_extprotspacing, B_FALSE, &status)) {
1293 		ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
1294 		    "unable to reset hardware; hal status %u\n", status));
1295 		error = EIO;
1296 	}
1297 
1298 	if (arn_startrecv(sc) != 0)
1299 		ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
1300 		    "unable to start recv logic\n"));
1301 
1302 	/*
1303 	 * We may be doing a reset in response to a request
1304 	 * that changes the channel so update any state that
1305 	 * might change as a result.
1306 	 */
1307 	arn_setcurmode(sc, arn_chan2mode(sc->sc_ah->ah_curchan));
1308 
1309 	arn_update_txpow(sc);
1310 
1311 	if (sc->sc_flags & SC_OP_BEACONS)
1312 		arn_beacon_config(sc);	/* restart beacons */
1313 
1314 	(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
1315 
1316 	return (error);
1317 }
1318 
1319 int
1320 arn_get_hal_qnum(uint16_t queue, struct arn_softc *sc)
1321 {
1322 	int qnum;
1323 
1324 	switch (queue) {
1325 	case WME_AC_VO:
1326 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1327 		break;
1328 	case WME_AC_VI:
1329 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1330 		break;
1331 	case WME_AC_BE:
1332 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1333 		break;
1334 	case WME_AC_BK:
1335 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1336 		break;
1337 	default:
1338 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1339 		break;
1340 	}
1341 
1342 	return (qnum);
1343 }
1344 
1345 static struct {
1346 	uint32_t version;
1347 	const char *name;
1348 } ath_mac_bb_names[] = {
1349 	{ AR_SREV_VERSION_5416_PCI,	"5416" },
1350 	{ AR_SREV_VERSION_5416_PCIE,	"5418" },
1351 	{ AR_SREV_VERSION_9100,		"9100" },
1352 	{ AR_SREV_VERSION_9160,		"9160" },
1353 	{ AR_SREV_VERSION_9280,		"9280" },
1354 	{ AR_SREV_VERSION_9285,		"9285" }
1355 };
1356 
1357 static struct {
1358 	uint16_t version;
1359 	const char *name;
1360 } ath_rf_names[] = {
1361 	{ 0,				"5133" },
1362 	{ AR_RAD5133_SREV_MAJOR,	"5133" },
1363 	{ AR_RAD5122_SREV_MAJOR,	"5122" },
1364 	{ AR_RAD2133_SREV_MAJOR,	"2133" },
1365 	{ AR_RAD2122_SREV_MAJOR,	"2122" }
1366 };
1367 
1368 /*
1369  * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
1370  */
1371 
1372 static const char *
1373 arn_mac_bb_name(uint32_t mac_bb_version)
1374 {
1375 	int i;
1376 
1377 	for (i = 0; i < ARRAY_SIZE(ath_mac_bb_names); i++) {
1378 		if (ath_mac_bb_names[i].version == mac_bb_version) {
1379 			return (ath_mac_bb_names[i].name);
1380 		}
1381 	}
1382 
1383 	return ("????");
1384 }
1385 
1386 /*
1387  * Return the RF name. "????" is returned if the RF is unknown.
1388  */
1389 
1390 static const char *
1391 arn_rf_name(uint16_t rf_version)
1392 {
1393 	int i;
1394 
1395 	for (i = 0; i < ARRAY_SIZE(ath_rf_names); i++) {
1396 		if (ath_rf_names[i].version == rf_version) {
1397 			return (ath_rf_names[i].name);
1398 		}
1399 	}
1400 
1401 	return ("????");
1402 }
1403 
1404 static void
1405 arn_next_scan(void *arg)
1406 {
1407 	ieee80211com_t *ic = arg;
1408 	struct arn_softc *sc = (struct arn_softc *)ic;
1409 
1410 	sc->sc_scan_timer = 0;
1411 	if (ic->ic_state == IEEE80211_S_SCAN) {
1412 		sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
1413 		    drv_usectohz(arn_dwelltime * 1000));
1414 		ieee80211_next_scan(ic);
1415 	}
1416 }
1417 
1418 static void
1419 arn_stop_scantimer(struct arn_softc *sc)
1420 {
1421 	timeout_id_t tmp_id = 0;
1422 
1423 	while ((sc->sc_scan_timer != 0) && (tmp_id != sc->sc_scan_timer)) {
1424 		tmp_id = sc->sc_scan_timer;
1425 		(void) untimeout(tmp_id);
1426 	}
1427 	sc->sc_scan_timer = 0;
1428 }
1429 
1430 static int32_t
1431 arn_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1432 {
1433 	struct arn_softc *sc = (struct arn_softc *)ic;
1434 	struct ath_hal *ah = sc->sc_ah;
1435 	struct ieee80211_node *in;
1436 	int32_t i, error;
1437 	uint8_t *bssid;
1438 	uint32_t rfilt;
1439 	enum ieee80211_state ostate;
1440 	struct ath9k_channel *channel;
1441 	int pos;
1442 
1443 	/* Should set up & init LED here */
1444 
1445 	if (sc->sc_flags & SC_OP_INVALID)
1446 		return (0);
1447 
1448 	ostate = ic->ic_state;
1449 	ARN_DBG((ARN_DBG_INIT, "arn: arn_newstate(): "
1450 	    "%x -> %x!\n", ostate, nstate));
1451 
1452 	ARN_LOCK(sc);
1453 
1454 	if (nstate != IEEE80211_S_SCAN)
1455 		arn_stop_scantimer(sc);
1456 	if (nstate != IEEE80211_S_RUN)
1457 		arn_stop_caltimer(sc);
1458 
1459 	/* Should set LED here */
1460 
1461 	if (nstate == IEEE80211_S_INIT) {
1462 		sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1463 		/*
1464 		 * Disable interrupts.
1465 		 */
1466 		(void) ath9k_hw_set_interrupts
1467 		    (ah, sc->sc_imask &~ ATH9K_INT_GLOBAL);
1468 
1469 #ifdef ARN_IBSS
1470 		if (ic->ic_opmode == IEEE80211_M_IBSS) {
1471 			(void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
1472 			arn_beacon_return(sc);
1473 		}
1474 #endif
1475 		ARN_UNLOCK(sc);
1476 		ieee80211_stop_watchdog(ic);
1477 		goto done;
1478 	}
1479 	in = ic->ic_bss;
1480 
1481 	pos = arn_get_channel(sc, ic->ic_curchan);
1482 
1483 	if (pos == -1) {
1484 		ARN_DBG((ARN_DBG_FATAL, "arn: "
1485 		    "%s: Invalid channel\n", __func__));
1486 		error = EINVAL;
1487 		ARN_UNLOCK(sc);
1488 		goto bad;
1489 	}
1490 
1491 	if (in->in_htcap & IEEE80211_HTCAP_CHWIDTH40) {
1492 		arn_update_chainmask(sc);
1493 		sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
1494 	} else
1495 		sc->tx_chan_width = ATH9K_HT_MACMODE_20;
1496 
1497 	sc->sc_ah->ah_channels[pos].chanmode =
1498 	    arn_chan2flags(ic, ic->ic_curchan);
1499 	channel = &sc->sc_ah->ah_channels[pos];
1500 	if (channel == NULL) {
1501 		arn_problem("arn_newstate(): channel == NULL");
1502 		ARN_UNLOCK(sc);
1503 		goto bad;
1504 	}
1505 	error = arn_set_channel(sc, channel);
1506 	if (error != 0) {
1507 		if (nstate != IEEE80211_S_SCAN) {
1508 			ARN_UNLOCK(sc);
1509 			ieee80211_reset_chan(ic);
1510 			goto bad;
1511 		}
1512 	}
1513 
1514 	/*
1515 	 * Get the receive filter according to the
1516 	 * operating mode and state
1517 	 */
1518 	rfilt = arn_calcrxfilter(sc);
1519 
1520 	if (nstate == IEEE80211_S_SCAN)
1521 		bssid = ic->ic_macaddr;
1522 	else
1523 		bssid = in->in_bssid;
1524 
1525 	ath9k_hw_setrxfilter(ah, rfilt);
1526 
1527 	if (nstate == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS)
1528 		ath9k_hw_write_associd(ah, bssid, in->in_associd);
1529 	else
1530 		ath9k_hw_write_associd(ah, bssid, 0);
1531 
1532 	/* Check for WLAN_CAPABILITY_PRIVACY ? */
1533 	if (ic->ic_flags & IEEE80211_F_PRIVACY) {
1534 		for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1535 			if (ath9k_hw_keyisvalid(ah, (uint16_t)i))
1536 				(void) ath9k_hw_keysetmac(ah, (uint16_t)i,
1537 				    bssid);
1538 		}
1539 	}
1540 
1541 	if (nstate == IEEE80211_S_RUN) {
1542 		switch (ic->ic_opmode) {
1543 #ifdef ARN_IBSS
1544 		case IEEE80211_M_IBSS:
1545 			/*
1546 			 * Allocate and setup the beacon frame.
1547 			 * Stop any previous beacon DMA.
1548 			 */
1549 			(void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
1550 			arn_beacon_return(sc);
1551 			error = arn_beacon_alloc(sc, in);
1552 			if (error != 0) {
1553 				ARN_UNLOCK(sc);
1554 				goto bad;
1555 			}
1556 			/*
1557 			 * If joining an adhoc network defer beacon timer
1558 			 * configuration to the next beacon frame so we
1559 			 * have a current TSF to use.  Otherwise we're
1560 			 * starting an ibss/bss so there's no need to delay.
1561 			 */
1562 			if (ic->ic_opmode == IEEE80211_M_IBSS &&
1563 			    ic->ic_bss->in_tstamp.tsf != 0) {
1564 				sc->sc_bsync = 1;
1565 			} else {
1566 				arn_beacon_config(sc);
1567 			}
1568 			break;
1569 #endif /* ARN_IBSS */
1570 		case IEEE80211_M_STA:
1571 			if (ostate != IEEE80211_S_RUN) {
1572 				/*
1573 				 * Defer beacon timer configuration to the next
1574 				 * beacon frame so we have a current TSF to use.
1575 				 * Any TSF collected when scanning is likely old
1576 				 */
1577 #ifdef ARN_IBSS
1578 				sc->sc_bsync = 1;
1579 #else
1580 				/* Configure the beacon and sleep timers. */
1581 				arn_beacon_config(sc);
1582 				/* Reset rssi stats */
1583 				sc->sc_halstats.ns_avgbrssi =
1584 				    ATH_RSSI_DUMMY_MARKER;
1585 				sc->sc_halstats.ns_avgrssi =
1586 				    ATH_RSSI_DUMMY_MARKER;
1587 				sc->sc_halstats.ns_avgtxrssi =
1588 				    ATH_RSSI_DUMMY_MARKER;
1589 				sc->sc_halstats.ns_avgtxrate =
1590 				    ATH_RATE_DUMMY_MARKER;
1591 /* end */
1592 
1593 #endif /* ARN_IBSS */
1594 			}
1595 			break;
1596 		default:
1597 			break;
1598 		}
1599 	} else {
1600 		sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1601 		(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
1602 	}
1603 
1604 	/*
1605 	 * Reset the rate control state.
1606 	 */
1607 	arn_rate_ctl_reset(sc, nstate);
1608 
1609 	ARN_UNLOCK(sc);
1610 done:
1611 	/*
1612 	 * Invoke the parent method to complete the work.
1613 	 */
1614 	error = sc->sc_newstate(ic, nstate, arg);
1615 
1616 	/*
1617 	 * Finally, start any timers.
1618 	 */
1619 	if (nstate == IEEE80211_S_RUN) {
1620 		ieee80211_start_watchdog(ic, 1);
1621 		ASSERT(sc->sc_cal_timer == 0);
1622 		sc->sc_cal_timer = timeout(arn_ani_calibrate, (void *)sc,
1623 		    drv_usectohz(100 * 1000));
1624 	} else if ((nstate == IEEE80211_S_SCAN) && (ostate != nstate)) {
1625 		/* start ap/neighbor scan timer */
1626 		/* ASSERT(sc->sc_scan_timer == 0); */
1627 		if (sc->sc_scan_timer != 0) {
1628 			(void) untimeout(sc->sc_scan_timer);
1629 			sc->sc_scan_timer = 0;
1630 		}
1631 		sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
1632 		    drv_usectohz(arn_dwelltime * 1000));
1633 	}
1634 
1635 bad:
1636 	return (error);
1637 }
1638 
1639 static void
1640 arn_watchdog(void *arg)
1641 {
1642 	struct arn_softc *sc = arg;
1643 	ieee80211com_t *ic = &sc->sc_isc;
1644 	int ntimer = 0;
1645 
1646 	ARN_LOCK(sc);
1647 	ic->ic_watchdog_timer = 0;
1648 	if (sc->sc_flags & SC_OP_INVALID) {
1649 		ARN_UNLOCK(sc);
1650 		return;
1651 	}
1652 
1653 	if (ic->ic_state == IEEE80211_S_RUN) {
1654 		/*
1655 		 * Start the background rate control thread if we
1656 		 * are not configured to use a fixed xmit rate.
1657 		 */
1658 #ifdef ARN_LEGACY_RC
1659 		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1660 			sc->sc_stats.ast_rate_calls ++;
1661 			if (ic->ic_opmode == IEEE80211_M_STA)
1662 				arn_rate_ctl(ic, ic->ic_bss);
1663 			else
1664 				ieee80211_iterate_nodes(&ic->ic_sta,
1665 				    arn_rate_ctl, sc);
1666 		}
1667 #endif /* ARN_LEGACY_RC */
1668 
1669 #ifdef ARN_HW_BEACON_MISS_HANDLE
1670 	/* nothing to do here */
1671 #else
1672 	/* currently set 10 seconds as beacon miss threshold */
1673 	if (ic->ic_beaconmiss++ > 100) {
1674 		ARN_DBG((ARN_DBG_BEACON, "arn_watchdog():"
1675 		    "Beacon missed for 10 seconds, run"
1676 		    "ieee80211_new_state(ic, IEEE80211_S_INIT, -1)\n"));
1677 		ARN_UNLOCK(sc);
1678 		(void) ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1679 		return;
1680 	}
1681 #endif /* ARN_HW_BEACON_MISS_HANDLE */
1682 
1683 		ntimer = 1;
1684 	}
1685 	ARN_UNLOCK(sc);
1686 
1687 	ieee80211_watchdog(ic);
1688 	if (ntimer != 0)
1689 		ieee80211_start_watchdog(ic, ntimer);
1690 }
1691 
1692 /* ARGSUSED */
1693 static struct ieee80211_node *
1694 arn_node_alloc(ieee80211com_t *ic)
1695 {
1696 	struct ath_node *an;
1697 #ifdef ARN_TX_AGGREGATION
1698 	struct arn_softc *sc = (struct arn_softc *)ic;
1699 #endif
1700 
1701 	an = kmem_zalloc(sizeof (struct ath_node), KM_SLEEP);
1702 
1703 	/* legacy rate control */
1704 #ifdef ARN_LEGACY_RC
1705 	arn_rate_update(sc, &an->an_node, 0);
1706 #endif
1707 
1708 #ifdef ARN_TX_AGGREGATION
1709 	if (sc->sc_flags & SC_OP_TXAGGR) {
1710 		arn_tx_node_init(sc, an);
1711 	}
1712 #endif /* ARN_TX_AGGREGATION */
1713 
1714 	an->last_rssi = ATH_RSSI_DUMMY_MARKER;
1715 
1716 	return ((an != NULL) ? &an->an_node : NULL);
1717 }
1718 
1719 static void
1720 arn_node_free(struct ieee80211_node *in)
1721 {
1722 	ieee80211com_t *ic = in->in_ic;
1723 	struct arn_softc *sc = (struct arn_softc *)ic;
1724 	struct ath_buf *bf;
1725 	struct ath_txq *txq;
1726 	int32_t i;
1727 
1728 #ifdef ARN_TX_AGGREGATION
1729 	if (sc->sc_flags & SC_OP_TXAGGR)
1730 		arn_tx_node_cleanup(sc, in);
1731 #endif /* TX_AGGREGATION */
1732 
1733 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1734 		if (ARN_TXQ_SETUP(sc, i)) {
1735 			txq = &sc->sc_txq[i];
1736 			mutex_enter(&txq->axq_lock);
1737 			bf = list_head(&txq->axq_list);
1738 			while (bf != NULL) {
1739 				if (bf->bf_in == in) {
1740 					bf->bf_in = NULL;
1741 				}
1742 				bf = list_next(&txq->axq_list, bf);
1743 			}
1744 			mutex_exit(&txq->axq_lock);
1745 		}
1746 	}
1747 
1748 	ic->ic_node_cleanup(in);
1749 
1750 	if (in->in_wpa_ie != NULL)
1751 		ieee80211_free(in->in_wpa_ie);
1752 
1753 	if (in->in_wme_ie != NULL)
1754 		ieee80211_free(in->in_wme_ie);
1755 
1756 	if (in->in_htcap_ie != NULL)
1757 		ieee80211_free(in->in_htcap_ie);
1758 
1759 	kmem_free(in, sizeof (struct ath_node));
1760 }
1761 
1762 /*
1763  * Allocate tx/rx key slots for TKIP.  We allocate one slot for
1764  * each key. MIC is right after the decrypt/encrypt key.
1765  */
1766 static uint16_t
1767 arn_key_alloc_pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1768     ieee80211_keyix *rxkeyix)
1769 {
1770 	uint16_t i, keyix;
1771 
1772 	ASSERT(!sc->sc_splitmic);
1773 	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
1774 		uint8_t b = sc->sc_keymap[i];
1775 		if (b == 0xff)
1776 			continue;
1777 		for (keyix = i * NBBY; keyix < (i + 1) * NBBY;
1778 		    keyix++, b >>= 1) {
1779 			if ((b & 1) || is_set(keyix+64, sc->sc_keymap)) {
1780 				/* full pair unavailable */
1781 				continue;
1782 			}
1783 			set_bit(keyix, sc->sc_keymap);
1784 			set_bit(keyix+64, sc->sc_keymap);
1785 			ARN_DBG((ARN_DBG_KEYCACHE,
1786 			    "arn_key_alloc_pair(): key pair %u,%u\n",
1787 			    keyix, keyix+64));
1788 			*txkeyix = *rxkeyix = keyix;
1789 			return (1);
1790 		}
1791 	}
1792 	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_pair():"
1793 	    " out of pair space\n"));
1794 
1795 	return (0);
1796 }
1797 
1798 /*
1799  * Allocate tx/rx key slots for TKIP.  We allocate two slots for
1800  * each key, one for decrypt/encrypt and the other for the MIC.
1801  */
1802 static int
1803 arn_key_alloc_2pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1804     ieee80211_keyix *rxkeyix)
1805 {
1806 	uint16_t i, keyix;
1807 
1808 	ASSERT(sc->sc_splitmic);
1809 	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
1810 		uint8_t b = sc->sc_keymap[i];
1811 		if (b != 0xff) {
1812 			/*
1813 			 * One or more slots in this byte are free.
1814 			 */
1815 			keyix = i*NBBY;
1816 			while (b & 1) {
1817 		again:
1818 				keyix++;
1819 				b >>= 1;
1820 			}
1821 			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
1822 			if (is_set(keyix+32, sc->sc_keymap) ||
1823 			    is_set(keyix+64, sc->sc_keymap) ||
1824 			    is_set(keyix+32+64, sc->sc_keymap)) {
1825 				/* full pair unavailable */
1826 				if (keyix == (i+1)*NBBY) {
1827 					/* no slots were appropriate, advance */
1828 					continue;
1829 				}
1830 				goto again;
1831 			}
1832 			set_bit(keyix, sc->sc_keymap);
1833 			set_bit(keyix+64, sc->sc_keymap);
1834 			set_bit(keyix+32, sc->sc_keymap);
1835 			set_bit(keyix+32+64, sc->sc_keymap);
1836 			ARN_DBG((ARN_DBG_KEYCACHE,
1837 			    "arn_key_alloc_2pair(): key pair %u,%u %u,%u\n",
1838 			    keyix, keyix+64,
1839 			    keyix+32, keyix+32+64));
1840 			*txkeyix = *rxkeyix = keyix;
1841 			return (1);
1842 		}
1843 	}
1844 	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_2pair(): "
1845 	    " out of pair space\n"));
1846 
1847 	return (0);
1848 }
1849 /*
1850  * Allocate a single key cache slot.
1851  */
1852 static int
1853 arn_key_alloc_single(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1854     ieee80211_keyix *rxkeyix)
1855 {
1856 	uint16_t i, keyix;
1857 
1858 	/* try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
1859 	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap); i++) {
1860 		uint8_t b = sc->sc_keymap[i];
1861 
1862 		if (b != 0xff) {
1863 			/*
1864 			 * One or more slots are free.
1865 			 */
1866 			keyix = i*NBBY;
1867 			while (b & 1)
1868 				keyix++, b >>= 1;
1869 			set_bit(keyix, sc->sc_keymap);
1870 			ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_single(): "
1871 			    "key %u\n", keyix));
1872 			*txkeyix = *rxkeyix = keyix;
1873 			return (1);
1874 		}
1875 	}
1876 	return (0);
1877 }
1878 
1879 /*
1880  * Allocate one or more key cache slots for a unicast key.  The
1881  * key itself is needed only to identify the cipher.  For hardware
1882  * TKIP with split cipher+MIC keys we allocate two key cache slot
1883  * pairs so that we can setup separate TX and RX MIC keys.  Note
1884  * that the MIC key for a TKIP key at slot i is assumed by the
1885  * hardware to be at slot i+64.  This limits TKIP keys to the first
1886  * 64 entries.
1887  */
1888 /* ARGSUSED */
1889 int
1890 arn_key_alloc(ieee80211com_t *ic, const struct ieee80211_key *k,
1891     ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1892 {
1893 	struct arn_softc *sc = (struct arn_softc *)ic;
1894 
1895 	/*
1896 	 * We allocate two pair for TKIP when using the h/w to do
1897 	 * the MIC.  For everything else, including software crypto,
1898 	 * we allocate a single entry.  Note that s/w crypto requires
1899 	 * a pass-through slot on the 5211 and 5212.  The 5210 does
1900 	 * not support pass-through cache entries and we map all
1901 	 * those requests to slot 0.
1902 	 */
1903 	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
1904 		return (arn_key_alloc_single(sc, keyix, rxkeyix));
1905 	} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
1906 	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1907 		if (sc->sc_splitmic)
1908 			return (arn_key_alloc_2pair(sc, keyix, rxkeyix));
1909 		else
1910 			return (arn_key_alloc_pair(sc, keyix, rxkeyix));
1911 	} else {
1912 		return (arn_key_alloc_single(sc, keyix, rxkeyix));
1913 	}
1914 }
1915 
1916 /*
1917  * Delete an entry in the key cache allocated by ath_key_alloc.
1918  */
1919 int
1920 arn_key_delete(ieee80211com_t *ic, const struct ieee80211_key *k)
1921 {
1922 	struct arn_softc *sc = (struct arn_softc *)ic;
1923 	struct ath_hal *ah = sc->sc_ah;
1924 	const struct ieee80211_cipher *cip = k->wk_cipher;
1925 	ieee80211_keyix keyix = k->wk_keyix;
1926 
1927 	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_delete():"
1928 	    " delete key %u ic_cipher=0x%x\n", keyix, cip->ic_cipher));
1929 
1930 	(void) ath9k_hw_keyreset(ah, keyix);
1931 	/*
1932 	 * Handle split tx/rx keying required for TKIP with h/w MIC.
1933 	 */
1934 	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
1935 	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
1936 		(void) ath9k_hw_keyreset(ah, keyix+32);		/* RX key */
1937 
1938 	if (keyix >= IEEE80211_WEP_NKID) {
1939 		/*
1940 		 * Don't touch keymap entries for global keys so
1941 		 * they are never considered for dynamic allocation.
1942 		 */
1943 		clr_bit(keyix, sc->sc_keymap);
1944 		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
1945 		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1946 			/*
1947 			 * If splitmic is true +64 is TX key MIC,
1948 			 * else +64 is RX key + RX key MIC.
1949 			 */
1950 			clr_bit(keyix+64, sc->sc_keymap);
1951 			if (sc->sc_splitmic) {
1952 				/* Rx key */
1953 				clr_bit(keyix+32, sc->sc_keymap);
1954 				/* RX key MIC */
1955 				clr_bit(keyix+32+64, sc->sc_keymap);
1956 			}
1957 		}
1958 	}
1959 	return (1);
1960 }
1961 
1962 /*
1963  * Set a TKIP key into the hardware.  This handles the
1964  * potential distribution of key state to multiple key
1965  * cache slots for TKIP.
1966  */
1967 static int
1968 arn_keyset_tkip(struct arn_softc *sc, const struct ieee80211_key *k,
1969     struct ath9k_keyval *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
1970 {
1971 	uint8_t *key_rxmic = NULL;
1972 	uint8_t *key_txmic = NULL;
1973 	uint8_t  *key = (uint8_t *)&(k->wk_key[0]);
1974 	struct ath_hal *ah = sc->sc_ah;
1975 
1976 	key_txmic = key + 16;
1977 	key_rxmic = key + 24;
1978 
1979 	if (mac == NULL) {
1980 		/* Group key installation */
1981 		(void) memcpy(hk->kv_mic,  key_rxmic, sizeof (hk->kv_mic));
1982 		return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
1983 		    mac, B_FALSE));
1984 	}
1985 	if (!sc->sc_splitmic) {
1986 		/*
1987 		 * data key goes at first index,
1988 		 * the hal handles the MIC keys at index+64.
1989 		 */
1990 		(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
1991 		(void) memcpy(hk->kv_txmic, key_txmic, sizeof (hk->kv_txmic));
1992 		return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
1993 		    mac, B_FALSE));
1994 	}
1995 	/*
1996 	 * TX key goes at first index, RX key at +32.
1997 	 * The hal handles the MIC keys at index+64.
1998 	 */
1999 	(void) memcpy(hk->kv_mic, key_txmic, sizeof (hk->kv_mic));
2000 	if (!(ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, NULL,
2001 	    B_FALSE))) {
2002 		/* Txmic entry failed. No need to proceed further */
2003 		ARN_DBG((ARN_DBG_KEYCACHE,
2004 		    "%s Setting TX MIC Key Failed\n", __func__));
2005 		return (0);
2006 	}
2007 
2008 	(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
2009 
2010 	/* XXX delete tx key on failure? */
2011 	return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, mac, B_FALSE));
2012 
2013 }
2014 
2015 int
2016 arn_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
2017     const uint8_t mac[IEEE80211_ADDR_LEN])
2018 {
2019 	struct arn_softc *sc = (struct arn_softc *)ic;
2020 	const struct ieee80211_cipher *cip = k->wk_cipher;
2021 	struct ath9k_keyval hk;
2022 
2023 	/* cipher table */
2024 	static const uint8_t ciphermap[] = {
2025 		ATH9K_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
2026 		ATH9K_CIPHER_TKIP,		/* IEEE80211_CIPHER_TKIP */
2027 		ATH9K_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
2028 		ATH9K_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
2029 		ATH9K_CIPHER_CKIP,		/* IEEE80211_CIPHER_CKIP */
2030 		ATH9K_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
2031 	};
2032 
2033 	bzero(&hk, sizeof (hk));
2034 
2035 	/*
2036 	 * Software crypto uses a "clear key" so non-crypto
2037 	 * state kept in the key cache are maintainedd so that
2038 	 * rx frames have an entry to match.
2039 	 */
2040 	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
2041 		ASSERT(cip->ic_cipher < 6);
2042 		hk.kv_type = ciphermap[cip->ic_cipher];
2043 		hk.kv_len = k->wk_keylen;
2044 		bcopy(k->wk_key, hk.kv_val, k->wk_keylen);
2045 	} else {
2046 		hk.kv_type = ATH9K_CIPHER_CLR;
2047 	}
2048 
2049 	if (hk.kv_type == ATH9K_CIPHER_TKIP &&
2050 	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2051 		return (arn_keyset_tkip(sc, k, &hk, mac));
2052 	} else {
2053 		return (ath9k_hw_set_keycache_entry(sc->sc_ah,
2054 		    k->wk_keyix, &hk, mac, B_FALSE));
2055 	}
2056 }
2057 
2058 /*
2059  * Enable/Disable short slot timing
2060  */
2061 void
2062 arn_set_shortslot(ieee80211com_t *ic, int onoff)
2063 {
2064 	struct ath_hal *ah = ((struct arn_softc *)ic)->sc_ah;
2065 
2066 	if (onoff)
2067 		(void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
2068 	else
2069 		(void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_20);
2070 }
2071 
2072 static int
2073 arn_open(struct arn_softc *sc)
2074 {
2075 	ieee80211com_t *ic = (ieee80211com_t *)sc;
2076 	struct ieee80211_channel *curchan = ic->ic_curchan;
2077 	struct ath9k_channel *init_channel;
2078 	int error = 0, pos, status;
2079 
2080 	ARN_LOCK_ASSERT(sc);
2081 
2082 	pos = arn_get_channel(sc, curchan);
2083 	if (pos == -1) {
2084 		ARN_DBG((ARN_DBG_FATAL, "arn: "
2085 		    "%s: Invalid channel\n", __func__));
2086 		error = EINVAL;
2087 		goto error;
2088 	}
2089 
2090 	sc->tx_chan_width = ATH9K_HT_MACMODE_20;
2091 
2092 	if (sc->sc_curmode == ATH9K_MODE_11A) {
2093 		sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_A;
2094 	} else {
2095 		sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_G;
2096 	}
2097 
2098 	init_channel = &sc->sc_ah->ah_channels[pos];
2099 
2100 	/* Reset SERDES registers */
2101 	ath9k_hw_configpcipowersave(sc->sc_ah, 0);
2102 
2103 	/*
2104 	 * The basic interface to setting the hardware in a good
2105 	 * state is ``reset''.	On return the hardware is known to
2106 	 * be powered up and with interrupts disabled.	This must
2107 	 * be followed by initialization of the appropriate bits
2108 	 * and then setup of the interrupt mask.
2109 	 */
2110 	if (!ath9k_hw_reset(sc->sc_ah, init_channel,
2111 	    sc->tx_chan_width, sc->sc_tx_chainmask,
2112 	    sc->sc_rx_chainmask, sc->sc_ht_extprotspacing,
2113 	    B_FALSE, &status)) {
2114 		ARN_DBG((ARN_DBG_FATAL, "arn: "
2115 		    "%s: unable to reset hardware; hal status %u "
2116 		    "(freq %u flags 0x%x)\n", __func__, status,
2117 		    init_channel->channel, init_channel->channelFlags));
2118 
2119 		error = EIO;
2120 		goto error;
2121 	}
2122 
2123 	/*
2124 	 * This is needed only to setup initial state
2125 	 * but it's best done after a reset.
2126 	 */
2127 	arn_update_txpow(sc);
2128 
2129 	/*
2130 	 * Setup the hardware after reset:
2131 	 * The receive engine is set going.
2132 	 * Frame transmit is handled entirely
2133 	 * in the frame output path; there's nothing to do
2134 	 * here except setup the interrupt mask.
2135 	 */
2136 	if (arn_startrecv(sc) != 0) {
2137 		ARN_DBG((ARN_DBG_INIT, "arn: "
2138 		    "%s: unable to start recv logic\n", __func__));
2139 		error = EIO;
2140 		goto error;
2141 	}
2142 
2143 	/* Setup our intr mask. */
2144 	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX |
2145 	    ATH9K_INT_RXEOL | ATH9K_INT_RXORN |
2146 	    ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
2147 #ifdef ARN_ATH9K_HW_CAP_GTT
2148 	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
2149 		sc->sc_imask |= ATH9K_INT_GTT;
2150 #endif
2151 
2152 #ifdef ARN_ATH9K_HW_CAP_GTT
2153 	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
2154 		sc->sc_imask |= ATH9K_INT_CST;
2155 #endif
2156 
2157 	/*
2158 	 * Enable MIB interrupts when there are hardware phy counters.
2159 	 * Note we only do this (at the moment) for station mode.
2160 	 */
2161 #ifdef ARN_ATH9K_INT_MIB
2162 	if (ath9k_hw_phycounters(sc->sc_ah) &&
2163 	    ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
2164 	    (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
2165 		sc->sc_imask |= ATH9K_INT_MIB;
2166 #endif
2167 	/*
2168 	 * Some hardware processes the TIM IE and fires an
2169 	 * interrupt when the TIM bit is set.  For hardware
2170 	 * that does, if not overridden by configuration,
2171 	 * enable the TIM interrupt when operating as station.
2172 	 */
2173 #ifdef ARN_ATH9K_INT_TIM
2174 	if ((sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
2175 	    (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
2176 	    !sc->sc_config.swBeaconProcess)
2177 		sc->sc_imask |= ATH9K_INT_TIM;
2178 #endif
2179 	if (arn_chan2mode(init_channel) != sc->sc_curmode)
2180 		arn_setcurmode(sc, arn_chan2mode(init_channel));
2181 	ARN_DBG((ARN_DBG_INIT, "arn: "
2182 	    "%s: current mode after arn_setcurmode is %d\n",
2183 	    __func__, sc->sc_curmode));
2184 
2185 	sc->sc_isrunning = 1;
2186 
2187 	/* Disable BMISS interrupt when we're not associated */
2188 	sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
2189 	(void) ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
2190 
2191 	return (0);
2192 
2193 error:
2194 	return (error);
2195 }
2196 
2197 static void
2198 arn_close(struct arn_softc *sc)
2199 {
2200 	ieee80211com_t *ic = (ieee80211com_t *)sc;
2201 	struct ath_hal *ah = sc->sc_ah;
2202 
2203 	ARN_LOCK_ASSERT(sc);
2204 
2205 	if (!sc->sc_isrunning)
2206 		return;
2207 
2208 	/*
2209 	 * Shutdown the hardware and driver
2210 	 * Note that some of this work is not possible if the
2211 	 * hardware is gone (invalid).
2212 	 */
2213 	ARN_UNLOCK(sc);
2214 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2215 	ieee80211_stop_watchdog(ic);
2216 	ARN_LOCK(sc);
2217 
2218 	/*
2219 	 * make sure h/w will not generate any interrupt
2220 	 * before setting the invalid flag.
2221 	 */
2222 	(void) ath9k_hw_set_interrupts(ah, 0);
2223 
2224 	if (!(sc->sc_flags & SC_OP_INVALID)) {
2225 		arn_draintxq(sc, 0);
2226 		(void) arn_stoprecv(sc);
2227 		(void) ath9k_hw_phy_disable(ah);
2228 	} else {
2229 		sc->sc_rxlink = NULL;
2230 	}
2231 
2232 	sc->sc_isrunning = 0;
2233 }
2234 
2235 /*
2236  * MAC callback functions
2237  */
2238 static int
2239 arn_m_stat(void *arg, uint_t stat, uint64_t *val)
2240 {
2241 	struct arn_softc *sc = arg;
2242 	ieee80211com_t *ic = (ieee80211com_t *)sc;
2243 	struct ieee80211_node *in;
2244 	struct ieee80211_rateset *rs;
2245 
2246 	ARN_LOCK(sc);
2247 	switch (stat) {
2248 	case MAC_STAT_IFSPEED:
2249 		in = ic->ic_bss;
2250 		rs = &in->in_rates;
2251 		*val = (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) / 2 *
2252 		    1000000ull;
2253 		break;
2254 	case MAC_STAT_NOXMTBUF:
2255 		*val = sc->sc_stats.ast_tx_nobuf +
2256 		    sc->sc_stats.ast_tx_nobufmgt;
2257 		break;
2258 	case MAC_STAT_IERRORS:
2259 		*val = sc->sc_stats.ast_rx_tooshort;
2260 		break;
2261 	case MAC_STAT_RBYTES:
2262 		*val = ic->ic_stats.is_rx_bytes;
2263 		break;
2264 	case MAC_STAT_IPACKETS:
2265 		*val = ic->ic_stats.is_rx_frags;
2266 		break;
2267 	case MAC_STAT_OBYTES:
2268 		*val = ic->ic_stats.is_tx_bytes;
2269 		break;
2270 	case MAC_STAT_OPACKETS:
2271 		*val = ic->ic_stats.is_tx_frags;
2272 		break;
2273 	case MAC_STAT_OERRORS:
2274 	case WIFI_STAT_TX_FAILED:
2275 		*val = sc->sc_stats.ast_tx_fifoerr +
2276 		    sc->sc_stats.ast_tx_xretries +
2277 		    sc->sc_stats.ast_tx_discard;
2278 		break;
2279 	case WIFI_STAT_TX_RETRANS:
2280 		*val = sc->sc_stats.ast_tx_xretries;
2281 		break;
2282 	case WIFI_STAT_FCS_ERRORS:
2283 		*val = sc->sc_stats.ast_rx_crcerr;
2284 		break;
2285 	case WIFI_STAT_WEP_ERRORS:
2286 		*val = sc->sc_stats.ast_rx_badcrypt;
2287 		break;
2288 	case WIFI_STAT_TX_FRAGS:
2289 	case WIFI_STAT_MCAST_TX:
2290 	case WIFI_STAT_RTS_SUCCESS:
2291 	case WIFI_STAT_RTS_FAILURE:
2292 	case WIFI_STAT_ACK_FAILURE:
2293 	case WIFI_STAT_RX_FRAGS:
2294 	case WIFI_STAT_MCAST_RX:
2295 	case WIFI_STAT_RX_DUPS:
2296 		ARN_UNLOCK(sc);
2297 		return (ieee80211_stat(ic, stat, val));
2298 	default:
2299 		ARN_UNLOCK(sc);
2300 		return (ENOTSUP);
2301 	}
2302 	ARN_UNLOCK(sc);
2303 
2304 	return (0);
2305 }
2306 
2307 int
2308 arn_m_start(void *arg)
2309 {
2310 	struct arn_softc *sc = arg;
2311 	int err = 0;
2312 
2313 	ARN_LOCK(sc);
2314 
2315 	/*
2316 	 * Stop anything previously setup.  This is safe
2317 	 * whether this is the first time through or not.
2318 	 */
2319 
2320 	arn_close(sc);
2321 
2322 	if ((err = arn_open(sc)) != 0) {
2323 		ARN_UNLOCK(sc);
2324 		return (err);
2325 	}
2326 
2327 	/* H/W is reday now */
2328 	sc->sc_flags &= ~SC_OP_INVALID;
2329 
2330 	ARN_UNLOCK(sc);
2331 
2332 	return (0);
2333 }
2334 
2335 static void
2336 arn_m_stop(void *arg)
2337 {
2338 	struct arn_softc *sc = arg;
2339 
2340 	ARN_LOCK(sc);
2341 	arn_close(sc);
2342 
2343 	/* disable HAL and put h/w to sleep */
2344 	(void) ath9k_hw_disable(sc->sc_ah);
2345 	ath9k_hw_configpcipowersave(sc->sc_ah, 1);
2346 
2347 	/* XXX: hardware will not be ready in suspend state */
2348 	sc->sc_flags |= SC_OP_INVALID;
2349 	ARN_UNLOCK(sc);
2350 }
2351 
2352 static int
2353 arn_m_promisc(void *arg, boolean_t on)
2354 {
2355 	struct arn_softc *sc = arg;
2356 	struct ath_hal *ah = sc->sc_ah;
2357 	uint32_t rfilt;
2358 
2359 	ARN_LOCK(sc);
2360 
2361 	rfilt = ath9k_hw_getrxfilter(ah);
2362 	if (on)
2363 		rfilt |= ATH9K_RX_FILTER_PROM;
2364 	else
2365 		rfilt &= ~ATH9K_RX_FILTER_PROM;
2366 	sc->sc_promisc = on;
2367 	ath9k_hw_setrxfilter(ah, rfilt);
2368 
2369 	ARN_UNLOCK(sc);
2370 
2371 	return (0);
2372 }
2373 
2374 static int
2375 arn_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2376 {
2377 	struct arn_softc *sc = arg;
2378 	struct ath_hal *ah = sc->sc_ah;
2379 	uint32_t val, index, bit;
2380 	uint8_t pos;
2381 	uint32_t *mfilt = sc->sc_mcast_hash;
2382 
2383 	ARN_LOCK(sc);
2384 
2385 	/* calculate XOR of eight 6bit values */
2386 	val = ARN_LE_READ_32(mca + 0);
2387 	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2388 	val = ARN_LE_READ_32(mca + 3);
2389 	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2390 	pos &= 0x3f;
2391 	index = pos / 32;
2392 	bit = 1 << (pos % 32);
2393 
2394 	if (add) {	/* enable multicast */
2395 		sc->sc_mcast_refs[pos]++;
2396 		mfilt[index] |= bit;
2397 	} else {	/* disable multicast */
2398 		if (--sc->sc_mcast_refs[pos] == 0)
2399 			mfilt[index] &= ~bit;
2400 	}
2401 	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
2402 
2403 	ARN_UNLOCK(sc);
2404 	return (0);
2405 }
2406 
2407 static int
2408 arn_m_unicst(void *arg, const uint8_t *macaddr)
2409 {
2410 	struct arn_softc *sc = arg;
2411 	struct ath_hal *ah = sc->sc_ah;
2412 	ieee80211com_t *ic = (ieee80211com_t *)sc;
2413 
2414 	ARN_DBG((ARN_DBG_XMIT, "ath: ath_gld_saddr(): "
2415 	    "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
2416 	    macaddr[0], macaddr[1], macaddr[2],
2417 	    macaddr[3], macaddr[4], macaddr[5]));
2418 
2419 	ARN_LOCK(sc);
2420 	IEEE80211_ADDR_COPY(sc->sc_isc.ic_macaddr, macaddr);
2421 	(void) ath9k_hw_setmac(ah, sc->sc_isc.ic_macaddr);
2422 	(void) arn_reset(ic);
2423 	ARN_UNLOCK(sc);
2424 	return (0);
2425 }
2426 
2427 static mblk_t *
2428 arn_m_tx(void *arg, mblk_t *mp)
2429 {
2430 	struct arn_softc *sc = arg;
2431 	int error = 0;
2432 	mblk_t *next;
2433 	ieee80211com_t *ic = (ieee80211com_t *)sc;
2434 
2435 	/*
2436 	 * No data frames go out unless we're associated; this
2437 	 * should not happen as the 802.11 layer does not enable
2438 	 * the xmit queue until we enter the RUN state.
2439 	 */
2440 	if (ic->ic_state != IEEE80211_S_RUN) {
2441 		ARN_DBG((ARN_DBG_XMIT, "arn: arn_m_tx(): "
2442 		    "discard, state %u\n", ic->ic_state));
2443 		sc->sc_stats.ast_tx_discard++;
2444 		freemsgchain(mp);
2445 		return (NULL);
2446 	}
2447 
2448 	while (mp != NULL) {
2449 		next = mp->b_next;
2450 		mp->b_next = NULL;
2451 		error = arn_tx(ic, mp, IEEE80211_FC0_TYPE_DATA);
2452 		if (error != 0) {
2453 			mp->b_next = next;
2454 			if (error == ENOMEM) {
2455 				break;
2456 			} else {
2457 				freemsgchain(mp);
2458 				return (NULL);
2459 			}
2460 		}
2461 		mp = next;
2462 	}
2463 
2464 	return (mp);
2465 }
2466 
2467 static void
2468 arn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2469 {
2470 	struct arn_softc *sc = arg;
2471 	int32_t err;
2472 
2473 	err = ieee80211_ioctl(&sc->sc_isc, wq, mp);
2474 
2475 	ARN_LOCK(sc);
2476 	if (err == ENETRESET) {
2477 		if (!(sc->sc_flags & SC_OP_INVALID)) {
2478 			ARN_UNLOCK(sc);
2479 
2480 			(void) arn_m_start(sc);
2481 
2482 			(void) ieee80211_new_state(&sc->sc_isc,
2483 			    IEEE80211_S_SCAN, -1);
2484 			ARN_LOCK(sc);
2485 		}
2486 	}
2487 	ARN_UNLOCK(sc);
2488 }
2489 
2490 static int
2491 arn_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2492     uint_t wldp_length, const void *wldp_buf)
2493 {
2494 	struct arn_softc *sc = arg;
2495 	int	err;
2496 
2497 	err = ieee80211_setprop(&sc->sc_isc, pr_name, wldp_pr_num,
2498 	    wldp_length, wldp_buf);
2499 
2500 	ARN_LOCK(sc);
2501 
2502 	if (err == ENETRESET) {
2503 		if (!(sc->sc_flags & SC_OP_INVALID)) {
2504 			ARN_UNLOCK(sc);
2505 			(void) arn_m_start(sc);
2506 			(void) ieee80211_new_state(&sc->sc_isc,
2507 			    IEEE80211_S_SCAN, -1);
2508 			ARN_LOCK(sc);
2509 		}
2510 		err = 0;
2511 	}
2512 
2513 	ARN_UNLOCK(sc);
2514 
2515 	return (err);
2516 }
2517 
2518 /* ARGSUSED */
2519 static int
2520 arn_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2521     uint_t pr_flags, uint_t wldp_length, void *wldp_buf, uint_t *perm)
2522 {
2523 	struct arn_softc *sc = arg;
2524 	int	err = 0;
2525 
2526 	err = ieee80211_getprop(&sc->sc_isc, pr_name, wldp_pr_num,
2527 	    pr_flags, wldp_length, wldp_buf, perm);
2528 
2529 	return (err);
2530 }
2531 
2532 /* return bus cachesize in 4B word units */
2533 static void
2534 arn_pci_config_cachesize(struct arn_softc *sc)
2535 {
2536 	uint8_t csz;
2537 
2538 	/*
2539 	 * Cache line size is used to size and align various
2540 	 * structures used to communicate with the hardware.
2541 	 */
2542 	csz = pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ);
2543 	if (csz == 0) {
2544 		/*
2545 		 * We must have this setup properly for rx buffer
2546 		 * DMA to work so force a reasonable value here if it
2547 		 * comes up zero.
2548 		 */
2549 		csz = ATH_DEF_CACHE_BYTES / sizeof (uint32_t);
2550 		pci_config_put8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ,
2551 		    csz);
2552 	}
2553 	sc->sc_cachelsz = csz << 2;
2554 }
2555 
2556 static int
2557 arn_pci_setup(struct arn_softc *sc)
2558 {
2559 	uint16_t command;
2560 
2561 	/*
2562 	 * Enable memory mapping and bus mastering
2563 	 */
2564 	ASSERT(sc != NULL);
2565 	command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
2566 	command	|= PCI_COMM_MAE | PCI_COMM_ME;
2567 	pci_config_put16(sc->sc_cfg_handle, PCI_CONF_COMM, command);
2568 	command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
2569 	if ((command & PCI_COMM_MAE) == 0) {
2570 		arn_problem("arn: arn_pci_setup(): "
2571 		    "failed to enable memory mapping\n");
2572 		return (EIO);
2573 	}
2574 	if ((command & PCI_COMM_ME) == 0) {
2575 		arn_problem("arn: arn_pci_setup(): "
2576 		    "failed to enable bus mastering\n");
2577 		return (EIO);
2578 	}
2579 	ARN_DBG((ARN_DBG_INIT, "arn: arn_pci_setup(): "
2580 	    "set command reg to 0x%x \n", command));
2581 
2582 	return (0);
2583 }
2584 
2585 static void
2586 arn_get_hw_encap(struct arn_softc *sc)
2587 {
2588 	ieee80211com_t *ic;
2589 	struct ath_hal *ah;
2590 
2591 	ic = (ieee80211com_t *)sc;
2592 	ah = sc->sc_ah;
2593 
2594 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2595 	    ATH9K_CIPHER_AES_CCM, NULL))
2596 		ic->ic_caps |= IEEE80211_C_AES_CCM;
2597 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2598 	    ATH9K_CIPHER_AES_OCB, NULL))
2599 		ic->ic_caps |= IEEE80211_C_AES;
2600 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2601 	    ATH9K_CIPHER_TKIP, NULL))
2602 		ic->ic_caps |= IEEE80211_C_TKIP;
2603 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2604 	    ATH9K_CIPHER_WEP, NULL))
2605 		ic->ic_caps |= IEEE80211_C_WEP;
2606 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2607 	    ATH9K_CIPHER_MIC, NULL))
2608 		ic->ic_caps |= IEEE80211_C_TKIPMIC;
2609 }
2610 
2611 static void
2612 arn_setup_ht_cap(struct arn_softc *sc)
2613 {
2614 #define	ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3	/* 2 ^ 16 */
2615 #define	ATH9K_HT_CAP_MPDUDENSITY_8 0x6		/* 8 usec */
2616 
2617 	/* LINTED E_FUNC_SET_NOT_USED */
2618 	uint8_t tx_streams;
2619 	uint8_t rx_streams;
2620 
2621 	arn_ht_conf *ht_info = &sc->sc_ht_conf;
2622 
2623 	ht_info->ht_supported = B_TRUE;
2624 
2625 	/* Todo: IEEE80211_HTCAP_SMPS */
2626 	ht_info->cap = IEEE80211_HTCAP_CHWIDTH40|
2627 	    IEEE80211_HTCAP_SHORTGI40 |
2628 	    IEEE80211_HTCAP_DSSSCCK40;
2629 
2630 	ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
2631 	ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;
2632 
2633 	/* set up supported mcs set */
2634 	(void) memset(&ht_info->rx_mcs_mask, 0, sizeof (ht_info->rx_mcs_mask));
2635 	tx_streams =
2636 	    !(sc->sc_ah->ah_caps.tx_chainmask &
2637 	    (sc->sc_ah->ah_caps.tx_chainmask - 1)) ? 1 : 2;
2638 	rx_streams =
2639 	    !(sc->sc_ah->ah_caps.rx_chainmask &
2640 	    (sc->sc_ah->ah_caps.rx_chainmask - 1)) ? 1 : 2;
2641 
2642 	ht_info->rx_mcs_mask[0] = 0xff;
2643 	if (rx_streams >= 2)
2644 		ht_info->rx_mcs_mask[1] = 0xff;
2645 }
2646 
2647 /* xxx should be used for ht rate set negotiating ? */
2648 static void
2649 arn_overwrite_11n_rateset(struct arn_softc *sc)
2650 {
2651 	uint8_t *ht_rs = sc->sc_ht_conf.rx_mcs_mask;
2652 	int mcs_idx, mcs_count = 0;
2653 	int i, j;
2654 
2655 	(void) memset(&ieee80211_rateset_11n, 0,
2656 	    sizeof (ieee80211_rateset_11n));
2657 	for (i = 0; i < 10; i++) {
2658 		for (j = 0; j < 8; j++) {
2659 			if (ht_rs[i] & (1 << j)) {
2660 				mcs_idx = i * 8 + j;
2661 				if (mcs_idx >= IEEE80211_HTRATE_MAXSIZE) {
2662 					break;
2663 				}
2664 
2665 				ieee80211_rateset_11n.rs_rates[mcs_idx] =
2666 				    (uint8_t)mcs_idx;
2667 				mcs_count++;
2668 			}
2669 		}
2670 	}
2671 
2672 	ieee80211_rateset_11n.rs_nrates = (uint8_t)mcs_count;
2673 
2674 	ARN_DBG((ARN_DBG_RATE, "arn_overwrite_11n_rateset(): "
2675 	    "MCS rate set supported by this station is as follows:\n"));
2676 
2677 	for (i = 0; i < ieee80211_rateset_11n.rs_nrates; i++) {
2678 		ARN_DBG((ARN_DBG_RATE, "MCS rate %d is %d\n",
2679 		    i, ieee80211_rateset_11n.rs_rates[i]));
2680 	}
2681 
2682 }
2683 
2684 /*
2685  * Update WME parameters for a transmit queue.
2686  */
2687 static int
2688 arn_tx_queue_update(struct arn_softc *sc, int ac)
2689 {
2690 #define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2691 #define	ATH_TXOP_TO_US(v)		(v<<5)
2692 	ieee80211com_t *ic = (ieee80211com_t *)sc;
2693 	struct ath_txq *txq;
2694 	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
2695 	struct ath_hal *ah = sc->sc_ah;
2696 	struct ath9k_tx_queue_info qi;
2697 
2698 	txq = &sc->sc_txq[arn_get_hal_qnum(ac, sc)];
2699 	(void) ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi);
2700 
2701 	/*
2702 	 * TXQ_FLAG_TXOKINT_ENABLE = 0x0001
2703 	 * TXQ_FLAG_TXERRINT_ENABLE = 0x0001
2704 	 * TXQ_FLAG_TXDESCINT_ENABLE = 0x0002
2705 	 * TXQ_FLAG_TXEOLINT_ENABLE = 0x0004
2706 	 * TXQ_FLAG_TXURNINT_ENABLE = 0x0008
2707 	 * TXQ_FLAG_BACKOFF_DISABLE = 0x0010
2708 	 * TXQ_FLAG_COMPRESSION_ENABLE = 0x0020
2709 	 * TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040
2710 	 * TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080
2711 	 */
2712 
2713 	/* xxx should update these flags here? */
2714 #if 0
2715 	qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
2716 	    TXQ_FLAG_TXERRINT_ENABLE |
2717 	    TXQ_FLAG_TXDESCINT_ENABLE |
2718 	    TXQ_FLAG_TXURNINT_ENABLE;
2719 #endif
2720 
2721 	qi.tqi_aifs = wmep->wmep_aifsn;
2722 	qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2723 	qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2724 	qi.tqi_readyTime = 0;
2725 	qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
2726 
2727 	ARN_DBG((ARN_DBG_INIT,
2728 	    "%s:"
2729 	    "Q%u"
2730 	    "qflags 0x%x"
2731 	    "aifs %u"
2732 	    "cwmin %u"
2733 	    "cwmax %u"
2734 	    "burstTime %u\n",
2735 	    __func__,
2736 	    txq->axq_qnum,
2737 	    qi.tqi_qflags,
2738 	    qi.tqi_aifs,
2739 	    qi.tqi_cwmin,
2740 	    qi.tqi_cwmax,
2741 	    qi.tqi_burstTime));
2742 
2743 	if (!ath9k_hw_set_txq_props(ah, txq->axq_qnum, &qi)) {
2744 		arn_problem("unable to update hardware queue "
2745 		    "parameters for %s traffic!\n",
2746 		    ieee80211_wme_acnames[ac]);
2747 		return (0);
2748 	} else {
2749 		/* push to H/W */
2750 		(void) ath9k_hw_resettxqueue(ah, txq->axq_qnum);
2751 		return (1);
2752 	}
2753 
2754 #undef ATH_TXOP_TO_US
2755 #undef ATH_EXPONENT_TO_VALUE
2756 }
2757 
2758 /* Update WME parameters */
2759 static int
2760 arn_wme_update(ieee80211com_t *ic)
2761 {
2762 	struct arn_softc *sc = (struct arn_softc *)ic;
2763 
2764 	/* updateing */
2765 	return (!arn_tx_queue_update(sc, WME_AC_BE) ||
2766 	    !arn_tx_queue_update(sc, WME_AC_BK) ||
2767 	    !arn_tx_queue_update(sc, WME_AC_VI) ||
2768 	    !arn_tx_queue_update(sc, WME_AC_VO) ? EIO : 0);
2769 }
2770 
2771 /*
2772  * Update tx/rx chainmask. For legacy association,
2773  * hard code chainmask to 1x1, for 11n association, use
2774  * the chainmask configuration.
2775  */
2776 void
2777 arn_update_chainmask(struct arn_softc *sc)
2778 {
2779 	boolean_t is_ht = B_FALSE;
2780 	sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
2781 
2782 	is_ht = sc->sc_ht_conf.ht_supported;
2783 	if (is_ht) {
2784 		sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
2785 		sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
2786 	} else {
2787 		sc->sc_tx_chainmask = 1;
2788 		sc->sc_rx_chainmask = 1;
2789 	}
2790 
2791 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2792 	    "tx_chainmask = %d, rx_chainmask = %d\n",
2793 	    sc->sc_tx_chainmask, sc->sc_rx_chainmask));
2794 }
2795 
2796 static int
2797 arn_resume(dev_info_t *devinfo)
2798 {
2799 	struct arn_softc *sc;
2800 	int ret = DDI_SUCCESS;
2801 
2802 	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
2803 	if (sc == NULL) {
2804 		ARN_DBG((ARN_DBG_INIT, "ath: ath_resume(): "
2805 		    "failed to get soft state\n"));
2806 		return (DDI_FAILURE);
2807 	}
2808 
2809 	ARN_LOCK(sc);
2810 	/*
2811 	 * Set up config space command register(s). Refuse
2812 	 * to resume on failure.
2813 	 */
2814 	if (arn_pci_setup(sc) != 0) {
2815 		ARN_DBG((ARN_DBG_INIT, "ath: ath_resume(): "
2816 		    "ath_pci_setup() failed\n"));
2817 		ARN_UNLOCK(sc);
2818 		return (DDI_FAILURE);
2819 	}
2820 
2821 	if (!(sc->sc_flags & SC_OP_INVALID))
2822 		ret = arn_open(sc);
2823 	ARN_UNLOCK(sc);
2824 
2825 	return (ret);
2826 }
2827 
2828 static int
2829 arn_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2830 {
2831 	struct arn_softc *sc;
2832 	int		instance;
2833 	int		status;
2834 	int32_t		err;
2835 	uint16_t	vendor_id;
2836 	uint16_t	device_id;
2837 	uint32_t	i;
2838 	uint32_t	val;
2839 	char		strbuf[32];
2840 	ieee80211com_t *ic;
2841 	struct ath_hal *ah;
2842 	wifi_data_t wd = { 0 };
2843 	mac_register_t *macp;
2844 
2845 	switch (cmd) {
2846 	case DDI_ATTACH:
2847 		break;
2848 	case DDI_RESUME:
2849 		return (arn_resume(devinfo));
2850 	default:
2851 		return (DDI_FAILURE);
2852 	}
2853 
2854 	instance = ddi_get_instance(devinfo);
2855 	if (ddi_soft_state_zalloc(arn_soft_state_p, instance) != DDI_SUCCESS) {
2856 		ARN_DBG((ARN_DBG_ATTACH, "arn: "
2857 		    "%s: Unable to alloc softstate\n", __func__));
2858 		return (DDI_FAILURE);
2859 	}
2860 
2861 	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
2862 	ic = (ieee80211com_t *)sc;
2863 	sc->sc_dev = devinfo;
2864 
2865 	mutex_init(&sc->sc_genlock, NULL, MUTEX_DRIVER, NULL);
2866 	mutex_init(&sc->sc_serial_rw, NULL, MUTEX_DRIVER, NULL);
2867 	mutex_init(&sc->sc_txbuflock, NULL, MUTEX_DRIVER, NULL);
2868 	mutex_init(&sc->sc_rxbuflock, NULL, MUTEX_DRIVER, NULL);
2869 	mutex_init(&sc->sc_resched_lock, NULL, MUTEX_DRIVER, NULL);
2870 #ifdef ARN_IBSS
2871 	mutex_init(&sc->sc_bcbuflock, NULL, MUTEX_DRIVER, NULL);
2872 #endif
2873 
2874 	sc->sc_flags |= SC_OP_INVALID;
2875 
2876 	err = pci_config_setup(devinfo, &sc->sc_cfg_handle);
2877 	if (err != DDI_SUCCESS) {
2878 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2879 		    "pci_config_setup() failed"));
2880 		goto attach_fail0;
2881 	}
2882 
2883 	if (arn_pci_setup(sc) != 0)
2884 		goto attach_fail1;
2885 
2886 	/* Cache line size set up */
2887 	arn_pci_config_cachesize(sc);
2888 
2889 	vendor_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_VENID);
2890 	device_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_DEVID);
2891 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): vendor 0x%x, "
2892 	    "device id 0x%x, cache size %d\n",
2893 	    vendor_id, device_id,
2894 	    pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ)));
2895 
2896 	pci_config_put8(sc->sc_cfg_handle, PCI_CONF_LATENCY_TIMER, 0xa8);
2897 	val = pci_config_get32(sc->sc_cfg_handle, 0x40);
2898 	if ((val & 0x0000ff00) != 0)
2899 		pci_config_put32(sc->sc_cfg_handle, 0x40, val & 0xffff00ff);
2900 
2901 	err = ddi_regs_map_setup(devinfo, 1,
2902 	    &sc->mem, 0, 0, &arn_reg_accattr, &sc->sc_io_handle);
2903 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2904 	    "regs map1 = %x err=%d\n", sc->mem, err));
2905 	if (err != DDI_SUCCESS) {
2906 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2907 		    "ddi_regs_map_setup() failed"));
2908 		goto attach_fail1;
2909 	}
2910 
2911 	ah = ath9k_hw_attach(device_id, sc, sc->mem, &status);
2912 	if (ah == NULL) {
2913 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2914 		    "unable to attach hw: H/W status %u\n",
2915 		    status));
2916 		goto attach_fail2;
2917 	}
2918 	sc->sc_ah = ah;
2919 
2920 	ath9k_hw_getmac(ah, ic->ic_macaddr);
2921 
2922 	/* Get the hardware key cache size. */
2923 	sc->sc_keymax = ah->ah_caps.keycache_size;
2924 	if (sc->sc_keymax > ATH_KEYMAX) {
2925 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2926 		    "Warning, using only %u entries in %u key cache\n",
2927 		    ATH_KEYMAX, sc->sc_keymax));
2928 		sc->sc_keymax = ATH_KEYMAX;
2929 	}
2930 
2931 	/*
2932 	 * Reset the key cache since some parts do not
2933 	 * reset the contents on initial power up.
2934 	 */
2935 	for (i = 0; i < sc->sc_keymax; i++)
2936 		(void) ath9k_hw_keyreset(ah, (uint16_t)i);
2937 	/*
2938 	 * Mark key cache slots associated with global keys
2939 	 * as in use.  If we knew TKIP was not to be used we
2940 	 * could leave the +32, +64, and +32+64 slots free.
2941 	 * XXX only for splitmic.
2942 	 */
2943 	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
2944 		set_bit(i, sc->sc_keymap);
2945 		set_bit(i + 32, sc->sc_keymap);
2946 		set_bit(i + 64, sc->sc_keymap);
2947 		set_bit(i + 32 + 64, sc->sc_keymap);
2948 	}
2949 
2950 	/* Collect the channel list using the default country code */
2951 	err = arn_setup_channels(sc);
2952 	if (err == EINVAL) {
2953 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2954 		    "ERR:arn_setup_channels\n"));
2955 		goto attach_fail3;
2956 	}
2957 
2958 	/* default to STA mode */
2959 	sc->sc_ah->ah_opmode = ATH9K_M_STA;
2960 
2961 	/* Setup rate tables */
2962 	arn_rate_attach(sc);
2963 	arn_setup_rates(sc, IEEE80211_MODE_11A);
2964 	arn_setup_rates(sc, IEEE80211_MODE_11B);
2965 	arn_setup_rates(sc, IEEE80211_MODE_11G);
2966 
2967 	/* Setup current mode here */
2968 	arn_setcurmode(sc, ATH9K_MODE_11G);
2969 
2970 	/* 802.11g features */
2971 	if (sc->sc_have11g)
2972 		ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
2973 		    IEEE80211_C_SHSLOT;		/* short slot time */
2974 
2975 	/* Temp workaround */
2976 	sc->sc_mrretry = 1;
2977 	sc->sc_config.ath_aggr_prot = 0;
2978 
2979 	/* Setup tx/rx descriptors */
2980 	err = arn_desc_alloc(devinfo, sc);
2981 	if (err != DDI_SUCCESS) {
2982 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2983 		    "failed to allocate descriptors: %d\n", err));
2984 		goto attach_fail3;
2985 	}
2986 
2987 	if ((sc->sc_tq = ddi_taskq_create(devinfo, "ath_taskq", 1,
2988 	    TASKQ_DEFAULTPRI, 0)) == NULL) {
2989 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2990 		    "ERR:ddi_taskq_create\n"));
2991 		goto attach_fail4;
2992 	}
2993 
2994 	/*
2995 	 * Allocate hardware transmit queues: one queue for
2996 	 * beacon frames and one data queue for each QoS
2997 	 * priority.  Note that the hal handles reseting
2998 	 * these queues at the needed time.
2999 	 */
3000 #ifdef ARN_IBSS
3001 	sc->sc_beaconq = arn_beaconq_setup(ah);
3002 	if (sc->sc_beaconq == (-1)) {
3003 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3004 		    "unable to setup a beacon xmit queue\n"));
3005 		goto attach_fail4;
3006 	}
3007 #endif
3008 #ifdef ARN_HOSTAP
3009 	sc->sc_cabq = arn_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
3010 	if (sc->sc_cabq == NULL) {
3011 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3012 		    "unable to setup CAB xmit queue\n"));
3013 		goto attach_fail4;
3014 	}
3015 
3016 	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
3017 	ath_cabq_update(sc);
3018 #endif
3019 
3020 	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
3021 		sc->sc_haltype2q[i] = -1;
3022 
3023 	/* Setup data queues */
3024 	/* NB: ensure BK queue is the lowest priority h/w queue */
3025 	if (!arn_tx_setup(sc, ATH9K_WME_AC_BK)) {
3026 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3027 		    "unable to setup xmit queue for BK traffic\n"));
3028 		goto attach_fail4;
3029 	}
3030 	if (!arn_tx_setup(sc, ATH9K_WME_AC_BE)) {
3031 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3032 		    "unable to setup xmit queue for BE traffic\n"));
3033 		goto attach_fail4;
3034 	}
3035 	if (!arn_tx_setup(sc, ATH9K_WME_AC_VI)) {
3036 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3037 		    "unable to setup xmit queue for VI traffic\n"));
3038 		goto attach_fail4;
3039 	}
3040 	if (!arn_tx_setup(sc, ATH9K_WME_AC_VO)) {
3041 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3042 		    "unable to setup xmit queue for VO traffic\n"));
3043 		goto attach_fail4;
3044 	}
3045 
3046 	/*
3047 	 * Initializes the noise floor to a reasonable default value.
3048 	 * Later on this will be updated during ANI processing.
3049 	 */
3050 
3051 	sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;
3052 
3053 
3054 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3055 	    ATH9K_CIPHER_TKIP, NULL)) {
3056 		/*
3057 		 * Whether we should enable h/w TKIP MIC.
3058 		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
3059 		 * report WMM capable, so it's always safe to turn on
3060 		 * TKIP MIC in this case.
3061 		 */
3062 		(void) ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
3063 		    0, 1, NULL);
3064 	}
3065 
3066 	/* Get cipher releated capability information */
3067 	arn_get_hw_encap(sc);
3068 
3069 	/*
3070 	 * Check whether the separate key cache entries
3071 	 * are required to handle both tx+rx MIC keys.
3072 	 * With split mic keys the number of stations is limited
3073 	 * to 27 otherwise 59.
3074 	 */
3075 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3076 	    ATH9K_CIPHER_TKIP, NULL) &&
3077 	    ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3078 	    ATH9K_CIPHER_MIC, NULL) &&
3079 	    ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
3080 	    0, NULL))
3081 		sc->sc_splitmic = 1;
3082 
3083 	/* turn on mcast key search if possible */
3084 	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
3085 		(void) ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
3086 		    1, NULL);
3087 
3088 	sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
3089 	sc->sc_config.txpowlimit_override = 0;
3090 
3091 	/* 11n Capabilities */
3092 	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
3093 		sc->sc_flags |= SC_OP_TXAGGR;
3094 		sc->sc_flags |= SC_OP_RXAGGR;
3095 		arn_setup_ht_cap(sc);
3096 		arn_overwrite_11n_rateset(sc);
3097 	}
3098 
3099 	sc->sc_tx_chainmask = 1;
3100 	sc->sc_rx_chainmask = 1;
3101 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3102 	    "tx_chainmask = %d, rx_chainmask = %d\n",
3103 	    sc->sc_tx_chainmask, sc->sc_rx_chainmask));
3104 
3105 	/* arn_update_chainmask(sc); */
3106 
3107 	(void) ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, B_TRUE, NULL);
3108 	sc->sc_defant = ath9k_hw_getdefantenna(ah);
3109 
3110 	ath9k_hw_getmac(ah, sc->sc_myaddr);
3111 	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
3112 		ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
3113 		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
3114 		(void) ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
3115 	}
3116 
3117 	/* set default value to short slot time */
3118 	sc->sc_slottime = ATH9K_SLOT_TIME_9;
3119 	(void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
3120 
3121 	/* initialize beacon slots */
3122 	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
3123 		sc->sc_bslot[i] = ATH_IF_ID_ANY;
3124 
3125 	/* Save MISC configurations */
3126 	sc->sc_config.swBeaconProcess = 1;
3127 
3128 	/* Support QoS/WME */
3129 	ic->ic_caps |= IEEE80211_C_WME;
3130 	ic->ic_wme.wme_update = arn_wme_update;
3131 
3132 	/* Support 802.11n/HT */
3133 	if (sc->sc_ht_conf.ht_supported) {
3134 		ic->ic_htcaps =
3135 		    IEEE80211_HTCAP_CHWIDTH40 |
3136 		    IEEE80211_HTCAP_SHORTGI40 |
3137 		    IEEE80211_HTCAP_DSSSCCK40 |
3138 		    IEEE80211_HTCAP_MAXAMSDU_7935 |
3139 		    IEEE80211_HTC_HT |
3140 		    IEEE80211_HTC_AMSDU |
3141 		    IEEE80211_HTCAP_RXSTBC_2STREAM;
3142 
3143 #ifdef ARN_TX_AGGREGATION
3144 	ic->ic_htcaps |= IEEE80211_HTC_AMPDU;
3145 #endif
3146 	}
3147 
3148 	/* Header padding requested by driver */
3149 	ic->ic_flags |= IEEE80211_F_DATAPAD;
3150 	/* Support WPA/WPA2 */
3151 	ic->ic_caps |= IEEE80211_C_WPA;
3152 #if 0
3153 	ic->ic_caps |= IEEE80211_C_TXFRAG; /* handle tx frags */
3154 	ic->ic_caps |= IEEE80211_C_BGSCAN; /* capable of bg scanning */
3155 #endif
3156 	ic->ic_phytype = IEEE80211_T_HT;
3157 	ic->ic_opmode = IEEE80211_M_STA;
3158 	ic->ic_state = IEEE80211_S_INIT;
3159 	ic->ic_maxrssi = ARN_MAX_RSSI;
3160 	ic->ic_set_shortslot = arn_set_shortslot;
3161 	ic->ic_xmit = arn_tx;
3162 	ieee80211_attach(ic);
3163 
3164 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3165 	    "ic->ic_curchan->ich_freq: %d\n", ic->ic_curchan->ich_freq));
3166 
3167 	/* different instance has different WPA door */
3168 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
3169 	    ddi_driver_name(devinfo),
3170 	    ddi_get_instance(devinfo));
3171 
3172 	if (sc->sc_ht_conf.ht_supported) {
3173 		sc->sc_recv_action = ic->ic_recv_action;
3174 		ic->ic_recv_action = arn_ampdu_recv_action;
3175 		// sc->sc_send_action = ic->ic_send_action;
3176 		// ic->ic_send_action = arn_ampdu_send_action;
3177 
3178 		ic->ic_ampdu_rxmax = sc->sc_ht_conf.ampdu_factor;
3179 		ic->ic_ampdu_density = sc->sc_ht_conf.ampdu_density;
3180 		ic->ic_ampdu_limit = ic->ic_ampdu_rxmax;
3181 	}
3182 
3183 	/* Override 80211 default routines */
3184 	sc->sc_newstate = ic->ic_newstate;
3185 	ic->ic_newstate = arn_newstate;
3186 #ifdef ARN_IBSS
3187 	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
3188 	ic->ic_recv_mgmt = arn_recv_mgmt;
3189 #endif
3190 	ic->ic_watchdog = arn_watchdog;
3191 	ic->ic_node_alloc = arn_node_alloc;
3192 	ic->ic_node_free = arn_node_free;
3193 	ic->ic_crypto.cs_key_alloc = arn_key_alloc;
3194 	ic->ic_crypto.cs_key_delete = arn_key_delete;
3195 	ic->ic_crypto.cs_key_set = arn_key_set;
3196 
3197 	ieee80211_media_init(ic);
3198 
3199 	/*
3200 	 * initialize default tx key
3201 	 */
3202 	ic->ic_def_txkey = 0;
3203 
3204 	sc->sc_rx_pend = 0;
3205 	(void) ath9k_hw_set_interrupts(sc->sc_ah, 0);
3206 	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW,
3207 	    &sc->sc_softint_id, NULL, 0, arn_softint_handler, (caddr_t)sc);
3208 	if (err != DDI_SUCCESS) {
3209 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3210 		    "ddi_add_softintr() failed....\n"));
3211 		goto attach_fail5;
3212 	}
3213 
3214 	if (ddi_get_iblock_cookie(devinfo, 0, &sc->sc_iblock)
3215 	    != DDI_SUCCESS) {
3216 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3217 		    "Can not get iblock cookie for INT\n"));
3218 		goto attach_fail6;
3219 	}
3220 
3221 	if (ddi_add_intr(devinfo, 0, NULL, NULL, arn_isr,
3222 	    (caddr_t)sc) != DDI_SUCCESS) {
3223 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3224 		    "Can not set intr for ARN driver\n"));
3225 		goto attach_fail6;
3226 	}
3227 
3228 	/*
3229 	 * Provide initial settings for the WiFi plugin; whenever this
3230 	 * information changes, we need to call mac_plugindata_update()
3231 	 */
3232 	wd.wd_opmode = ic->ic_opmode;
3233 	wd.wd_secalloc = WIFI_SEC_NONE;
3234 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid);
3235 
3236 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3237 	    "IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid)"
3238 	    "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
3239 	    wd.wd_bssid[0], wd.wd_bssid[1], wd.wd_bssid[2],
3240 	    wd.wd_bssid[3], wd.wd_bssid[4], wd.wd_bssid[5]));
3241 
3242 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
3243 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3244 		    "MAC version mismatch\n"));
3245 		goto attach_fail7;
3246 	}
3247 
3248 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
3249 	macp->m_driver		= sc;
3250 	macp->m_dip		= devinfo;
3251 	macp->m_src_addr	= ic->ic_macaddr;
3252 	macp->m_callbacks	= &arn_m_callbacks;
3253 	macp->m_min_sdu		= 0;
3254 	macp->m_max_sdu		= IEEE80211_MTU;
3255 	macp->m_pdata		= &wd;
3256 	macp->m_pdata_size	= sizeof (wd);
3257 
3258 	err = mac_register(macp, &ic->ic_mach);
3259 	mac_free(macp);
3260 	if (err != 0) {
3261 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3262 		    "mac_register err %x\n", err));
3263 		goto attach_fail7;
3264 	}
3265 
3266 	/* Create minor node of type DDI_NT_NET_WIFI */
3267 	(void) snprintf(strbuf, sizeof (strbuf), "%s%d",
3268 	    ARN_NODENAME, instance);
3269 	err = ddi_create_minor_node(devinfo, strbuf, S_IFCHR,
3270 	    instance + 1, DDI_NT_NET_WIFI, 0);
3271 	if (err != DDI_SUCCESS)
3272 		ARN_DBG((ARN_DBG_ATTACH, "WARN: arn: arn_attach(): "
3273 		    "Create minor node failed - %d\n", err));
3274 
3275 	/* Notify link is down now */
3276 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
3277 
3278 	sc->sc_promisc = B_FALSE;
3279 	bzero(sc->sc_mcast_refs, sizeof (sc->sc_mcast_refs));
3280 	bzero(sc->sc_mcast_hash, sizeof (sc->sc_mcast_hash));
3281 
3282 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3283 	    "Atheros AR%s MAC/BB Rev:%x "
3284 	    "AR%s RF Rev:%x: mem=0x%lx\n",
3285 	    arn_mac_bb_name(ah->ah_macVersion),
3286 	    ah->ah_macRev,
3287 	    arn_rf_name((ah->ah_analog5GhzRev & AR_RADIO_SREV_MAJOR)),
3288 	    ah->ah_phyRev,
3289 	    (unsigned long)sc->mem));
3290 
3291 	/* XXX: hardware will not be ready until arn_open() being called */
3292 	sc->sc_flags |= SC_OP_INVALID;
3293 	sc->sc_isrunning = 0;
3294 
3295 	return (DDI_SUCCESS);
3296 
3297 attach_fail7:
3298 	ddi_remove_intr(devinfo, 0, sc->sc_iblock);
3299 attach_fail6:
3300 	ddi_remove_softintr(sc->sc_softint_id);
3301 attach_fail5:
3302 	(void) ieee80211_detach(ic);
3303 attach_fail4:
3304 	arn_desc_free(sc);
3305 	if (sc->sc_tq)
3306 		ddi_taskq_destroy(sc->sc_tq);
3307 attach_fail3:
3308 	ath9k_hw_detach(ah);
3309 attach_fail2:
3310 	ddi_regs_map_free(&sc->sc_io_handle);
3311 attach_fail1:
3312 	pci_config_teardown(&sc->sc_cfg_handle);
3313 attach_fail0:
3314 	sc->sc_flags |= SC_OP_INVALID;
3315 	/* cleanup tx queues */
3316 	mutex_destroy(&sc->sc_txbuflock);
3317 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3318 		if (ARN_TXQ_SETUP(sc, i)) {
3319 			/* arn_tx_cleanupq(asc, &asc->sc_txq[i]); */
3320 			mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
3321 		}
3322 	}
3323 	mutex_destroy(&sc->sc_rxbuflock);
3324 	mutex_destroy(&sc->sc_serial_rw);
3325 	mutex_destroy(&sc->sc_genlock);
3326 	mutex_destroy(&sc->sc_resched_lock);
3327 #ifdef ARN_IBSS
3328 	mutex_destroy(&sc->sc_bcbuflock);
3329 #endif
3330 
3331 	ddi_soft_state_free(arn_soft_state_p, instance);
3332 
3333 	return (DDI_FAILURE);
3334 
3335 }
3336 
3337 /*
3338  * Suspend transmit/receive for powerdown
3339  */
3340 static int
3341 arn_suspend(struct arn_softc *sc)
3342 {
3343 	ARN_LOCK(sc);
3344 	arn_close(sc);
3345 	ARN_UNLOCK(sc);
3346 
3347 	return (DDI_SUCCESS);
3348 }
3349 
3350 static int32_t
3351 arn_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
3352 {
3353 	struct arn_softc *sc;
3354 	int i;
3355 
3356 	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
3357 	ASSERT(sc != NULL);
3358 
3359 	switch (cmd) {
3360 	case DDI_DETACH:
3361 		break;
3362 
3363 	case DDI_SUSPEND:
3364 		return (arn_suspend(sc));
3365 
3366 	default:
3367 		return (DDI_FAILURE);
3368 	}
3369 
3370 	if (mac_disable(sc->sc_isc.ic_mach) != 0)
3371 		return (DDI_FAILURE);
3372 
3373 	arn_stop_scantimer(sc);
3374 	arn_stop_caltimer(sc);
3375 
3376 	/* disable interrupts */
3377 	(void) ath9k_hw_set_interrupts(sc->sc_ah, 0);
3378 
3379 	/*
3380 	 * Unregister from the MAC layer subsystem
3381 	 */
3382 	(void) mac_unregister(sc->sc_isc.ic_mach);
3383 
3384 	/* free intterrupt resources */
3385 	ddi_remove_intr(devinfo, 0, sc->sc_iblock);
3386 	ddi_remove_softintr(sc->sc_softint_id);
3387 
3388 	/*
3389 	 * NB: the order of these is important:
3390 	 * o call the 802.11 layer before detaching the hal to
3391 	 *   insure callbacks into the driver to delete global
3392 	 *   key cache entries can be handled
3393 	 * o reclaim the tx queue data structures after calling
3394 	 *   the 802.11 layer as we'll get called back to reclaim
3395 	 *   node state and potentially want to use them
3396 	 * o to cleanup the tx queues the hal is called, so detach
3397 	 *   it last
3398 	 */
3399 	ieee80211_detach(&sc->sc_isc);
3400 
3401 	arn_desc_free(sc);
3402 
3403 	ddi_taskq_destroy(sc->sc_tq);
3404 
3405 	if (!(sc->sc_flags & SC_OP_INVALID))
3406 		(void) ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
3407 
3408 	/* cleanup tx queues */
3409 	mutex_destroy(&sc->sc_txbuflock);
3410 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3411 		if (ARN_TXQ_SETUP(sc, i)) {
3412 			arn_tx_cleanupq(sc, &sc->sc_txq[i]);
3413 			mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
3414 		}
3415 	}
3416 
3417 	ath9k_hw_detach(sc->sc_ah);
3418 
3419 	/* free io handle */
3420 	ddi_regs_map_free(&sc->sc_io_handle);
3421 	pci_config_teardown(&sc->sc_cfg_handle);
3422 
3423 	/* destroy locks */
3424 	mutex_destroy(&sc->sc_genlock);
3425 	mutex_destroy(&sc->sc_serial_rw);
3426 	mutex_destroy(&sc->sc_rxbuflock);
3427 	mutex_destroy(&sc->sc_resched_lock);
3428 #ifdef ARN_IBSS
3429 	mutex_destroy(&sc->sc_bcbuflock);
3430 #endif
3431 
3432 	ddi_remove_minor_node(devinfo, NULL);
3433 	ddi_soft_state_free(arn_soft_state_p, ddi_get_instance(devinfo));
3434 
3435 	return (DDI_SUCCESS);
3436 }
3437 
3438 /*
3439  * quiesce(9E) entry point.
3440  *
3441  * This function is called when the system is single-threaded at high
3442  * PIL with preemption disabled. Therefore, this function must not be
3443  * blocked.
3444  *
3445  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
3446  * DDI_FAILURE indicates an error condition and should almost never happen.
3447  */
3448 static int32_t
3449 arn_quiesce(dev_info_t *devinfo)
3450 {
3451 	struct arn_softc *sc;
3452 	int i;
3453 	struct ath_hal *ah;
3454 
3455 	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
3456 
3457 	if (sc == NULL || (ah = sc->sc_ah) == NULL)
3458 		return (DDI_FAILURE);
3459 
3460 	/*
3461 	 * Disable interrupts
3462 	 */
3463 	(void) ath9k_hw_set_interrupts(ah, 0);
3464 
3465 	/*
3466 	 * Disable TX HW
3467 	 */
3468 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3469 		if (ARN_TXQ_SETUP(sc, i))
3470 			(void) ath9k_hw_stoptxdma(ah, sc->sc_txq[i].axq_qnum);
3471 	}
3472 
3473 	/*
3474 	 * Disable RX HW
3475 	 */
3476 	ath9k_hw_stoppcurecv(ah);
3477 	ath9k_hw_setrxfilter(ah, 0);
3478 	(void) ath9k_hw_stopdmarecv(ah);
3479 	drv_usecwait(3000);
3480 
3481 	/*
3482 	 * Power down HW
3483 	 */
3484 	(void) ath9k_hw_phy_disable(ah);
3485 
3486 	return (DDI_SUCCESS);
3487 }
3488 
3489 DDI_DEFINE_STREAM_OPS(arn_dev_ops, nulldev, nulldev, arn_attach, arn_detach,
3490     nodev, NULL, D_MP, NULL, arn_quiesce);
3491 
3492 static struct modldrv arn_modldrv = {
3493 	&mod_driverops, /* Type of module.  This one is a driver */
3494 	"arn-Atheros 9000 series driver:2.0", /* short description */
3495 	&arn_dev_ops /* driver specific ops */
3496 };
3497 
3498 static struct modlinkage modlinkage = {
3499 	MODREV_1, (void *)&arn_modldrv, NULL
3500 };
3501 
3502 int
3503 _info(struct modinfo *modinfop)
3504 {
3505 	return (mod_info(&modlinkage, modinfop));
3506 }
3507 
3508 int
3509 _init(void)
3510 {
3511 	int status;
3512 
3513 	status = ddi_soft_state_init
3514 	    (&arn_soft_state_p, sizeof (struct arn_softc), 1);
3515 	if (status != 0)
3516 		return (status);
3517 
3518 	mutex_init(&arn_loglock, NULL, MUTEX_DRIVER, NULL);
3519 	mac_init_ops(&arn_dev_ops, "arn");
3520 	status = mod_install(&modlinkage);
3521 	if (status != 0) {
3522 		mac_fini_ops(&arn_dev_ops);
3523 		mutex_destroy(&arn_loglock);
3524 		ddi_soft_state_fini(&arn_soft_state_p);
3525 	}
3526 
3527 	return (status);
3528 }
3529 
3530 int
3531 _fini(void)
3532 {
3533 	int status;
3534 
3535 	status = mod_remove(&modlinkage);
3536 	if (status == 0) {
3537 		mac_fini_ops(&arn_dev_ops);
3538 		mutex_destroy(&arn_loglock);
3539 		ddi_soft_state_fini(&arn_soft_state_p);
3540 	}
3541 	return (status);
3542 }
3543