xref: /titanic_41/usr/src/uts/common/io/arn/arn_main.c (revision bbb1277b6ec1b0daad4e3ed1a2b891d3e2ece2eb)
1 /*
2  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2008 Atheros Communications Inc.
8  *
9  * Permission to use, copy, modify, and/or distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 #include <sys/param.h>
23 #include <sys/types.h>
24 #include <sys/signal.h>
25 #include <sys/stream.h>
26 #include <sys/termio.h>
27 #include <sys/errno.h>
28 #include <sys/file.h>
29 #include <sys/cmn_err.h>
30 #include <sys/stropts.h>
31 #include <sys/strsubr.h>
32 #include <sys/strtty.h>
33 #include <sys/kbio.h>
34 #include <sys/cred.h>
35 #include <sys/stat.h>
36 #include <sys/consdev.h>
37 #include <sys/kmem.h>
38 #include <sys/modctl.h>
39 #include <sys/ddi.h>
40 #include <sys/sunddi.h>
41 #include <sys/pci.h>
42 #include <sys/errno.h>
43 #include <sys/mac_provider.h>
44 #include <sys/dlpi.h>
45 #include <sys/ethernet.h>
46 #include <sys/list.h>
47 #include <sys/byteorder.h>
48 #include <sys/strsun.h>
49 #include <sys/policy.h>
50 #include <inet/common.h>
51 #include <inet/nd.h>
52 #include <inet/mi.h>
53 #include <inet/wifi_ioctl.h>
54 #include <sys/mac_wifi.h>
55 
56 #include "arn_ath9k.h"
57 #include "arn_core.h"
58 #include "arn_reg.h"
59 #include "arn_hw.h"
60 
61 #define	ARN_MAX_RSSI	45	/* max rssi */
62 
63 /*
64  * PIO access attributes for registers
65  */
66 static ddi_device_acc_attr_t arn_reg_accattr = {
67 	DDI_DEVICE_ATTR_V0,
68 	DDI_STRUCTURE_LE_ACC,
69 	DDI_STRICTORDER_ACC
70 };
71 
72 /*
73  * DMA access attributes for descriptors: NOT to be byte swapped.
74  */
75 static ddi_device_acc_attr_t arn_desc_accattr = {
76 	DDI_DEVICE_ATTR_V0,
77 	DDI_STRUCTURE_LE_ACC,
78 	DDI_STRICTORDER_ACC
79 };
80 
81 /*
82  * Describes the chip's DMA engine
83  */
84 static ddi_dma_attr_t arn_dma_attr = {
85 	DMA_ATTR_V0,	/* version number */
86 	0,				/* low address */
87 	0xffffffffU,	/* high address */
88 	0x3ffffU,		/* counter register max */
89 	1,				/* alignment */
90 	0xFFF,			/* burst sizes */
91 	1,				/* minimum transfer size */
92 	0x3ffffU,		/* max transfer size */
93 	0xffffffffU,	/* address register max */
94 	1,				/* no scatter-gather */
95 	1,				/* granularity of device */
96 	0,				/* DMA flags */
97 };
98 
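/*
 * The descriptor ring uses its own attributes: 4 KB (0x1000) alignment and
 * a single DMA cookie, since the hardware follows the ring by physical
 * address (see ds_link chaining in arn_rx_buf_link()).
 */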
99 static ddi_dma_attr_t arn_desc_dma_attr = {
100 	DMA_ATTR_V0,	/* version number */
101 	0,				/* low address */
102 	0xffffffffU,	/* high address */
103 	0xffffffffU,	/* counter register max */
104 	0x1000,			/* alignment */
105 	0xFFF,			/* burst sizes */
106 	1,				/* minimum transfer size */
107 	0xffffffffU,	/* max transfer size */
108 	0xffffffffU,	/* address register max */
109 	1,				/* no scatter-gather */
110 	1,				/* granularity of device */
111 	0,				/* DMA flags */
112 };
113 
114 #define	ATH_DEF_CACHE_BYTES	32 /* default cache line size */
115 
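/* arn_loglock serializes console output from arn_problem/arn_log/arn_dbg */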
116 static kmutex_t arn_loglock;
117 static void *arn_soft_state_p = NULL;
118 /* per-channel scan dwell time, in milliseconds */
119 static int arn_dwelltime = 200; /* 150 */
120 
121 static int	arn_m_stat(void *,  uint_t, uint64_t *);
122 static int	arn_m_start(void *);
123 static void	arn_m_stop(void *);
124 static int	arn_m_promisc(void *, boolean_t);
125 static int	arn_m_multicst(void *, boolean_t, const uint8_t *);
126 static int	arn_m_unicst(void *, const uint8_t *);
127 static mblk_t	*arn_m_tx(void *, mblk_t *);
128 static void	arn_m_ioctl(void *, queue_t *, mblk_t *);
129 static int	arn_m_setprop(void *, const char *, mac_prop_id_t,
130     uint_t, const void *);
131 static int	arn_m_getprop(void *, const char *, mac_prop_id_t,
132     uint_t, uint_t, void *, uint_t *);
133 
134 /* MAC Callback Functions */
135 static mac_callbacks_t arn_m_callbacks = {
136 	MC_IOCTL | MC_SETPROP | MC_GETPROP,
137 	arn_m_stat,
138 	arn_m_start,
139 	arn_m_stop,
140 	arn_m_promisc,
141 	arn_m_multicst,
142 	arn_m_unicst,
143 	arn_m_tx,
144 	arn_m_ioctl,
145 	NULL,
146 	NULL,
147 	NULL,
148 	arn_m_setprop,
149 	arn_m_getprop
150 };
151 
152 /*
153  * ARN_DBG_HW
154  * ARN_DBG_REG_IO
155  * ARN_DBG_QUEUE
156  * ARN_DBG_EEPROM
157  * ARN_DBG_XMIT
158  * ARN_DBG_RECV
159  * ARN_DBG_CALIBRATE
160  * ARN_DBG_CHANNEL
161  * ARN_DBG_INTERRUPT
162  * ARN_DBG_REGULATORY
163  * ARN_DBG_ANI
164  * ARN_DBG_POWER_MGMT
165  * ARN_DBG_KEYCACHE
166  * ARN_DBG_BEACON
167  * ARN_DBG_RATE
168  * ARN_DBG_INIT
169  * ARN_DBG_ATTACH
170  * ARN_DBG_DEATCH
171  * ARN_DBG_AGGR
172  * ARN_DBG_RESET
173  * ARN_DBG_FATAL
174  * ARN_DBG_ANY
175  * ARN_DBG_ALL
176  */
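/* OR any of the ARN_DBG_* flags above into arn_dbg_mask to enable output */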
177 uint32_t arn_dbg_mask = 0;
178 
179 /*
180  * Exception/warning cases not leading to panic.
181  */
182 void
183 arn_problem(const int8_t *fmt, ...)
184 {
185 	va_list args;
186 
187 	mutex_enter(&arn_loglock);
188 
189 	va_start(args, fmt);
190 	vcmn_err(CE_WARN, fmt, args);
191 	va_end(args);
192 
193 	mutex_exit(&arn_loglock);
194 }
195 
196 /*
197  * Normal log information independent of debug.
198  */
199 void
200 arn_log(const int8_t *fmt, ...)
201 {
202 	va_list args;
203 
204 	mutex_enter(&arn_loglock);
205 
206 	va_start(args, fmt);
207 	vcmn_err(CE_CONT, fmt, args);
208 	va_end(args);
209 
210 	mutex_exit(&arn_loglock);
211 }
212 
213 void
214 arn_dbg(uint32_t dbg_flags, const int8_t *fmt, ...)
215 {
216 	va_list args;
217 
218 	if (dbg_flags & arn_dbg_mask) {
219 		mutex_enter(&arn_loglock);
220 		va_start(args, fmt);
221 		vcmn_err(CE_CONT, fmt, args);
222 		va_end(args);
223 		mutex_exit(&arn_loglock);
224 	}
225 }
226 
227 /*
228  * Register reads and writes share the same lock. We do this to serialize
229  * reads and writes on Atheros 802.11n PCI devices only. This is required
230  * because the FIFO on these devices can sanely accept only two requests;
231  * beyond that the device misbehaves. Serializing the reads/writes prevents
232  * this from happening.
233  */
234 void
235 arn_iowrite32(struct ath_hal *ah, uint32_t reg_offset, uint32_t val)
236 {
237 	struct arn_softc *sc = ah->ah_sc;
238 	if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
239 		mutex_enter(&sc->sc_serial_rw);
240 		ddi_put32(sc->sc_io_handle,
241 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
242 		mutex_exit(&sc->sc_serial_rw);
243 	} else {
244 		ddi_put32(sc->sc_io_handle,
245 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
246 	}
247 }
248 
249 unsigned int
250 arn_ioread32(struct ath_hal *ah, uint32_t reg_offset)
251 {
252 	uint32_t val;
253 	struct arn_softc *sc = ah->ah_sc;
254 	if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
255 		mutex_enter(&sc->sc_serial_rw);
256 		val = ddi_get32(sc->sc_io_handle,
257 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
258 		mutex_exit(&sc->sc_serial_rw);
259 	} else {
260 		val = ddi_get32(sc->sc_io_handle,
261 		    (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
262 	}
263 
264 	return (val);
265 }
266 
267 void
268 arn_rx_buf_link(struct arn_softc *sc, struct ath_buf *bf)
269 {
270 	struct ath_desc *ds;
271 
272 	ds = bf->bf_desc;
273 	ds->ds_link = bf->bf_daddr;
274 	ds->ds_data = bf->bf_dma.cookie.dmac_address;
275 	/* virtual addr of the beginning of the buffer. */
276 	ds->ds_vdata = bf->bf_dma.mem_va;
277 
278 	/*
279 	 * Set up the rx descriptor. The bf_dma.alength here tells the H/W
280 	 * how much data it can DMA to us and that we are prepared
281 	 * to process.
282 	 */
283 	(void) ath9k_hw_setuprxdesc(sc->sc_ah, ds,
284 	    bf->bf_dma.alength, /* buffer size */
285 	    0);
286 
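	/* append this descriptor to the hardware's self-linked rx chain */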
287 	if (sc->sc_rxlink != NULL)
288 		*sc->sc_rxlink = bf->bf_daddr;
289 	sc->sc_rxlink = &ds->ds_link;
290 }
291 
292 /*
293  * Allocate an area of memory and a DMA handle for accessing it
294  */
295 static int
296 arn_alloc_dma_mem(dev_info_t *devinfo, ddi_dma_attr_t *dma_attr, size_t memsize,
297     ddi_device_acc_attr_t *attr_p, uint_t alloc_flags,
298     uint_t bind_flags, dma_area_t *dma_p)
299 {
300 	int err;
301 
302 	/*
303 	 * Allocate handle
304 	 */
305 	err = ddi_dma_alloc_handle(devinfo, dma_attr,
306 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
307 	if (err != DDI_SUCCESS)
308 		return (DDI_FAILURE);
309 
310 	/*
311 	 * Allocate memory
312 	 */
313 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
314 	    alloc_flags, DDI_DMA_SLEEP, NULL, &dma_p->mem_va,
315 	    &dma_p->alength, &dma_p->acc_hdl);
316 	if (err != DDI_SUCCESS)
317 		return (DDI_FAILURE);
318 
319 	/*
320 	 * Bind the two together
321 	 */
322 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
323 	    dma_p->mem_va, dma_p->alength, bind_flags,
324 	    DDI_DMA_SLEEP, NULL, &dma_p->cookie, &dma_p->ncookies);
325 	if (err != DDI_DMA_MAPPED)
326 		return (DDI_FAILURE);
327 
328 	dma_p->nslots = ~0U;
329 	dma_p->size = ~0U;
330 	dma_p->token = ~0U;
331 	dma_p->offset = 0;
332 	return (DDI_SUCCESS);
333 }
334 
335 /*
336  * Free one allocated area of DMAable memory
337  */
338 static void
339 arn_free_dma_mem(dma_area_t *dma_p)
340 {
341 	if (dma_p->dma_hdl != NULL) {
342 		(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
343 		if (dma_p->acc_hdl != NULL) {
344 			ddi_dma_mem_free(&dma_p->acc_hdl);
345 			dma_p->acc_hdl = NULL;
346 		}
347 		ddi_dma_free_handle(&dma_p->dma_hdl);
348 		dma_p->ncookies = 0;
349 		dma_p->dma_hdl = NULL;
350 	}
351 }
352 
353 /*
354  * Initialize tx, rx, or beacon buffer list. Allocate DMA memory for
355  * each buffer.
356  */
357 static int
358 arn_buflist_setup(dev_info_t *devinfo, struct arn_softc *sc, list_t *bflist,
359     struct ath_buf **pbf, struct ath_desc **pds, int nbuf, uint_t dmabflags)
360 {
361 	int i, err;
362 	struct ath_buf *bf = *pbf;
363 	struct ath_desc *ds = *pds;
364 
365 	list_create(bflist, sizeof (struct ath_buf),
366 	    offsetof(struct ath_buf, bf_node));
367 	for (i = 0; i < nbuf; i++, bf++, ds++) {
368 		bf->bf_desc = ds;
369 		bf->bf_daddr = sc->sc_desc_dma.cookie.dmac_address +
370 		    ((uintptr_t)ds - (uintptr_t)sc->sc_desc);
371 		list_insert_tail(bflist, bf);
372 
373 		/* alloc DMA memory */
374 		err = arn_alloc_dma_mem(devinfo, &arn_dma_attr,
375 		    sc->sc_dmabuf_size, &arn_desc_accattr, DDI_DMA_STREAMING,
376 		    dmabflags, &bf->bf_dma);
377 		if (err != DDI_SUCCESS)
378 			return (err);
379 	}
380 	*pbf = bf;
381 	*pds = ds;
382 
383 	return (DDI_SUCCESS);
384 }
385 
386 /*
387  * Destroy tx, rx or beacon buffer list. Free DMA memory.
388  */
389 static void
390 arn_buflist_cleanup(list_t *buflist)
391 {
392 	struct ath_buf *bf;
393 
394 	if (!buflist)
395 		return;
396 
397 	bf = list_head(buflist);
398 	while (bf != NULL) {
399 		if (bf->bf_m != NULL) {
400 			freemsg(bf->bf_m);
401 			bf->bf_m = NULL;
402 		}
403 		/* Free DMA buffer */
404 		arn_free_dma_mem(&bf->bf_dma);
405 		if (bf->bf_in != NULL) {
406 			ieee80211_free_node(bf->bf_in);
407 			bf->bf_in = NULL;
408 		}
409 		list_remove(buflist, bf);
410 		bf = list_head(buflist);
411 	}
412 	list_destroy(buflist);
413 }
414 
415 static void
416 arn_desc_free(struct arn_softc *sc)
417 {
418 	arn_buflist_cleanup(&sc->sc_txbuf_list);
419 	arn_buflist_cleanup(&sc->sc_rxbuf_list);
420 #ifdef ARN_IBSS
421 	arn_buflist_cleanup(&sc->sc_bcbuf_list);
422 #endif
423 
424 	/* Free descriptor DMA buffer */
425 	arn_free_dma_mem(&sc->sc_desc_dma);
426 
427 	kmem_free((void *)sc->sc_vbufptr, sc->sc_vbuflen);
428 	sc->sc_vbufptr = NULL;
429 }
430 
431 static int
432 arn_desc_alloc(dev_info_t *devinfo, struct arn_softc *sc)
433 {
434 	int err;
435 	size_t size;
436 	struct ath_desc *ds;
437 	struct ath_buf *bf;
438 
439 #ifdef ARN_IBSS
440 	size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF);
441 #else
442 	size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF);
443 #endif
444 
445 	err = arn_alloc_dma_mem(devinfo, &arn_desc_dma_attr, size,
446 	    &arn_desc_accattr, DDI_DMA_CONSISTENT,
447 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &sc->sc_desc_dma);
	if (err != DDI_SUCCESS)
		return (err);
448 
449 	/* virtual address of the first descriptor */
450 	sc->sc_desc = (struct ath_desc *)sc->sc_desc_dma.mem_va;
451 
452 	ds = sc->sc_desc;
453 	ARN_DBG((ARN_DBG_INIT, "arn: arn_desc_alloc(): DMA map: "
454 	    "%p (%d) -> %p\n",
455 	    sc->sc_desc, sc->sc_desc_dma.alength,
456 	    sc->sc_desc_dma.cookie.dmac_address));
457 
458 	/* allocate data structures to describe TX/RX DMA buffers */
459 #ifdef ARN_IBSS
460 	sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF +
461 	    ATH_BCBUF);
462 #else
463 	sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF);
464 #endif
465 	bf = (struct ath_buf *)kmem_zalloc(sc->sc_vbuflen, KM_SLEEP);
466 	sc->sc_vbufptr = bf;
467 
468 	/* DMA buffer size for each TX/RX packet */
469 	sc->sc_dmabuf_size = roundup(1000 + sizeof (struct ieee80211_frame) +
470 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
471 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
472 	    IEEE80211_WEP_CRCLEN), sc->sc_cachelsz);
473 
474 	/* create RX buffer list */
475 	err = arn_buflist_setup(devinfo, sc, &sc->sc_rxbuf_list, &bf, &ds,
476 	    ATH_RXBUF, DDI_DMA_READ | DDI_DMA_STREAMING);
477 	if (err != DDI_SUCCESS) {
478 		arn_desc_free(sc);
479 		return (err);
480 	}
481 
482 	/* create TX buffer list */
483 	err = arn_buflist_setup(devinfo, sc, &sc->sc_txbuf_list, &bf, &ds,
484 	    ATH_TXBUF, DDI_DMA_STREAMING);
485 	if (err != DDI_SUCCESS) {
486 		arn_desc_free(sc);
487 		return (err);
488 	}
489 
490 	/* create beacon buffer list */
491 #ifdef ARN_IBSS
492 	err = arn_buflist_setup(devinfo, sc, &sc->sc_bcbuf_list, &bf, &ds,
493 	    ATH_BCBUF, DDI_DMA_STREAMING);
494 	if (err != DDI_SUCCESS) {
495 		arn_desc_free(sc);
496 		return (err);
497 	}
498 #endif
499 
500 	return (DDI_SUCCESS);
501 }
502 
503 static struct ath_rate_table *
504 /* LINTED E_STATIC_UNUSED */
505 arn_get_ratetable(struct arn_softc *sc, uint32_t mode)
506 {
507 	struct ath_rate_table *rate_table = NULL;
508 
509 	switch (mode) {
510 	case IEEE80211_MODE_11A:
511 		rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
512 		break;
513 	case IEEE80211_MODE_11B:
514 		rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
515 		break;
516 	case IEEE80211_MODE_11G:
517 		rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
518 		break;
519 #ifdef ARN_11N
520 	case IEEE80211_MODE_11NA_HT20:
521 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
522 		break;
523 	case IEEE80211_MODE_11NG_HT20:
524 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
525 		break;
526 	case IEEE80211_MODE_11NA_HT40PLUS:
527 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
528 		break;
529 	case IEEE80211_MODE_11NA_HT40MINUS:
530 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
531 		break;
532 	case IEEE80211_MODE_11NG_HT40PLUS:
533 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
534 		break;
535 	case IEEE80211_MODE_11NG_HT40MINUS:
536 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
537 		break;
538 #endif
539 	default:
540 		ARN_DBG((ARN_DBG_FATAL, "arn: arn_get_ratetable(): "
541 		    "invalid mode %u\n", mode));
542 		return (NULL);
543 	}
544 
545 	return (rate_table);
546 
547 }
548 
549 static void
550 arn_setcurmode(struct arn_softc *sc, enum wireless_mode mode)
551 {
552 	struct ath_rate_table *rt;
553 	int i;
554 
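	/* invalidate the rate index map; it is rebuilt below for this mode */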
555 	for (i = 0; i < sizeof (sc->asc_rixmap); i++)
556 		sc->asc_rixmap[i] = 0xff;
557 
558 	rt = sc->hw_rate_table[mode];
559 	ASSERT(rt != NULL);
560 
561 	for (i = 0; i < rt->rate_cnt; i++)
562 		sc->asc_rixmap[rt->info[i].dot11rate &
563 		    IEEE80211_RATE_VAL] = (uint8_t)i; /* LINT */
564 
565 	sc->sc_currates = rt;
566 	sc->sc_curmode = mode;
567 
568 	/*
569 	 * All protection frames are transmitted at 2Mb/s for
570 	 * 11g, otherwise at 1Mb/s.
571 	 * XXX select protection rate index from rate table.
572 	 */
573 	sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
574 }
575 
576 static enum wireless_mode
577 arn_chan2mode(struct ath9k_channel *chan)
578 {
579 	if (chan->chanmode == CHANNEL_A)
580 		return (ATH9K_MODE_11A);
581 	else if (chan->chanmode == CHANNEL_G)
582 		return (ATH9K_MODE_11G);
583 	else if (chan->chanmode == CHANNEL_B)
584 		return (ATH9K_MODE_11B);
585 	else if (chan->chanmode == CHANNEL_A_HT20)
586 		return (ATH9K_MODE_11NA_HT20);
587 	else if (chan->chanmode == CHANNEL_G_HT20)
588 		return (ATH9K_MODE_11NG_HT20);
589 	else if (chan->chanmode == CHANNEL_A_HT40PLUS)
590 		return (ATH9K_MODE_11NA_HT40PLUS);
591 	else if (chan->chanmode == CHANNEL_A_HT40MINUS)
592 		return (ATH9K_MODE_11NA_HT40MINUS);
593 	else if (chan->chanmode == CHANNEL_G_HT40PLUS)
594 		return (ATH9K_MODE_11NG_HT40PLUS);
595 	else if (chan->chanmode == CHANNEL_G_HT40MINUS)
596 		return (ATH9K_MODE_11NG_HT40MINUS);
597 
598 	return (ATH9K_MODE_11B);
599 }
600 
601 static void
602 arn_update_txpow(struct arn_softc *sc)
603 {
604 	struct ath_hal 	*ah = sc->sc_ah;
605 	uint32_t txpow;
606 
607 	if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
608 		(void) ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
609 		/* read back in case value is clamped */
610 		(void) ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
611 		sc->sc_curtxpow = (uint32_t)txpow;
612 	}
613 }
614 
615 static void
616 arn_setup_rates(struct arn_softc *sc, uint32_t mode)
617 {
618 	int i, maxrates;
619 	struct ath_rate_table *rate_table = NULL;
620 	struct ieee80211_rateset *rateset;
621 	ieee80211com_t *ic = (ieee80211com_t *)sc;
622 
623 	/* rate_table = arn_get_ratetable(sc, mode); */
624 	switch (mode) {
625 	case IEEE80211_MODE_11A:
626 		rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
627 		break;
628 	case IEEE80211_MODE_11B:
629 		rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
630 		break;
631 	case IEEE80211_MODE_11G:
632 		rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
633 		break;
634 #ifdef ARN_11N
635 	case IEEE80211_MODE_11NA_HT20:
636 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
637 		break;
638 	case IEEE80211_MODE_11NG_HT20:
639 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
640 		break;
641 	case IEEE80211_MODE_11NA_HT40PLUS:
642 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
643 		break;
644 	case IEEE80211_MODE_11NA_HT40MINUS:
645 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
646 		break;
647 	case IEEE80211_MODE_11NG_HT40PLUS:
648 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
649 		break;
650 	case IEEE80211_MODE_11NG_HT40MINUS:
651 		rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
652 		break;
653 #endif
654 	default:
655 		ARN_DBG((ARN_DBG_RATE, "arn: arn_get_ratetable(): "
656 		    "invalid mode %u\n", mode));
657 		break;
658 	}
659 	if (rate_table == NULL)
660 		return;
661 	if (rate_table->rate_cnt > ATH_RATE_MAX) {
662 		ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
663 		    "rate table too small (%u > %u)\n",
664 		    rate_table->rate_cnt, ATH_RATE_MAX));
665 		maxrates = ATH_RATE_MAX;
666 	} else
667 		maxrates = rate_table->rate_cnt;
668 
669 	ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
670 	    "maxrates is %d\n", maxrates));
671 
672 	rateset = &ic->ic_sup_rates[mode];
673 	for (i = 0; i < maxrates; i++) {
674 		rateset->ir_rates[i] = rate_table->info[i].dot11rate;
675 		ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
676 		    "%d\n", rate_table->info[i].dot11rate));
677 	}
678 	rateset->ir_nrates = (uint8_t)maxrates; /* ??? */
679 }
680 
681 static int
682 arn_setup_channels(struct arn_softc *sc)
683 {
684 	struct ath_hal *ah = sc->sc_ah;
685 	ieee80211com_t *ic = (ieee80211com_t *)sc;
686 	int nchan, i, index;
687 	uint8_t regclassids[ATH_REGCLASSIDS_MAX];
688 	uint32_t nregclass = 0;
689 	struct ath9k_channel *c;
690 
691 	/* Fill in ah->ah_channels */
692 	if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (uint32_t *)&nchan,
693 	    regclassids, ATH_REGCLASSIDS_MAX, &nregclass, CTRY_DEFAULT,
694 	    B_FALSE, 1)) {
695 		uint32_t rd = ah->ah_currentRD;
696 		ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
697 		    "unable to collect channel list; "
698 		    "regdomain likely %u country code %u\n",
699 		    rd, CTRY_DEFAULT));
700 		return (EINVAL);
701 	}
702 
703 	ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
704 	    "number of channels is %d\n", nchan));
705 
706 	for (i = 0; i < nchan; i++) {
707 		c = &ah->ah_channels[i];
708 		uint16_t flags;
709 		index = ath9k_hw_mhz2ieee(ah, c->channel, c->channelFlags);
710 
711 		if (index > IEEE80211_CHAN_MAX) {
712 			ARN_DBG((ARN_DBG_CHANNEL,
713 			    "arn: arn_setup_channels(): "
714 			    "bad hal channel %d (%u/%x) ignored\n",
715 			    index, c->channel, c->channelFlags));
716 			continue;
717 		}
718 		/* NB: flags are known to be compatible */
719 		if (index < 0) {
720 			/*
721 			 * can't handle frequency <2400MHz (negative
722 			 * channels) right now
723 			 */
724 			ARN_DBG((ARN_DBG_CHANNEL,
725 			    "arn: arn_setup_channels(): "
726 			    "hal channel %d (%u/%x) "
727 			    "cannot be handled, ignored\n",
728 			    index, c->channel, c->channelFlags));
729 			continue;
730 		}
731 
732 		/*
733 		 * Calculate net80211 flags; most are compatible
734 		 * but some need massaging.  Note the static turbo
735 		 * conversion can be removed once net80211 is updated
736 		 * to understand static vs. dynamic turbo.
737 		 */
738 
739 		flags = c->channelFlags & (CHANNEL_ALL | CHANNEL_PASSIVE);
740 
741 		if (ic->ic_sup_channels[index].ich_freq == 0) {
742 			ic->ic_sup_channels[index].ich_freq = c->channel;
743 			ic->ic_sup_channels[index].ich_flags = flags;
744 		} else {
745 			/* channels overlap; e.g. 11g and 11b */
746 			ic->ic_sup_channels[index].ich_flags |= flags;
747 		}
748 		if ((c->channelFlags & CHANNEL_G) == CHANNEL_G) {
749 			sc->sc_have11g = 1;
750 			ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
751 			    IEEE80211_C_SHSLOT;	/* short slot time */
752 		}
753 	}
754 
755 	return (0);
756 }
757 
758 uint32_t
759 arn_chan2flags(ieee80211com_t *isc, struct ieee80211_channel *chan)
760 {
761 	static const uint32_t modeflags[] = {
762 	    0,				/* IEEE80211_MODE_AUTO */
763 	    CHANNEL_A,			/* IEEE80211_MODE_11A */
764 	    CHANNEL_B,			/* IEEE80211_MODE_11B */
765 	    CHANNEL_G,		/* IEEE80211_MODE_11G */
766 	    0,				/*  */
767 	    0,		/*  */
768 	    0		/*  */
769 	};
770 	return (modeflags[ieee80211_chan2mode(isc, chan)]);
771 }
772 
773 /*
774  * Update internal state after a channel change.
775  */
776 void
777 arn_chan_change(struct arn_softc *sc, struct ieee80211_channel *chan)
778 {
779 	struct ieee80211com *ic = &sc->sc_isc;
780 	enum ieee80211_phymode mode;
781 	enum wireless_mode wlmode;
782 
783 	/*
784 	 * Change channels and update the h/w rate map
785 	 * if we're switching; e.g. 11a to 11b/g.
786 	 */
787 	mode = ieee80211_chan2mode(ic, chan);
788 	switch (mode) {
789 	case IEEE80211_MODE_11A:
790 		wlmode = ATH9K_MODE_11A;
791 		break;
792 	case IEEE80211_MODE_11B:
793 		wlmode = ATH9K_MODE_11B;
794 		break;
795 	case IEEE80211_MODE_11G:
796 		wlmode = ATH9K_MODE_11G;
797 		break;
798 	default:
799 		wlmode = sc->sc_curmode;	/* leave the mode unchanged */
		break;
800 	}
801 	if (wlmode != sc->sc_curmode)
802 		arn_setcurmode(sc, wlmode);
803 
804 }
805 
806 /*
807  * Set/change channels.  If the channel is really being changed, it's done
808  * by resetting the chip.  To accomplish this we must first clean up any
809  * pending DMA, then restart everything.
810  */
811 static int
812 arn_set_channel(struct arn_softc *sc, struct ath9k_channel *hchan)
813 {
814 	struct ath_hal *ah = sc->sc_ah;
815 	ieee80211com_t *ic = &sc->sc_isc;
816 	boolean_t fastcc = B_TRUE;
817 	boolean_t  stopped;
818 	struct ieee80211_channel chan;
819 	enum wireless_mode curmode;
820 
821 	if (sc->sc_flags & SC_OP_INVALID)
822 		return (EIO);
823 
824 	if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
825 	    hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
826 	    (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
827 	    (sc->sc_flags & SC_OP_FULL_RESET)) {
828 		int status;
829 
830 		/*
831 		 * This is only performed if the channel settings have
832 		 * actually changed.
833 		 *
834 		 * To switch channels clear any pending DMA operations;
835 		 * wait long enough for the RX fifo to drain, reset the
836 		 * hardware at the new frequency, and then re-enable
837 		 * the relevant bits of the h/w.
838 		 */
839 		(void) ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
840 		arn_draintxq(sc, B_FALSE);	/* clear pending tx frames */
841 		stopped = arn_stoprecv(sc);	/* turn off frame recv */
842 
843 		/*
844 		 * XXX: do not flush receive queue here. We don't want
845 		 * to flush data frames already in queue because of
846 		 * changing channel.
847 		 */
848 
849 		if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
850 			fastcc = B_FALSE;
851 
852 		ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_set_channel(): "
853 		    "(%u MHz) -> (%u MHz), cflags:%x, chanwidth: %d\n",
854 		    sc->sc_ah->ah_curchan->channel,
855 		    hchan->channel, hchan->channelFlags, sc->tx_chan_width));
856 
857 		if (!ath9k_hw_reset(ah, hchan, sc->tx_chan_width,
858 		    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
859 		    sc->sc_ht_extprotspacing, fastcc, &status)) {
860 			ARN_DBG((ARN_DBG_FATAL, "arn: arn_set_channel(): "
861 			    "unable to reset channel %u (%uMhz) "
862 			    "flags 0x%x hal status %u\n",
863 			    ath9k_hw_mhz2ieee(ah, hchan->channel,
864 			    hchan->channelFlags),
865 			    hchan->channel, hchan->channelFlags, status));
866 			return (EIO);
867 		}
868 
869 		sc->sc_curchan = *hchan;
870 
871 		sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
872 		sc->sc_flags &= ~SC_OP_FULL_RESET;
873 
874 		if (arn_startrecv(sc) != 0) {
875 			arn_problem("arn: arn_set_channel(): "
876 			    "unable to restart recv logic\n");
877 			return (EIO);
878 		}
879 
880 		chan.ich_freq = hchan->channel;
881 		chan.ich_flags = hchan->channelFlags;
882 		ic->ic_ibss_chan = &chan;
883 
884 		/*
885 		 * Change channels and update the h/w rate map
886 		 * if we're switching; e.g. 11a to 11b/g.
887 		 */
888 		curmode = arn_chan2mode(hchan);
889 		if (curmode != sc->sc_curmode)
890 			arn_setcurmode(sc, arn_chan2mode(hchan));
891 
892 		arn_update_txpow(sc);
893 
894 		(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
895 	}
896 
897 	return (0);
898 }
899 
900 /*
901  *  This routine performs the periodic noise floor calibration function
902  *  that is used to adjust and optimize the chip performance.  This
903  *  takes environmental changes (location, temperature) into account.
904  *  When the task is complete, it reschedules itself depending on the
905  *  appropriate interval that was calculated.
906  */
907 static void
908 arn_ani_calibrate(void *arg)
909 
910 {
911 	ieee80211com_t *ic = (ieee80211com_t *)arg;
912 	struct arn_softc *sc = (struct arn_softc *)ic;
913 	struct ath_hal *ah = sc->sc_ah;
914 	boolean_t longcal = B_FALSE;
915 	boolean_t shortcal = B_FALSE;
916 	boolean_t aniflag = B_FALSE;
917 	unsigned int timestamp = drv_hztousec(ddi_get_lbolt())/1000;
918 	uint32_t cal_interval;
919 
920 	/*
921 	 * don't calibrate when we're scanning.
922 	 * we are most likely not on our home channel.
923 	 */
924 	if (ic->ic_state != IEEE80211_S_RUN)
925 		goto settimer;
926 
927 	/* Long calibration runs independently of short calibration. */
928 	if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
929 		longcal = B_TRUE;
930 		ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
931 		    "%s: longcal @%u\n", __func__, timestamp));
932 		sc->sc_ani.sc_longcal_timer = timestamp;
933 	}
934 
935 	/* Short calibration applies only while sc_caldone is FALSE */
936 	if (!sc->sc_ani.sc_caldone) {
937 		if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
938 		    ATH_SHORT_CALINTERVAL) {
939 			shortcal = B_TRUE;
940 			ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
941 			    "%s: shortcal @%u\n",
942 			    __func__, timestamp));
943 			sc->sc_ani.sc_shortcal_timer = timestamp;
944 			sc->sc_ani.sc_resetcal_timer = timestamp;
945 		}
946 	} else {
947 		if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
948 		    ATH_RESTART_CALINTERVAL) {
949 			ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
950 			    &sc->sc_ani.sc_caldone);
951 			if (sc->sc_ani.sc_caldone)
952 				sc->sc_ani.sc_resetcal_timer = timestamp;
953 		}
954 	}
955 
956 	/* Verify whether we must check ANI */
957 	if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
958 	    ATH_ANI_POLLINTERVAL) {
959 		aniflag = B_TRUE;
960 		sc->sc_ani.sc_checkani_timer = timestamp;
961 	}
962 
963 	/* Skip all processing if there's nothing to do. */
964 	if (longcal || shortcal || aniflag) {
965 		/* Call ANI routine if necessary */
966 		if (aniflag)
967 			ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
968 			    ah->ah_curchan);
969 
970 		/* Perform calibration if necessary */
971 		if (longcal || shortcal) {
972 			boolean_t iscaldone = B_FALSE;
973 
974 			if (ath9k_hw_calibrate(ah, ah->ah_curchan,
975 			    sc->sc_rx_chainmask, longcal, &iscaldone)) {
976 				if (longcal)
977 					sc->sc_ani.sc_noise_floor =
978 					    ath9k_hw_getchan_noise(ah,
979 					    ah->ah_curchan);
980 
981 				ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
982 				    "%s: calibrate chan %u/%x nf: %d\n",
983 				    __func__,
984 				    ah->ah_curchan->channel,
985 				    ah->ah_curchan->channelFlags,
986 				    sc->sc_ani.sc_noise_floor));
987 			} else {
988 				ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
989 				    "%s: calibrate chan %u/%x failed\n",
990 				    __func__,
991 				    ah->ah_curchan->channel,
992 				    ah->ah_curchan->channelFlags));
993 			}
994 			sc->sc_ani.sc_caldone = iscaldone;
995 		}
996 	}
997 
998 settimer:
999 	/*
1000 	 * Set timer interval based on previous results.
1001 	 * The interval must be the shortest necessary to satisfy ANI,
1002 	 * short calibration and long calibration.
1003 	 */
1004 	cal_interval = ATH_LONG_CALINTERVAL;
1005 	if (sc->sc_ah->ah_config.enable_ani)
1006 		cal_interval =
1007 		    min(cal_interval, (uint32_t)ATH_ANI_POLLINTERVAL);
1008 
1009 	if (!sc->sc_ani.sc_caldone)
1010 		cal_interval = min(cal_interval,
1011 		    (uint32_t)ATH_SHORT_CALINTERVAL);
1012 
1013 	sc->sc_cal_timer = 0;
1014 	sc->sc_cal_timer = timeout(arn_ani_calibrate, (void *)sc,
1015 	    drv_usectohz(cal_interval * 1000));
1016 }
1017 
1018 static void
1019 arn_stop_caltimer(struct arn_softc *sc)
1020 {
1021 	timeout_id_t tmp_id = 0;
1022 
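	/* keep cancelling until the pending timeout id stops changing */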
1023 	while ((sc->sc_cal_timer != 0) && (tmp_id != sc->sc_cal_timer)) {
1024 		tmp_id = sc->sc_cal_timer;
1025 		(void) untimeout(tmp_id);
1026 	}
1027 	sc->sc_cal_timer = 0;
1028 }
1029 
1030 static uint_t
1031 arn_isr(caddr_t arg)
1032 {
1033 	/* LINTED E_BAD_PTR_CAST_ALIGN */
1034 	struct arn_softc *sc = (struct arn_softc *)arg;
1035 	struct ath_hal *ah = sc->sc_ah;
1036 	enum ath9k_int status;
1037 	ieee80211com_t *ic = (ieee80211com_t *)sc;
1038 
1039 	ARN_LOCK(sc);
1040 
1041 	if (sc->sc_flags & SC_OP_INVALID) {
1042 		/*
1043 		 * The hardware is not ready/present, don't
1044 		 * touch anything. Note this can happen early
1045 		 * on if the IRQ is shared.
1046 		 */
1047 		ARN_UNLOCK(sc);
1048 		return (DDI_INTR_UNCLAIMED);
1049 	}
1050 	if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
1051 		ARN_UNLOCK(sc);
1052 		return (DDI_INTR_UNCLAIMED);
1053 	}
1054 
1055 	/*
1056 	 * Figure out the reason(s) for the interrupt. Note
1057 	 * that the hal returns a pseudo-ISR that may include
1058 	 * bits we haven't explicitly enabled so we mask the
1059 	 * value to insure we only process bits we requested.
1060 	 */
1061 	(void) ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
1062 
1063 	status &= sc->sc_imask; /* discard unasked-for bits */
1064 
1065 	/*
1066 	 * If there are no status bits set, then this interrupt was not
1067 	 * for me (should have been caught above).
1068 	 */
1069 	if (!status) {
1070 		ARN_UNLOCK(sc);
1071 		return (DDI_INTR_UNCLAIMED);
1072 	}
1073 
1074 	sc->sc_intrstatus = status;
1075 
1076 	if (status & ATH9K_INT_FATAL) {
1077 		/* need a chip reset */
1078 		ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1079 		    "ATH9K_INT_FATAL\n"));
1080 		goto reset;
1081 	} else if (status & ATH9K_INT_RXORN) {
1082 		/* need a chip reset */
1083 		ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1084 		    "ATH9K_INT_RXORN\n"));
1085 		goto reset;
1086 	} else {
1087 		if (status & ATH9K_INT_RXEOL) {
1088 			/*
1089 			 * NB: the hardware should re-read the link when
1090 			 * RXE bit is written, but it doesn't work
1091 			 * at least on older hardware revs.
1092 			 */
1093 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1094 			    "ATH9K_INT_RXEOL\n"));
1095 			sc->sc_rxlink = NULL;
1096 		}
1097 		if (status & ATH9K_INT_TXURN) {
1098 			/* bump tx trigger level */
1099 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1100 			    "ATH9K_INT_TXURN\n"));
1101 			(void) ath9k_hw_updatetxtriglevel(ah, B_TRUE);
1102 		}
1103 		/* XXX: optimize this */
1104 		if (status & ATH9K_INT_RX) {
1105 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1106 			    "ATH9K_INT_RX\n"));
1107 			sc->sc_rx_pend = 1;
1108 			ddi_trigger_softintr(sc->sc_softint_id);
1109 		}
1110 		if (status & ATH9K_INT_TX) {
1111 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1112 			    "ATH9K_INT_TX\n"));
1113 			if (ddi_taskq_dispatch(sc->sc_tq,
1114 			    arn_tx_int_proc, sc, DDI_NOSLEEP) !=
1115 			    DDI_SUCCESS) {
1116 				arn_problem("arn: arn_isr(): "
1117 				    "No memory for tx taskq\n");
1118 			}
1119 		}
1120 #ifdef ARN_ATH9K_INT_MIB
1121 		if (status & ATH9K_INT_MIB) {
1122 			/*
1123 			 * Disable interrupts until we service the MIB
1124 			 * interrupt; otherwise it will continue to
1125 			 * fire.
1126 			 */
1127 			(void) ath9k_hw_set_interrupts(ah, 0);
1128 			/*
1129 			 * Let the hal handle the event. We assume
1130 			 * it will clear whatever condition caused
1131 			 * the interrupt.
1132 			 */
1133 			ath9k_hw_procmibevent(ah, &sc->sc_halstats);
1134 			(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
1135 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1136 			    "ATH9K_INT_MIB\n"));
1137 		}
1138 #endif
1139 
1140 #ifdef ARN_ATH9K_INT_TIM_TIMER
1141 		if (status & ATH9K_INT_TIM_TIMER) {
1142 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1143 			    "ATH9K_INT_TIM_TIMER\n"));
1144 			if (!(ah->ah_caps.hw_caps &
1145 			    ATH9K_HW_CAP_AUTOSLEEP)) {
1146 				/*
1147 				 * Clear RxAbort bit so that we can
1148 				 * receive frames
1149 				 */
1150 				ath9k_hw_setrxabort(ah, 0);
1151 				goto reset;
1152 			}
1153 		}
1154 #endif
1155 
1156 		if (status & ATH9K_INT_BMISS) {
1157 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1158 			    "ATH9K_INT_BMISS\n"));
1159 #ifdef HW_BEACON_MISS_HANDLE
1160 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1161 			    "handle beacon miss by H/W mechanism\n"));
1162 			if (ddi_taskq_dispatch(sc->sc_tq, arn_bmiss_proc,
1163 			    sc, DDI_NOSLEEP) != DDI_SUCCESS) {
1164 				arn_problem("arn: arn_isr(): "
1165 				    "No memory available for bmiss taskq\n");
1166 			}
1167 #else
1168 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1169 			    "handle beacon miss by S/W mechanism\n"));
1170 #endif /* HW_BEACON_MISS_HANDLE */
1171 		}
1172 
1173 		ARN_UNLOCK(sc);
1174 
1175 #ifdef ARN_ATH9K_INT_CST
1176 		/* carrier sense timeout */
1177 		if (status & ATH9K_INT_CST) {
1178 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1179 			    "ATH9K_INT_CST\n"));
1180 			return (DDI_INTR_CLAIMED);
1181 		}
1182 #endif
1183 
1184 		if (status & ATH9K_INT_SWBA) {
1185 			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1186 			    "ATH9K_INT_SWBA\n"));
1187 			/* This will occur only in Host-AP or Ad-Hoc mode */
1188 			return (DDI_INTR_CLAIMED);
1189 		}
1190 	}
1191 
1192 	return (DDI_INTR_CLAIMED);
1193 reset:
1194 	ARN_DBG((ARN_DBG_INTERRUPT, "Reset for fatal error\n"));
1195 	(void) arn_reset(ic);
1196 	ARN_UNLOCK(sc);
1197 	return (DDI_INTR_CLAIMED);
1198 }
1199 
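/*
 * Map an ieee80211 channel to its index in ah_channels[]; -1 if not found.
 */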
1200 static int
1201 arn_get_channel(struct arn_softc *sc, struct ieee80211_channel *chan)
1202 {
1203 	int i;
1204 
1205 	for (i = 0; i < sc->sc_ah->ah_nchan; i++) {
1206 		if (sc->sc_ah->ah_channels[i].channel == chan->ich_freq)
1207 			return (i);
1208 	}
1209 
1210 	return (-1);
1211 }
1212 
1213 int
1214 arn_reset(ieee80211com_t *ic)
1215 {
1216 	struct arn_softc *sc = (struct arn_softc *)ic;
1217 	struct ath_hal *ah = sc->sc_ah;
1218 	int status;
1219 	int error = 0;
1220 
1221 	(void) ath9k_hw_set_interrupts(ah, 0);
1222 	arn_draintxq(sc, 0);
1223 	(void) arn_stoprecv(sc);
1224 
1225 	if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan, sc->tx_chan_width,
1226 	    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1227 	    sc->sc_ht_extprotspacing, B_FALSE, &status)) {
1228 		ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
1229 		    "unable to reset hardware; hal status %u\n", status));
1230 		error = EIO;
1231 	}
1232 
1233 	if (arn_startrecv(sc) != 0)
1234 		ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
1235 		    "unable to start recv logic\n"));
1236 
1237 	/*
1238 	 * We may be doing a reset in response to a request
1239 	 * that changes the channel so update any state that
1240 	 * might change as a result.
1241 	 */
1242 	arn_setcurmode(sc, arn_chan2mode(sc->sc_ah->ah_curchan));
1243 
1244 	arn_update_txpow(sc);
1245 
1246 	if (sc->sc_flags & SC_OP_BEACONS)
1247 		arn_beacon_config(sc);	/* restart beacons */
1248 
1249 	(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
1250 
1251 	return (error);
1252 }
1253 
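/*
 * Map a WME access category to the hardware tx queue set up at attach time.
 */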
1254 int
1255 arn_get_hal_qnum(uint16_t queue, struct arn_softc *sc)
1256 {
1257 	int qnum;
1258 
1259 	switch (queue) {
1260 	case WME_AC_VO:
1261 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1262 		break;
1263 	case WME_AC_VI:
1264 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1265 		break;
1266 	case WME_AC_BE:
1267 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1268 		break;
1269 	case WME_AC_BK:
1270 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1271 		break;
1272 	default:
1273 		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1274 		break;
1275 	}
1276 
1277 	return (qnum);
1278 }
1279 
1280 static struct {
1281 	uint32_t version;
1282 	const char *name;
1283 } ath_mac_bb_names[] = {
1284 	{ AR_SREV_VERSION_5416_PCI,	"5416" },
1285 	{ AR_SREV_VERSION_5416_PCIE,	"5418" },
1286 	{ AR_SREV_VERSION_9100,		"9100" },
1287 	{ AR_SREV_VERSION_9160,		"9160" },
1288 	{ AR_SREV_VERSION_9280,		"9280" },
1289 	{ AR_SREV_VERSION_9285,		"9285" }
1290 };
1291 
1292 static struct {
1293 	uint16_t version;
1294 	const char *name;
1295 } ath_rf_names[] = {
1296 	{ 0,				"5133" },
1297 	{ AR_RAD5133_SREV_MAJOR,	"5133" },
1298 	{ AR_RAD5122_SREV_MAJOR,	"5122" },
1299 	{ AR_RAD2133_SREV_MAJOR,	"2133" },
1300 	{ AR_RAD2122_SREV_MAJOR,	"2122" }
1301 };
1302 
1303 /*
1304  * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
1305  */
1306 
1307 static const char *
1308 arn_mac_bb_name(uint32_t mac_bb_version)
1309 {
1310 	int i;
1311 
1312 	for (i = 0; i < ARRAY_SIZE(ath_mac_bb_names); i++) {
1313 		if (ath_mac_bb_names[i].version == mac_bb_version) {
1314 			return (ath_mac_bb_names[i].name);
1315 		}
1316 	}
1317 
1318 	return ("????");
1319 }
1320 
1321 /*
1322  * Return the RF name. "????" is returned if the RF is unknown.
1323  */
1324 
1325 static const char *
1326 arn_rf_name(uint16_t rf_version)
1327 {
1328 	int i;
1329 
1330 	for (i = 0; i < ARRAY_SIZE(ath_rf_names); i++) {
1331 		if (ath_rf_names[i].version == rf_version) {
1332 			return (ath_rf_names[i].name);
1333 		}
1334 	}
1335 
1336 	return ("????");
1337 }
1338 
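/*
 * Scan timer handler: advance to the next channel and re-arm while the
 * state machine remains in SCAN.
 */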
1339 static void
1340 arn_next_scan(void *arg)
1341 {
1342 	ieee80211com_t *ic = arg;
1343 	struct arn_softc *sc = (struct arn_softc *)ic;
1344 
1345 	sc->sc_scan_timer = 0;
1346 	if (ic->ic_state == IEEE80211_S_SCAN) {
1347 		sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
1348 		    drv_usectohz(arn_dwelltime * 1000));
1349 		ieee80211_next_scan(ic);
1350 	}
1351 }
1352 
1353 static void
1354 arn_stop_scantimer(struct arn_softc *sc)
1355 {
1356 	timeout_id_t tmp_id = 0;
1357 
1358 	while ((sc->sc_scan_timer != 0) && (tmp_id != sc->sc_scan_timer)) {
1359 		tmp_id = sc->sc_scan_timer;
1360 		(void) untimeout(tmp_id);
1361 	}
1362 	sc->sc_scan_timer = 0;
1363 }
1364 
1365 static int32_t
1366 arn_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1367 {
1368 	struct arn_softc *sc = (struct arn_softc *)ic;
1369 	struct ath_hal *ah = sc->sc_ah;
1370 	struct ieee80211_node *in;
1371 	int32_t i, error;
1372 	uint8_t *bssid;
1373 	uint32_t rfilt;
1374 	enum ieee80211_state ostate;
1375 	struct ath9k_channel *channel;
1376 	int pos;
1377 
1378 	/* Should set up & init LED here */
1379 
1380 	if (sc->sc_flags & SC_OP_INVALID)
1381 		return (0);
1382 
1383 	ostate = ic->ic_state;
1384 	ARN_DBG((ARN_DBG_INIT, "arn: arn_newstate(): "
1385 	    "%x -> %x!\n", ostate, nstate));
1386 
1387 	ARN_LOCK(sc);
1388 
1389 	if (nstate != IEEE80211_S_SCAN)
1390 		arn_stop_scantimer(sc);
1391 	if (nstate != IEEE80211_S_RUN)
1392 		arn_stop_caltimer(sc);
1393 
1394 	/* Should set LED here */
1395 
1396 	if (nstate == IEEE80211_S_INIT) {
1397 		sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1398 		/*
1399 		 * Disable interrupts.
1400 		 */
1401 		(void) ath9k_hw_set_interrupts
1402 		    (ah, sc->sc_imask &~ ATH9K_INT_GLOBAL);
1403 
1404 #ifdef ARN_IBSS
1405 		if (ic->ic_opmode == IEEE80211_M_IBSS) {
1406 			(void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
1407 			arn_beacon_return(sc);
1408 		}
1409 #endif
1410 		ARN_UNLOCK(sc);
1411 		ieee80211_stop_watchdog(ic);
1412 		goto done;
1413 	}
1414 	in = ic->ic_bss;
1415 
1416 	pos = arn_get_channel(sc, ic->ic_curchan);
1417 
1418 	if (pos == -1) {
1419 		ARN_DBG((ARN_DBG_FATAL, "arn: "
1420 		    "%s: Invalid channel\n", __func__));
1421 		error = EINVAL;
1422 		ARN_UNLOCK(sc);
1423 		goto bad;
1424 	}
1425 	sc->tx_chan_width = ATH9K_HT_MACMODE_20;
1426 	sc->sc_ah->ah_channels[pos].chanmode =
1427 	    arn_chan2flags(ic, ic->ic_curchan);
1428 	channel = &sc->sc_ah->ah_channels[pos];
1429 	if (channel == NULL) {
1430 		arn_problem("arn_newstate(): channel == NULL");
1431 		ARN_UNLOCK(sc);
1432 		goto bad;
1433 	}
1434 	error = arn_set_channel(sc, channel);
1435 	if (error != 0) {
1436 		if (nstate != IEEE80211_S_SCAN) {
1437 			ARN_UNLOCK(sc);
1438 			ieee80211_reset_chan(ic);
1439 			goto bad;
1440 		}
1441 	}
1442 
1443 	/*
1444 	 * Get the receive filter according to the
1445 	 * operating mode and state
1446 	 */
1447 	rfilt = arn_calcrxfilter(sc);
1448 
1449 	if (nstate == IEEE80211_S_SCAN)
1450 		bssid = ic->ic_macaddr;
1451 	else
1452 		bssid = in->in_bssid;
1453 
1454 	ath9k_hw_setrxfilter(ah, rfilt);
1455 
1456 	if (nstate == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS)
1457 		ath9k_hw_write_associd(ah, bssid, in->in_associd);
1458 	else
1459 		ath9k_hw_write_associd(ah, bssid, 0);
1460 
1461 	/* Check for WLAN_CAPABILITY_PRIVACY ? */
1462 	if (ic->ic_flags & IEEE80211_F_PRIVACY) {
1463 		for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1464 			if (ath9k_hw_keyisvalid(ah, (uint16_t)i))
1465 				(void) ath9k_hw_keysetmac(ah, (uint16_t)i,
1466 				    bssid);
1467 		}
1468 	}
1469 
1470 	if (nstate == IEEE80211_S_RUN) {
1471 		switch (ic->ic_opmode) {
1472 #ifdef ARN_IBSS
1473 		case IEEE80211_M_IBSS:
1474 			/*
1475 			 * Allocate and setup the beacon frame.
1476 			 * Stop any previous beacon DMA.
1477 			 */
1478 			(void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
1479 			arn_beacon_return(sc);
1480 			error = arn_beacon_alloc(sc, in);
1481 			if (error != 0) {
1482 				ARN_UNLOCK(sc);
1483 				goto bad;
1484 			}
1485 			/*
1486 			 * If joining an adhoc network defer beacon timer
1487 			 * configuration to the next beacon frame so we
1488 			 * have a current TSF to use.  Otherwise we're
1489 			 * starting an ibss/bss so there's no need to delay.
1490 			 */
1491 			if (ic->ic_opmode == IEEE80211_M_IBSS &&
1492 			    ic->ic_bss->in_tstamp.tsf != 0) {
1493 				sc->sc_bsync = 1;
1494 			} else {
1495 				arn_beacon_config(sc);
1496 			}
1497 			break;
1498 #endif /* ARN_IBSS */
1499 		case IEEE80211_M_STA:
1500 			if (ostate != IEEE80211_S_RUN) {
1501 				/*
1502 				 * Defer beacon timer configuration to the next
1503 				 * beacon frame so we have a current TSF to use.
1504 				 * Any TSF collected when scanning is likely old
1505 				 */
1506 #ifdef ARN_IBSS
1507 				sc->sc_bsync = 1;
1508 #else
1509 				/* Configure the beacon and sleep timers. */
1510 				arn_beacon_config(sc);
1511 
1512 				/* reset rssi stats */
1513 				sc->sc_halstats.ns_avgbrssi =
1514 				    ATH_RSSI_DUMMY_MARKER;
1515 				sc->sc_halstats.ns_avgrssi =
1516 				    ATH_RSSI_DUMMY_MARKER;
1517 				sc->sc_halstats.ns_avgtxrssi =
1518 				    ATH_RSSI_DUMMY_MARKER;
1519 				sc->sc_halstats.ns_avgtxrate =
1520 				    ATH_RATE_DUMMY_MARKER;
1521 #endif /* ARN_IBSS */
1522 			}
1523 			break;
1524 		default:
1525 			break;
1526 		}
1527 	} else {
1528 		sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1529 		(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
1530 	}
1531 
1532 	/*
1533 	 * Reset the rate control state.
1534 	 */
1535 	arn_rate_ctl_reset(sc, nstate);
1536 
1537 	ARN_UNLOCK(sc);
1538 done:
1539 	/*
1540 	 * Invoke the parent method to complete the work.
1541 	 */
1542 	error = sc->sc_newstate(ic, nstate, arg);
1543 
1544 	/*
1545 	 * Finally, start any timers.
1546 	 */
1547 	if (nstate == IEEE80211_S_RUN) {
1548 		ieee80211_start_watchdog(ic, 1);
1549 		ASSERT(sc->sc_cal_timer == 0);
1550 		sc->sc_cal_timer = timeout(arn_ani_calibrate, (void *)sc,
1551 		    drv_usectohz(100 * 1000));
1552 	} else if ((nstate == IEEE80211_S_SCAN) && (ostate != nstate)) {
1553 		/* start ap/neighbor scan timer */
1554 		/* ASSERT(sc->sc_scan_timer == 0); */
1555 		if (sc->sc_scan_timer != 0) {
1556 			(void) untimeout(sc->sc_scan_timer);
1557 			sc->sc_scan_timer = 0;
1558 		}
1559 		sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
1560 		    drv_usectohz(arn_dwelltime * 1000));
1561 	}
1562 
1563 bad:
1564 	return (error);
1565 }
1566 
1567 static void
1568 arn_watchdog(void *arg)
1569 {
1570 	struct arn_softc *sc = arg;
1571 	ieee80211com_t *ic = &sc->sc_isc;
1572 	int ntimer = 0;
1573 
1574 	ARN_LOCK(sc);
1575 	ic->ic_watchdog_timer = 0;
1576 	if (sc->sc_flags & SC_OP_INVALID) {
1577 		ARN_UNLOCK(sc);
1578 		return;
1579 	}
1580 
1581 	if (ic->ic_state == IEEE80211_S_RUN) {
1582 		/*
1583 		 * Start the background rate control thread if we
1584 		 * are not configured to use a fixed xmit rate.
1585 		 */
1586 		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1587 			sc->sc_stats.ast_rate_calls++;
1588 			if (ic->ic_opmode == IEEE80211_M_STA)
1589 				arn_rate_ctl(ic, ic->ic_bss);
1590 			else
1591 				ieee80211_iterate_nodes(&ic->ic_sta,
1592 				    arn_rate_ctl, sc);
1593 		}
1594 
1595 #ifdef HW_BEACON_MISS_HANDLE
1596 		/* nothing to do here */
1597 #else
1598 		/* currently set 10 seconds as beacon miss threshold */
1599 		if (ic->ic_beaconmiss++ > 100) {
1600 			ARN_DBG((ARN_DBG_BEACON, "arn_watchdog(): "
1601 			    "beacon missed for 10 seconds, running "
1602 			    "ieee80211_new_state(ic, IEEE80211_S_INIT, -1)\n"));
1603 			ARN_UNLOCK(sc);
1604 			(void) ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1605 			return;
1606 		}
1607 #endif /* HW_BEACON_MISS_HANDLE */
1608 
1609 		ntimer = 1;
1610 	}
1611 	ARN_UNLOCK(sc);
1612 
1613 	ieee80211_watchdog(ic);
1614 	if (ntimer != 0)
1615 		ieee80211_start_watchdog(ic, ntimer);
1616 }
1617 
1618 static struct ieee80211_node *
1619 arn_node_alloc(ieee80211com_t *ic)
1620 {
1621 	struct ath_node *an;
1622 	struct arn_softc *sc = (struct arn_softc *)ic;
1623 
1624 	an = kmem_zalloc(sizeof (struct ath_node), KM_SLEEP);
1625 	arn_rate_update(sc, &an->an_node, 0);
1626 
1627 	return (&an->an_node);	/* KM_SLEEP allocation cannot return NULL */
1628 }
1629 
1630 static void
1631 arn_node_free(struct ieee80211_node *in)
1632 {
1633 	ieee80211com_t *ic = in->in_ic;
1634 	struct arn_softc *sc = (struct arn_softc *)ic;
1635 	struct ath_buf *bf;
1636 	struct ath_txq *txq;
1637 	int32_t i;
1638 
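	/* drop any tx buffer references to this node before freeing it */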
1639 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1640 		if (ARN_TXQ_SETUP(sc, i)) {
1641 			txq = &sc->sc_txq[i];
1642 			mutex_enter(&txq->axq_lock);
1643 			bf = list_head(&txq->axq_list);
1644 			while (bf != NULL) {
1645 				if (bf->bf_in == in) {
1646 					bf->bf_in = NULL;
1647 				}
1648 				bf = list_next(&txq->axq_list, bf);
1649 			}
1650 			mutex_exit(&txq->axq_lock);
1651 		}
1652 	}
1653 
1654 	ic->ic_node_cleanup(in);
1655 
1656 	if (in->in_wpa_ie != NULL)
1657 		ieee80211_free(in->in_wpa_ie);
1658 
1659 	if (in->in_wme_ie != NULL)
1660 		ieee80211_free(in->in_wme_ie);
1661 
1662 	if (in->in_htcap_ie != NULL)
1663 		ieee80211_free(in->in_htcap_ie);
1664 
1665 	kmem_free(in, sizeof (struct ath_node));
1666 }
1667 
1668 /*
1669  * Allocate tx/rx key slots for TKIP.  We allocate one slot for
1670  * each key. MIC is right after the decrypt/encrypt key.
1671  */
1672 static uint16_t
1673 arn_key_alloc_pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1674     ieee80211_keyix *rxkeyix)
1675 {
1676 	uint16_t i, keyix;
1677 
1678 	ASSERT(!sc->sc_splitmic);
1679 	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
1680 		uint8_t b = sc->sc_keymap[i];
1681 		if (b == 0xff)
1682 			continue;
1683 		for (keyix = i * NBBY; keyix < (i + 1) * NBBY;
1684 		    keyix++, b >>= 1) {
1685 			if ((b & 1) || is_set(keyix+64, sc->sc_keymap)) {
1686 				/* full pair unavailable */
1687 				continue;
1688 			}
1689 			set_bit(keyix, sc->sc_keymap);
1690 			set_bit(keyix+64, sc->sc_keymap);
1691 			ARN_DBG((ARN_DBG_KEYCACHE,
1692 			    "arn_key_alloc_pair(): key pair %u,%u\n",
1693 			    keyix, keyix+64));
1694 			*txkeyix = *rxkeyix = keyix;
1695 			return (1);
1696 		}
1697 	}
1698 	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_pair():"
1699 	    " out of pair space\n"));
1700 
1701 	return (0);
1702 }
1703 
1704 /*
1705  * Allocate tx/rx key slots for TKIP.  We allocate two slots for
1706  * each key, one for decrypt/encrypt and the other for the MIC.
1707  */
1708 static int
1709 arn_key_alloc_2pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1710     ieee80211_keyix *rxkeyix)
1711 {
1712 	uint16_t i, keyix;
1713 
1714 	ASSERT(sc->sc_splitmic);
1715 	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
1716 		uint8_t b = sc->sc_keymap[i];
1717 		if (b != 0xff) {
1718 			/*
1719 			 * One or more slots in this byte are free.
1720 			 */
1721 			keyix = i*NBBY;
1722 			while (b & 1) {
1723 		again:
1724 				keyix++;
1725 				b >>= 1;
1726 			}
1727 			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
1728 			if (is_set(keyix+32, sc->sc_keymap) ||
1729 			    is_set(keyix+64, sc->sc_keymap) ||
1730 			    is_set(keyix+32+64, sc->sc_keymap)) {
1731 				/* full pair unavailable */
1732 				if (keyix == (i+1)*NBBY) {
1733 					/* no slots were appropriate, advance */
1734 					continue;
1735 				}
1736 				goto again;
1737 			}
1738 			set_bit(keyix, sc->sc_keymap);
1739 			set_bit(keyix+64, sc->sc_keymap);
1740 			set_bit(keyix+32, sc->sc_keymap);
1741 			set_bit(keyix+32+64, sc->sc_keymap);
1742 			ARN_DBG((ARN_DBG_KEYCACHE,
1743 			    "arn_key_alloc_2pair(): key pair %u,%u %u,%u\n",
1744 			    keyix, keyix+64,
1745 			    keyix+32, keyix+32+64));
1746 			*txkeyix = *rxkeyix = keyix;
1747 			return (1);
1748 		}
1749 	}
1750 	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_2pair(): "
1751 	    " out of pair space\n"));
1752 
1753 	return (0);
1754 }
1755 /*
1756  * Allocate a single key cache slot.
1757  */
1758 static int
1759 arn_key_alloc_single(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1760     ieee80211_keyix *rxkeyix)
1761 {
1762 	uint16_t i, keyix;
1763 
1764 	/* try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
1765 	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap); i++) {
1766 		uint8_t b = sc->sc_keymap[i];
1767 
1768 		if (b != 0xff) {
1769 			/*
1770 			 * One or more slots are free.
1771 			 */
1772 			keyix = i*NBBY;
1773 			while (b & 1)
1774 				keyix++, b >>= 1;
1775 			set_bit(keyix, sc->sc_keymap);
1776 			ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_single(): "
1777 			    "key %u\n", keyix));
1778 			*txkeyix = *rxkeyix = keyix;
1779 			return (1);
1780 		}
1781 	}
1782 	return (0);
1783 }
1784 
1785 /*
1786  * Allocate one or more key cache slots for a unicast key.  The
1787  * key itself is needed only to identify the cipher.  For hardware
1788  * TKIP with split cipher+MIC keys we allocate two key cache slot
1789  * pairs so that we can setup separate TX and RX MIC keys.  Note
1790  * that the MIC key for a TKIP key at slot i is assumed by the
1791  * hardware to be at slot i+64.  This limits TKIP keys to the first
1792  * 64 entries.
1793  */
1794 /* ARGSUSED */
1795 int
1796 arn_key_alloc(ieee80211com_t *ic, const struct ieee80211_key *k,
1797     ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1798 {
1799 	struct arn_softc *sc = (struct arn_softc *)ic;
1800 
1801 	/*
1802 	 * We allocate two pairs for TKIP when using the h/w to do
1803 	 * the MIC.  For everything else, including software crypto,
1804 	 * we allocate a single entry.  Note that s/w crypto requires
1805 	 * a pass-through slot on the 5211 and 5212.  The 5210 does
1806 	 * not support pass-through cache entries and we map all
1807 	 * those requests to slot 0.
1808 	 */
1809 	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
1810 		return (arn_key_alloc_single(sc, keyix, rxkeyix));
1811 	} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
1812 	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1813 		if (sc->sc_splitmic)
1814 			return (arn_key_alloc_2pair(sc, keyix, rxkeyix));
1815 		else
1816 			return (arn_key_alloc_pair(sc, keyix, rxkeyix));
1817 	} else {
1818 		return (arn_key_alloc_single(sc, keyix, rxkeyix));
1819 	}
1820 }
1821 
1822 /*
1823  * Delete an entry in the key cache allocated by ath_key_alloc.
1824  */
1825 int
1826 arn_key_delete(ieee80211com_t *ic, const struct ieee80211_key *k)
1827 {
1828 	struct arn_softc *sc = (struct arn_softc *)ic;
1829 	struct ath_hal *ah = sc->sc_ah;
1830 	const struct ieee80211_cipher *cip = k->wk_cipher;
1831 	ieee80211_keyix keyix = k->wk_keyix;
1832 
1833 	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_delete():"
1834 	    " delete key %u ic_cipher=0x%x\n", keyix, cip->ic_cipher));
1835 
1836 	(void) ath9k_hw_keyreset(ah, keyix);
1837 	/*
1838 	 * Handle split tx/rx keying required for TKIP with h/w MIC.
1839 	 */
1840 	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
1841 	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
1842 		(void) ath9k_hw_keyreset(ah, keyix+32);		/* RX key */
1843 
1844 	if (keyix >= IEEE80211_WEP_NKID) {
1845 		/*
1846 		 * Don't touch keymap entries for global keys so
1847 		 * they are never considered for dynamic allocation.
1848 		 */
1849 		clr_bit(keyix, sc->sc_keymap);
1850 		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
1851 		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1852 			/*
1853 			 * If splitmic is true +64 is TX key MIC,
1854 			 * else +64 is RX key + RX key MIC.
1855 			 */
1856 			clr_bit(keyix+64, sc->sc_keymap);
1857 			if (sc->sc_splitmic) {
1858 				/* Rx key */
1859 				clr_bit(keyix+32, sc->sc_keymap);
1860 				/* RX key MIC */
1861 				clr_bit(keyix+32+64, sc->sc_keymap);
1862 			}
1863 		}
1864 	}
1865 	return (1);
1866 }
1867 
1868 /*
1869  * Set a TKIP key into the hardware.  This handles the
1870  * potential distribution of key state to multiple key
1871  * cache slots for TKIP.
1872  */
1873 static int
1874 arn_keyset_tkip(struct arn_softc *sc, const struct ieee80211_key *k,
1875     struct ath9k_keyval *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
1876 {
1877 	uint8_t *key_rxmic = NULL;
1878 	uint8_t *key_txmic = NULL;
1879 	uint8_t  *key = (uint8_t *)&(k->wk_key[0]);
1880 	struct ath_hal *ah = sc->sc_ah;
1881 
1882 	key_txmic = key + 16;
1883 	key_rxmic = key + 24;
1884 
1885 	if (mac == NULL) {
1886 		/* Group key installation */
1887 		(void) memcpy(hk->kv_mic,  key_rxmic, sizeof (hk->kv_mic));
1888 		return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
1889 		    mac, B_FALSE));
1890 	}
1891 	if (!sc->sc_splitmic) {
1892 		/*
1893 		 * data key goes at first index,
1894 		 * the hal handles the MIC keys at index+64.
1895 		 */
1896 		(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
1897 		(void) memcpy(hk->kv_txmic, key_txmic, sizeof (hk->kv_txmic));
1898 		return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
1899 		    mac, B_FALSE));
1900 	}
1901 	/*
1902 	 * TX key goes at first index, RX key at +32.
1903 	 * The hal handles the MIC keys at index+64.
1904 	 */
1905 	(void) memcpy(hk->kv_mic, key_txmic, sizeof (hk->kv_mic));
1906 	if (!(ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, NULL,
1907 	    B_FALSE))) {
1908 		/* Txmic entry failed. No need to proceed further */
1909 		ARN_DBG((ARN_DBG_KEYCACHE,
1910 		    "%s Setting TX MIC Key Failed\n", __func__));
1911 		return (0);
1912 	}
1913 
1914 	(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
1915 
1916 	/* XXX delete tx key on failure? */
1917 	return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, mac, B_FALSE));
1918 
1919 }
1920 
1921 int
1922 arn_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
1923     const uint8_t mac[IEEE80211_ADDR_LEN])
1924 {
1925 	struct arn_softc *sc = (struct arn_softc *)ic;
1926 	const struct ieee80211_cipher *cip = k->wk_cipher;
1927 	struct ath9k_keyval hk;
1928 
1929 	/* cipher table */
1930 	static const uint8_t ciphermap[] = {
1931 		ATH9K_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
1932 		ATH9K_CIPHER_TKIP,		/* IEEE80211_CIPHER_TKIP */
1933 		ATH9K_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
1934 		ATH9K_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
1935 		ATH9K_CIPHER_CKIP,		/* IEEE80211_CIPHER_CKIP */
1936 		ATH9K_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
1937 	};
1938 
1939 	bzero(&hk, sizeof (hk));
1940 
1941 	/*
1942 	 * Software crypto uses a "clear key" so that the non-crypto
1943 	 * state kept in the key cache is maintained and rx frames
1944 	 * have an entry to match.
1945 	 */
1946 	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
1947 		ASSERT(cip->ic_cipher < 6);
1948 		hk.kv_type = ciphermap[cip->ic_cipher];
1949 		hk.kv_len = k->wk_keylen;
1950 		bcopy(k->wk_key, hk.kv_val, k->wk_keylen);
1951 	} else {
1952 		hk.kv_type = ATH9K_CIPHER_CLR;
1953 	}
1954 
1955 	if (hk.kv_type == ATH9K_CIPHER_TKIP &&
1956 	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1957 		return (arn_keyset_tkip(sc, k, &hk, mac));
1958 	} else {
1959 		return (ath9k_hw_set_keycache_entry(sc->sc_ah,
1960 		    k->wk_keyix, &hk, mac, B_FALSE));
1961 	}
1962 }
1963 
1964 /*
1965  * Enable/Disable short slot timing
1966  */
1967 void
1968 arn_set_shortslot(ieee80211com_t *ic, int onoff)
1969 {
1970 	struct ath_hal *ah = ((struct arn_softc *)ic)->sc_ah;
1971 
1972 	if (onoff)
1973 		(void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
1974 	else
1975 		(void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_20);
1976 }
1977 
1978 static int
1979 arn_open(struct arn_softc *sc)
1980 {
1981 	ieee80211com_t *ic = (ieee80211com_t *)sc;
1982 	struct ieee80211_channel *curchan = ic->ic_curchan;
1983 	struct ath9k_channel *init_channel;
1984 	int error = 0, pos, status;
1985 
1986 	ARN_LOCK_ASSERT(sc);
1987 
1988 	pos = arn_get_channel(sc, curchan);
1989 	if (pos == -1) {
1990 		ARN_DBG((ARN_DBG_FATAL, "arn: "
1991 		    "%s: Invalid channel\n", __func__));
1992 		error = EINVAL;
1993 		goto error;
1994 	}
1995 
1996 	sc->tx_chan_width = ATH9K_HT_MACMODE_20;
1997 
1998 	if (sc->sc_curmode == ATH9K_MODE_11A) {
1999 		sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_A;
2000 	} else {
2001 		sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_G;
2002 	}
2003 
2004 	init_channel = &sc->sc_ah->ah_channels[pos];
2005 
2006 	/* Reset SERDES registers */
2007 	ath9k_hw_configpcipowersave(sc->sc_ah, 0);
2008 
2009 	/*
2010 	 * The basic interface to setting the hardware in a good
2011 	 * state is ``reset''.	On return the hardware is known to
2012 	 * be powered up and with interrupts disabled.	This must
2013 	 * be followed by initialization of the appropriate bits
2014 	 * and then setup of the interrupt mask.
2015 	 */
2016 	if (!ath9k_hw_reset(sc->sc_ah, init_channel,
2017 	    sc->tx_chan_width, sc->sc_tx_chainmask,
2018 	    sc->sc_rx_chainmask, sc->sc_ht_extprotspacing,
2019 	    B_FALSE, &status)) {
2020 		ARN_DBG((ARN_DBG_FATAL, "arn: "
2021 		    "%s: unable to reset hardware; hal status %u "
2022 		    "(freq %u flags 0x%x)\n", __func__, status,
2023 		    init_channel->channel, init_channel->channelFlags));
2024 
2025 		error = EIO;
2026 		goto error;
2027 	}
2028 
2029 	/*
2030 	 * This is needed only to setup initial state
2031 	 * but it's best done after a reset.
2032 	 */
2033 	arn_update_txpow(sc);
2034 
2035 	/*
2036 	 * Setup the hardware after reset:
2037 	 * The receive engine is set going.
2038 	 * Frame transmit is handled entirely
2039 	 * in the frame output path; there's nothing to do
2040 	 * here except setup the interrupt mask.
2041 	 */
2042 	if (arn_startrecv(sc) != 0) {
2043 		ARN_DBG((ARN_DBG_INIT, "arn: "
2044 		    "%s: unable to start recv logic\n", __func__));
2045 		error = EIO;
2046 		goto error;
2047 	}
2048 
2049 	/* Setup our intr mask. */
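	/*
	 * The baseline mask covers tx/rx completions, rx descriptor
	 * end-of-list and overrun conditions, fatal hardware errors,
	 * and the global interrupt enable; optional bits are added below.
	 */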
2050 	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX |
2051 	    ATH9K_INT_RXEOL | ATH9K_INT_RXORN |
2052 	    ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
2053 #ifdef ARN_ATH9K_HW_CAP_GTT
2054 	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
2055 		sc->sc_imask |= ATH9K_INT_GTT;
2056 #endif
2057 
2058 #ifdef ARN_ATH9K_HW_CAP_GTT
2059 	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
2060 		sc->sc_imask |= ATH9K_INT_CST;
2061 #endif
2062 
2063 	/*
2064 	 * Enable MIB interrupts when there are hardware phy counters.
2065 	 * Note we only do this (at the moment) for station mode.
2066 	 */
2067 #ifdef ARN_ATH9K_INT_MIB
2068 	if (ath9k_hw_phycounters(sc->sc_ah) &&
2069 	    ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
2070 	    (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
2071 		sc->sc_imask |= ATH9K_INT_MIB;
2072 #endif
2073 	/*
2074 	 * Some hardware processes the TIM IE and fires an
2075 	 * interrupt when the TIM bit is set.  For hardware
2076 	 * that does, if not overridden by configuration,
2077 	 * enable the TIM interrupt when operating as station.
2078 	 */
2079 #ifdef ARN_ATH9K_INT_TIM
2080 	if ((sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
2081 	    (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
2082 	    !sc->sc_config.swBeaconProcess)
2083 		sc->sc_imask |= ATH9K_INT_TIM;
2084 #endif
2085 	if (arn_chan2mode(init_channel) != sc->sc_curmode)
2086 		arn_setcurmode(sc, arn_chan2mode(init_channel));
2087 	ARN_DBG((ARN_DBG_INIT, "arn: "
2088 	    "%s: current mode after arn_setcurmode is %d\n",
2089 	    __func__, sc->sc_curmode));
2090 
2091 	sc->sc_isrunning = 1;
2092 
2093 	/* Disable BMISS interrupt when we're not associated */
2094 	sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
2095 	(void) ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
2096 
2097 	return (0);
2098 
2099 error:
2100 	return (error);
2101 }
2102 
2103 static void
2104 arn_close(struct arn_softc *sc)
2105 {
2106 	ieee80211com_t *ic = (ieee80211com_t *)sc;
2107 	struct ath_hal *ah = sc->sc_ah;
2108 
2109 	ARN_LOCK_ASSERT(sc);
2110 
2111 	if (!sc->sc_isrunning)
2112 		return;
2113 
2114 	/*
2115 	 * Shutdown the hardware and driver
2116 	 * Note that some of this work is not possible if the
2117 	 * hardware is gone (invalid).
2118 	 */
2119 	ARN_UNLOCK(sc);
2120 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2121 	ieee80211_stop_watchdog(ic);
2122 	ARN_LOCK(sc);
2123 
2124 	/*
2125 	 * make sure h/w will not generate any interrupt
2126 	 * before setting the invalid flag.
2127 	 */
2128 	(void) ath9k_hw_set_interrupts(ah, 0);
2129 
2130 	if (!(sc->sc_flags & SC_OP_INVALID)) {
2131 		arn_draintxq(sc, 0);
2132 		(void) arn_stoprecv(sc);
2133 		(void) ath9k_hw_phy_disable(ah);
2134 	} else {
2135 		sc->sc_rxlink = NULL;
2136 	}
2137 
2138 	sc->sc_isrunning = 0;
2139 }
2140 
2141 /*
2142  * MAC callback functions
2143  */
2144 static int
2145 arn_m_stat(void *arg, uint_t stat, uint64_t *val)
2146 {
2147 	struct arn_softc *sc = arg;
2148 	ieee80211com_t *ic = (ieee80211com_t *)sc;
2149 	struct ieee80211_node *in;
2150 	struct ieee80211_rateset *rs;
2151 
2152 	ARN_LOCK(sc);
2153 	switch (stat) {
2154 	case MAC_STAT_IFSPEED:
2155 		in = ic->ic_bss;
2156 		rs = &in->in_rates;
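		/*
		 * in_txrate indexes the negotiated rate set; rates are
		 * kept in 500 kb/s units (after masking the basic-rate
		 * flag with IEEE80211_RATE_VAL), so dividing by 2 gives
		 * Mb/s and the multiply scales to bits per second.
		 */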
2157 		*val = (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) / 2 *
2158 		    1000000ull;
2159 		break;
2160 	case MAC_STAT_NOXMTBUF:
2161 		*val = sc->sc_stats.ast_tx_nobuf +
2162 		    sc->sc_stats.ast_tx_nobufmgt;
2163 		break;
2164 	case MAC_STAT_IERRORS:
2165 		*val = sc->sc_stats.ast_rx_tooshort;
2166 		break;
2167 	case MAC_STAT_RBYTES:
2168 		*val = ic->ic_stats.is_rx_bytes;
2169 		break;
2170 	case MAC_STAT_IPACKETS:
2171 		*val = ic->ic_stats.is_rx_frags;
2172 		break;
2173 	case MAC_STAT_OBYTES:
2174 		*val = ic->ic_stats.is_tx_bytes;
2175 		break;
2176 	case MAC_STAT_OPACKETS:
2177 		*val = ic->ic_stats.is_tx_frags;
2178 		break;
2179 	case MAC_STAT_OERRORS:
2180 	case WIFI_STAT_TX_FAILED:
2181 		*val = sc->sc_stats.ast_tx_fifoerr +
2182 		    sc->sc_stats.ast_tx_xretries +
2183 		    sc->sc_stats.ast_tx_discard;
2184 		break;
2185 	case WIFI_STAT_TX_RETRANS:
2186 		*val = sc->sc_stats.ast_tx_xretries;
2187 		break;
2188 	case WIFI_STAT_FCS_ERRORS:
2189 		*val = sc->sc_stats.ast_rx_crcerr;
2190 		break;
2191 	case WIFI_STAT_WEP_ERRORS:
2192 		*val = sc->sc_stats.ast_rx_badcrypt;
2193 		break;
2194 	case WIFI_STAT_TX_FRAGS:
2195 	case WIFI_STAT_MCAST_TX:
2196 	case WIFI_STAT_RTS_SUCCESS:
2197 	case WIFI_STAT_RTS_FAILURE:
2198 	case WIFI_STAT_ACK_FAILURE:
2199 	case WIFI_STAT_RX_FRAGS:
2200 	case WIFI_STAT_MCAST_RX:
2201 	case WIFI_STAT_RX_DUPS:
2202 		ARN_UNLOCK(sc);
2203 		return (ieee80211_stat(ic, stat, val));
2204 	default:
2205 		ARN_UNLOCK(sc);
2206 		return (ENOTSUP);
2207 	}
2208 	ARN_UNLOCK(sc);
2209 
2210 	return (0);
2211 }
2212 
2213 int
2214 arn_m_start(void *arg)
2215 {
2216 	struct arn_softc *sc = arg;
2217 	int err = 0;
2218 
2219 	ARN_LOCK(sc);
2220 
2221 	/*
2222 	 * Stop anything previously setup.  This is safe
2223 	 * whether this is the first time through or not.
2224 	 */
2225 
2226 	arn_close(sc);
2227 
2228 	if ((err = arn_open(sc)) != 0) {
2229 		ARN_UNLOCK(sc);
2230 		return (err);
2231 	}
2232 
2233 	/* H/W is ready now */
2234 	sc->sc_flags &= ~SC_OP_INVALID;
2235 
2236 	ARN_UNLOCK(sc);
2237 
2238 	return (0);
2239 }
2240 
2241 static void
2242 arn_m_stop(void *arg)
2243 {
2244 	struct arn_softc *sc = arg;
2245 
2246 	ARN_LOCK(sc);
2247 	arn_close(sc);
2248 
2249 	/* disable HAL and put h/w to sleep */
2250 	(void) ath9k_hw_disable(sc->sc_ah);
2251 	ath9k_hw_configpcipowersave(sc->sc_ah, 1);
2252 
2253 	/* XXX: hardware will not be ready in suspend state */
2254 	sc->sc_flags |= SC_OP_INVALID;
2255 	ARN_UNLOCK(sc);
2256 }
2257 
2258 static int
2259 arn_m_promisc(void *arg, boolean_t on)
2260 {
2261 	struct arn_softc *sc = arg;
2262 	struct ath_hal *ah = sc->sc_ah;
2263 	uint32_t rfilt;
2264 
2265 	ARN_LOCK(sc);
2266 
2267 	rfilt = ath9k_hw_getrxfilter(ah);
2268 	if (on)
2269 		rfilt |= ATH9K_RX_FILTER_PROM;
2270 	else
2271 		rfilt &= ~ATH9K_RX_FILTER_PROM;
2272 	sc->sc_promisc = on;
2273 	ath9k_hw_setrxfilter(ah, rfilt);
2274 
2275 	ARN_UNLOCK(sc);
2276 
2277 	return (0);
2278 }
2279 
2280 static int
2281 arn_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2282 {
2283 	struct arn_softc *sc = arg;
2284 	struct ath_hal *ah = sc->sc_ah;
2285 	uint32_t val, index, bit;
2286 	uint8_t pos;
2287 	uint32_t *mfilt = sc->sc_mcast_hash;
2288 
2289 	ARN_LOCK(sc);
2290 
2291 	/* calculate XOR of eight 6-bit values */
2292 	val = ARN_LE_READ_32(mca + 0);
2293 	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2294 	val = ARN_LE_READ_32(mca + 3);
2295 	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2296 	pos &= 0x3f;
2297 	index = pos / 32;
2298 	bit = 1 << (pos % 32);
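	/*
	 * pos selects one of 64 hash bits; index/bit map it onto the two
	 * 32-bit multicast filter words programmed into the hardware via
	 * ath9k_hw_setmcastfilter() below.
	 */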
2299 
2300 	if (add) {	/* enable multicast */
2301 		sc->sc_mcast_refs[pos]++;
2302 		mfilt[index] |= bit;
2303 	} else {	/* disable multicast */
2304 		if (--sc->sc_mcast_refs[pos] == 0)
2305 			mfilt[index] &= ~bit;
2306 	}
2307 	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
2308 
2309 	ARN_UNLOCK(sc);
2310 	return (0);
2311 }
2312 
2313 static int
2314 arn_m_unicst(void *arg, const uint8_t *macaddr)
2315 {
2316 	struct arn_softc *sc = arg;
2317 	struct ath_hal *ah = sc->sc_ah;
2318 	ieee80211com_t *ic = (ieee80211com_t *)sc;
2319 
2320 	ARN_DBG((ARN_DBG_XMIT, "arn: arn_m_unicst(): "
2321 	    "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
2322 	    macaddr[0], macaddr[1], macaddr[2],
2323 	    macaddr[3], macaddr[4], macaddr[5]));
2324 
2325 	ARN_LOCK(sc);
2326 	IEEE80211_ADDR_COPY(sc->sc_isc.ic_macaddr, macaddr);
2327 	(void) ath9k_hw_setmac(ah, sc->sc_isc.ic_macaddr);
2328 	(void) arn_reset(ic);
2329 	ARN_UNLOCK(sc);
2330 	return (0);
2331 }
2332 
2333 static mblk_t *
2334 arn_m_tx(void *arg, mblk_t *mp)
2335 {
2336 	struct arn_softc *sc = arg;
2337 	int error = 0;
2338 	mblk_t *next;
2339 	ieee80211com_t *ic = (ieee80211com_t *)sc;
2340 
2341 	/*
2342 	 * No data frames go out unless we're associated; this
2343 	 * should not happen as the 802.11 layer does not enable
2344 	 * the xmit queue until we enter the RUN state.
2345 	 */
2346 	if (ic->ic_state != IEEE80211_S_RUN) {
2347 		ARN_DBG((ARN_DBG_XMIT, "arn: arn_m_tx(): "
2348 		    "discard, state %u\n", ic->ic_state));
2349 		sc->sc_stats.ast_tx_discard++;
2350 		freemsgchain(mp);
2351 		return (NULL);
2352 	}
2353 
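	/*
	 * On ENOMEM the unsent remainder of the chain is handed back to
	 * the MAC layer for a later retry (tx flow control); any other
	 * error drops the rest of the chain.
	 */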
2354 	while (mp != NULL) {
2355 		next = mp->b_next;
2356 		mp->b_next = NULL;
2357 		error = arn_tx(ic, mp, IEEE80211_FC0_TYPE_DATA);
2358 		if (error != 0) {
2359 			mp->b_next = next;
2360 			if (error == ENOMEM) {
2361 				break;
2362 			} else {
2363 				freemsgchain(mp);
2364 				return (NULL);
2365 			}
2366 		}
2367 		mp = next;
2368 	}
2369 
2370 	return (mp);
2371 }
2372 
2373 static void
2374 arn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2375 {
2376 	struct arn_softc *sc = arg;
2377 	int32_t err;
2378 
2379 	err = ieee80211_ioctl(&sc->sc_isc, wq, mp);
2380 
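	/*
	 * ENETRESET from the net80211 ioctl path means the new setting
	 * only takes effect after a restart, so bounce the interface and
	 * kick off a fresh scan unless the hardware is marked invalid.
	 */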
2381 	ARN_LOCK(sc);
2382 	if (err == ENETRESET) {
2383 		if (!(sc->sc_flags & SC_OP_INVALID)) {
2384 			ARN_UNLOCK(sc);
2385 
2386 			(void) arn_m_start(sc);
2387 
2388 			(void) ieee80211_new_state(&sc->sc_isc,
2389 			    IEEE80211_S_SCAN, -1);
2390 			ARN_LOCK(sc);
2391 		}
2392 	}
2393 	ARN_UNLOCK(sc);
2394 }
2395 
2396 static int
2397 arn_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2398     uint_t wldp_length, const void *wldp_buf)
2399 {
2400 	struct arn_softc *sc = arg;
2401 	int	err;
2402 
2403 	err = ieee80211_setprop(&sc->sc_isc, pr_name, wldp_pr_num,
2404 	    wldp_length, wldp_buf);
2405 
2406 	ARN_LOCK(sc);
2407 
2408 	if (err == ENETRESET) {
2409 		if (!(sc->sc_flags & SC_OP_INVALID)) {
2410 			ARN_UNLOCK(sc);
2411 			(void) arn_m_start(sc);
2412 			(void) ieee80211_new_state(&sc->sc_isc,
2413 			    IEEE80211_S_SCAN, -1);
2414 			ARN_LOCK(sc);
2415 		}
2416 		err = 0;
2417 	}
2418 
2419 	ARN_UNLOCK(sc);
2420 
2421 	return (err);
2422 }
2423 
2424 /* ARGSUSED */
2425 static int
2426 arn_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2427     uint_t pr_flags, uint_t wldp_length, void *wldp_buf, uint_t *perm)
2428 {
2429 	struct arn_softc *sc = arg;
2430 	int	err = 0;
2431 
2432 	err = ieee80211_getprop(&sc->sc_isc, pr_name, wldp_pr_num,
2433 	    pr_flags, wldp_length, wldp_buf, perm);
2434 
2435 	return (err);
2436 }
2437 
2438 /* determine the PCI cache line size; sc_cachelsz is kept in bytes */
2439 static void
2440 arn_pci_config_cachesize(struct arn_softc *sc)
2441 {
2442 	uint8_t csz;
2443 
2444 	/*
2445 	 * Cache line size is used to size and align various
2446 	 * structures used to communicate with the hardware.
2447 	 */
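	/* e.g. a config value of 8 (4-byte words) yields sc_cachelsz = 32 bytes */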
2448 	csz = pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ);
2449 	if (csz == 0) {
2450 		/*
2451 		 * We must have this set up properly for rx buffer
2452 		 * DMA to work, so force a reasonable value here if it
2453 		 * comes up zero.
2454 		 */
2455 		csz = ATH_DEF_CACHE_BYTES / sizeof (uint32_t);
2456 		pci_config_put8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ,
2457 		    csz);
2458 	}
2459 	sc->sc_cachelsz = csz << 2;
2460 }
2461 
2462 static int
2463 arn_pci_setup(struct arn_softc *sc)
2464 {
2465 	uint16_t command;
2466 
2467 	/*
2468 	 * Enable memory mapping and bus mastering
2469 	 */
2470 	ASSERT(sc != NULL);
2471 	command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
2472 	command	|= PCI_COMM_MAE | PCI_COMM_ME;
2473 	pci_config_put16(sc->sc_cfg_handle, PCI_CONF_COMM, command);
2474 	command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
2475 	if ((command & PCI_COMM_MAE) == 0) {
2476 		arn_problem("arn: arn_pci_setup(): "
2477 		    "failed to enable memory mapping\n");
2478 		return (EIO);
2479 	}
2480 	if ((command & PCI_COMM_ME) == 0) {
2481 		arn_problem("arn: arn_pci_setup(): "
2482 		    "failed to enable bus mastering\n");
2483 		return (EIO);
2484 	}
2485 	ARN_DBG((ARN_DBG_INIT, "arn: arn_pci_setup(): "
2486 	    "set command reg to 0x%x \n", command));
2487 
2488 	return (0);
2489 }
2490 
2491 static void
2492 arn_get_hw_encap(struct arn_softc *sc)
2493 {
2494 	ieee80211com_t *ic;
2495 	struct ath_hal *ah;
2496 
2497 	ic = (ieee80211com_t *)sc;
2498 	ah = sc->sc_ah;
2499 
2500 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2501 	    ATH9K_CIPHER_AES_CCM, NULL))
2502 		ic->ic_caps |= IEEE80211_C_AES_CCM;
2503 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2504 	    ATH9K_CIPHER_AES_OCB, NULL))
2505 		ic->ic_caps |= IEEE80211_C_AES;
2506 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2507 	    ATH9K_CIPHER_TKIP, NULL))
2508 		ic->ic_caps |= IEEE80211_C_TKIP;
2509 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2510 	    ATH9K_CIPHER_WEP, NULL))
2511 		ic->ic_caps |= IEEE80211_C_WEP;
2512 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2513 	    ATH9K_CIPHER_MIC, NULL))
2514 		ic->ic_caps |= IEEE80211_C_TKIPMIC;
2515 }
2516 
2517 static int
2518 arn_resume(dev_info_t *devinfo)
2519 {
2520 	struct arn_softc *sc;
2521 	int ret = DDI_SUCCESS;
2522 
2523 	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
2524 	if (sc == NULL) {
2525 		ARN_DBG((ARN_DBG_INIT, "arn: arn_resume(): "
2526 		    "failed to get soft state\n"));
2527 		return (DDI_FAILURE);
2528 	}
2529 
2530 	ARN_LOCK(sc);
2531 	/*
2532 	 * Set up config space command register(s). Refuse
2533 	 * to resume on failure.
2534 	 */
2535 	if (arn_pci_setup(sc) != 0) {
2536 		ARN_DBG((ARN_DBG_INIT, "arn: arn_resume(): "
2537 		    "arn_pci_setup() failed\n"));
2538 		ARN_UNLOCK(sc);
2539 		return (DDI_FAILURE);
2540 	}
2541 
2542 	if (!(sc->sc_flags & SC_OP_INVALID))
2543 		ret = arn_open(sc);
2544 	ARN_UNLOCK(sc);
2545 
2546 	return (ret);
2547 }
2548 
2549 static int
2550 arn_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2551 {
2552 	struct arn_softc *sc;
2553 	int		instance;
2554 	int		status;
2555 	int32_t		err;
2556 	uint16_t	vendor_id;
2557 	uint16_t	device_id;
2558 	uint32_t	i;
2559 	uint32_t	val;
2560 	char		strbuf[32];
2561 	ieee80211com_t *ic;
2562 	struct ath_hal *ah;
2563 	wifi_data_t wd = { 0 };
2564 	mac_register_t *macp;
2565 
2566 	switch (cmd) {
2567 	case DDI_ATTACH:
2568 		break;
2569 	case DDI_RESUME:
2570 		return (arn_resume(devinfo));
2571 	default:
2572 		return (DDI_FAILURE);
2573 	}
2574 
2575 	instance = ddi_get_instance(devinfo);
2576 	if (ddi_soft_state_zalloc(arn_soft_state_p, instance) != DDI_SUCCESS) {
2577 		ARN_DBG((ARN_DBG_ATTACH, "arn: "
2578 		    "%s: Unable to alloc softstate\n", __func__));
2579 		return (DDI_FAILURE);
2580 	}
2581 
2582 	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
2583 	ic = (ieee80211com_t *)sc;
2584 	sc->sc_dev = devinfo;
2585 
2586 	mutex_init(&sc->sc_genlock, NULL, MUTEX_DRIVER, NULL);
2587 	mutex_init(&sc->sc_serial_rw, NULL, MUTEX_DRIVER, NULL);
2588 	mutex_init(&sc->sc_txbuflock, NULL, MUTEX_DRIVER, NULL);
2589 	mutex_init(&sc->sc_rxbuflock, NULL, MUTEX_DRIVER, NULL);
2590 	mutex_init(&sc->sc_resched_lock, NULL, MUTEX_DRIVER, NULL);
2591 #ifdef ARN_IBSS
2592 	mutex_init(&sc->sc_bcbuflock, NULL, MUTEX_DRIVER, NULL);
2593 #endif
2594 
2595 	sc->sc_flags |= SC_OP_INVALID;
2596 
2597 	err = pci_config_setup(devinfo, &sc->sc_cfg_handle);
2598 	if (err != DDI_SUCCESS) {
2599 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2600 		    "pci_config_setup() failed"));
2601 		goto attach_fail0;
2602 	}
2603 
2604 	if (arn_pci_setup(sc) != 0)
2605 		goto attach_fail1;
2606 
2607 	/* Cache line size set up */
2608 	arn_pci_config_cachesize(sc);
2609 
2610 	vendor_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_VENID);
2611 	device_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_DEVID);
2612 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): vendor 0x%x, "
2613 	    "device id 0x%x, cache size %d\n",
2614 	    vendor_id, device_id,
2615 	    pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ)));
2616 
2617 	pci_config_put8(sc->sc_cfg_handle, PCI_CONF_LATENCY_TIMER, 0xa8);
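	/*
	 * The masked write to config offset 0x40 below presumably clears
	 * the byte at 0x41, which other ath drivers describe as the PCI
	 * retry-timeout register.
	 */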
2618 	val = pci_config_get32(sc->sc_cfg_handle, 0x40);
2619 	if ((val & 0x0000ff00) != 0)
2620 		pci_config_put32(sc->sc_cfg_handle, 0x40, val & 0xffff00ff);
2621 
2622 	err = ddi_regs_map_setup(devinfo, 1,
2623 	    &sc->mem, 0, 0, &arn_reg_accattr, &sc->sc_io_handle);
2624 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2625 	    "regs map1 = %x err=%d\n", sc->mem, err));
2626 	if (err != DDI_SUCCESS) {
2627 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2628 		    "ddi_regs_map_setup() failed"));
2629 		goto attach_fail1;
2630 	}
2631 
2632 	ah = ath9k_hw_attach(device_id, sc, sc->mem, &status);
2633 	if (ah == NULL) {
2634 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2635 		    "unable to attach hw: H/W status %u\n",
2636 		    status));
2637 		goto attach_fail2;
2638 	}
2639 	sc->sc_ah = ah;
2640 
2641 	ath9k_hw_getmac(ah, ic->ic_macaddr);
2642 
2643 	/* Get the hardware key cache size. */
2644 	sc->sc_keymax = ah->ah_caps.keycache_size;
2645 	if (sc->sc_keymax > ATH_KEYMAX) {
2646 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2647 		    "Warning, using only %u entries in %u key cache\n",
2648 		    ATH_KEYMAX, sc->sc_keymax));
2649 		sc->sc_keymax = ATH_KEYMAX;
2650 	}
2651 
2652 	/*
2653 	 * Reset the key cache since some parts do not
2654 	 * reset the contents on initial power up.
2655 	 */
2656 	for (i = 0; i < sc->sc_keymax; i++)
2657 		(void) ath9k_hw_keyreset(ah, (uint16_t)i);
2658 	/*
2659 	 * Mark key cache slots associated with global keys
2660 	 * as in use.  If we knew TKIP was not to be used we
2661 	 * could leave the +32, +64, and +32+64 slots free.
2662 	 * XXX only for splitmic.
2663 	 */
2664 	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
2665 		set_bit(i, sc->sc_keymap);
2666 		set_bit(i + 32, sc->sc_keymap);
2667 		set_bit(i + 64, sc->sc_keymap);
2668 		set_bit(i + 32 + 64, sc->sc_keymap);
2669 	}
2670 
2671 	/* Collect the channel list using the default country code */
2672 	err = arn_setup_channels(sc);
2673 	if (err == EINVAL) {
2674 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2675 		    "ERR:arn_setup_channels\n"));
2676 		goto attach_fail3;
2677 	}
2678 
2679 	/* default to STA mode */
2680 	sc->sc_ah->ah_opmode = ATH9K_M_STA;
2681 
2682 	/* Setup rate tables */
2683 	arn_rate_attach(sc);
2684 	arn_setup_rates(sc, IEEE80211_MODE_11A);
2685 	arn_setup_rates(sc, IEEE80211_MODE_11B);
2686 	arn_setup_rates(sc, IEEE80211_MODE_11G);
2687 
2688 	/* Setup current mode here */
2689 	arn_setcurmode(sc, ATH9K_MODE_11G);
2690 
2691 	/* 802.11g features */
2692 	if (sc->sc_have11g)
2693 		ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
2694 		    IEEE80211_C_SHSLOT;		/* short slot time */
2695 
2696 	/* temp workaround */
2697 	sc->sc_mrretry = 1;
2698 
2699 	/* Setup tx/rx descriptors */
2700 	err = arn_desc_alloc(devinfo, sc);
2701 	if (err != DDI_SUCCESS) {
2702 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2703 		    "failed to allocate descriptors: %d\n", err));
2704 		goto attach_fail3;
2705 	}
2706 
2707 	if ((sc->sc_tq = ddi_taskq_create(devinfo, "ath_taskq", 1,
2708 	    TASKQ_DEFAULTPRI, 0)) == NULL) {
2709 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2710 		    "ERR:ddi_taskq_create\n"));
2711 		goto attach_fail4;
2712 	}
2713 
2714 	/*
2715 	 * Allocate hardware transmit queues: one queue for
2716 	 * beacon frames and one data queue for each QoS
2717 	 * priority.  Note that the hal handles resetting
2718 	 * these queues at the needed time.
2719 	 */
2720 #ifdef ARN_IBSS
2721 	sc->sc_beaconq = arn_beaconq_setup(ah);
2722 	if (sc->sc_beaconq == (-1)) {
2723 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2724 		    "unable to setup a beacon xmit queue\n"));
2725 		goto attach_fail4;
2726 	}
2727 #endif
2728 #ifdef ARN_HOSTAP
2729 	sc->sc_cabq = arn_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
2730 	if (sc->sc_cabq == NULL) {
2731 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2732 		    "unable to setup CAB xmit queue\n"));
2733 		goto attach_fail4;
2734 	}
2735 
2736 	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
2737 	ath_cabq_update(sc);
2738 #endif
2739 
2740 	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
2741 		sc->sc_haltype2q[i] = -1;
2742 
2743 	/* Setup data queues */
2744 	/* NB: ensure BK queue is the lowest priority h/w queue */
2745 	if (!arn_tx_setup(sc, ATH9K_WME_AC_BK)) {
2746 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2747 		    "unable to setup xmit queue for BK traffic\n"));
2748 		goto attach_fail4;
2749 	}
2750 	if (!arn_tx_setup(sc, ATH9K_WME_AC_BE)) {
2751 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2752 		    "unable to setup xmit queue for BE traffic\n"));
2753 		goto attach_fail4;
2754 	}
2755 	if (!arn_tx_setup(sc, ATH9K_WME_AC_VI)) {
2756 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2757 		    "unable to setup xmit queue for VI traffic\n"));
2758 		goto attach_fail4;
2759 	}
2760 	if (!arn_tx_setup(sc, ATH9K_WME_AC_VO)) {
2761 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2762 		    "unable to setup xmit queue for VO traffic\n"));
2763 		goto attach_fail4;
2764 	}
2765 
2766 	/*
2767 	 * Initializes the noise floor to a reasonable default value.
2768 	 * Later on this will be updated during ANI processing.
2769 	 */
2770 
2771 	sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;
2772 
2773 
2774 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2775 	    ATH9K_CIPHER_TKIP, NULL)) {
2776 		/*
2777 		 * Whether we should enable h/w TKIP MIC.
2778 		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
2779 		 * report WMM capable, so it's always safe to turn on
2780 		 * TKIP MIC in this case.
2781 		 */
2782 		(void) ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
2783 		    0, 1, NULL);
2784 	}
2785 
2786 	/* Get cipher related capability information */
2787 	arn_get_hw_encap(sc);
2788 
2789 	/*
2790 	 * Check whether separate key cache entries
2791 	 * are required to handle both tx and rx MIC keys.
2792 	 * With split mic keys the number of stations is limited
2793 	 * to 27; otherwise it is 59.
2794 	 */
2795 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2796 	    ATH9K_CIPHER_TKIP, NULL) &&
2797 	    ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2798 	    ATH9K_CIPHER_MIC, NULL) &&
2799 	    ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
2800 	    0, NULL))
2801 		sc->sc_splitmic = 1;
2802 
2803 	/* turn on mcast key search if possible */
2804 	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
2805 		(void) ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
2806 		    1, NULL);
2807 
2808 	sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
2809 	sc->sc_config.txpowlimit_override = 0;
2810 
2811 #ifdef ARN_11N
2812 	/* 11n Capabilities */
2813 	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
2814 		sc->sc_flags |= SC_OP_TXAGGR;
2815 		sc->sc_flags |= SC_OP_RXAGGR;
2816 	}
2817 #endif
2818 
2819 #ifdef ARN_11N
2820 	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
2821 	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
2822 #else
2823 	sc->sc_tx_chainmask = 1;
2824 	sc->sc_rx_chainmask = 1;
2825 #endif
2826 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2827 	    "tx_chainmask = %d, rx_chainmask = %d\n",
2828 	    sc->sc_tx_chainmask, sc->sc_rx_chainmask));
2829 
2830 	(void) ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, B_TRUE, NULL);
2831 	sc->sc_defant = ath9k_hw_getdefantenna(ah);
2832 
2833 	ath9k_hw_getmac(ah, sc->sc_myaddr);
2834 	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
2835 		ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
2836 		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
2837 		(void) ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
2838 	}
2839 
2840 	/* set default value to short slot time */
2841 	sc->sc_slottime = ATH9K_SLOT_TIME_9;
2842 	(void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
2843 
2844 	/* initialize beacon slots */
2845 	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
2846 		sc->sc_bslot[i] = ATH_IF_ID_ANY;
2847 
2848 	/* save MISC configurations */
2849 	sc->sc_config.swBeaconProcess = 1;
2850 
2851 
2852 	ic->ic_caps |= IEEE80211_C_WPA;	/* Support WPA/WPA2 */
2853 	ic->ic_phytype = IEEE80211_T_OFDM;
2854 	ic->ic_opmode = IEEE80211_M_STA;
2855 	ic->ic_state = IEEE80211_S_INIT;
2856 	ic->ic_maxrssi = ARN_MAX_RSSI;
2857 	ic->ic_set_shortslot = arn_set_shortslot;
2858 	ic->ic_xmit = arn_tx;
2859 	ieee80211_attach(ic);
2860 
2861 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2862 	    "ic->ic_curchan->ich_freq: %d\n", ic->ic_curchan->ich_freq));
2863 
2864 	/* different instance has different WPA door */
2865 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
2866 	    ddi_driver_name(devinfo),
2867 	    ddi_get_instance(devinfo));
2868 
2869 	/* Override 80211 default routines */
2870 	ic->ic_reset = arn_reset;
2871 	sc->sc_newstate = ic->ic_newstate;
2872 	ic->ic_newstate = arn_newstate;
2873 #ifdef ARN_IBSS
2874 	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
2875 	ic->ic_recv_mgmt = arn_recv_mgmt;
2876 #endif
2877 	ic->ic_watchdog = arn_watchdog;
2878 	ic->ic_node_alloc = arn_node_alloc;
2879 	ic->ic_node_free = arn_node_free;
2880 	ic->ic_crypto.cs_key_alloc = arn_key_alloc;
2881 	ic->ic_crypto.cs_key_delete = arn_key_delete;
2882 	ic->ic_crypto.cs_key_set = arn_key_set;
2883 
2884 	ieee80211_media_init(ic);
2885 
2886 	/*
2887 	 * initialize default tx key
2888 	 */
2889 	ic->ic_def_txkey = 0;
2890 
2891 	sc->sc_rx_pend = 0;
2892 	(void) ath9k_hw_set_interrupts(sc->sc_ah, 0);
2893 	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW,
2894 	    &sc->sc_softint_id, NULL, 0, arn_softint_handler, (caddr_t)sc);
2895 	if (err != DDI_SUCCESS) {
2896 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2897 		    "ddi_add_softintr() failed....\n"));
2898 		goto attach_fail5;
2899 	}
2900 
2901 	if (ddi_get_iblock_cookie(devinfo, 0, &sc->sc_iblock)
2902 	    != DDI_SUCCESS) {
2903 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2904 		    "Can not get iblock cookie for INT\n"));
2905 		goto attach_fail6;
2906 	}
2907 
2908 	if (ddi_add_intr(devinfo, 0, NULL, NULL, arn_isr,
2909 	    (caddr_t)sc) != DDI_SUCCESS) {
2910 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2911 		    "Can not set intr for ARN driver\n"));
2912 		goto attach_fail6;
2913 	}
2914 
2915 	/*
2916 	 * Provide initial settings for the WiFi plugin; whenever this
2917 	 * information changes, we need to call mac_plugindata_update()
2918 	 */
2919 	wd.wd_opmode = ic->ic_opmode;
2920 	wd.wd_secalloc = WIFI_SEC_NONE;
2921 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid);
2922 
2923 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2924 	    "IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid)"
2925 	    "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
2926 	    wd.wd_bssid[0], wd.wd_bssid[1], wd.wd_bssid[2],
2927 	    wd.wd_bssid[3], wd.wd_bssid[4], wd.wd_bssid[5]));
2928 
2929 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
2930 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2931 		    "MAC version mismatch\n"));
2932 		goto attach_fail7;
2933 	}
2934 
2935 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
2936 	macp->m_driver		= sc;
2937 	macp->m_dip		= devinfo;
2938 	macp->m_src_addr	= ic->ic_macaddr;
2939 	macp->m_callbacks	= &arn_m_callbacks;
2940 	macp->m_min_sdu		= 0;
2941 	macp->m_max_sdu		= IEEE80211_MTU;
2942 	macp->m_pdata		= &wd;
2943 	macp->m_pdata_size	= sizeof (wd);
2944 
2945 	err = mac_register(macp, &ic->ic_mach);
2946 	mac_free(macp);
2947 	if (err != 0) {
2948 		ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2949 		    "mac_register err %x\n", err));
2950 		goto attach_fail7;
2951 	}
2952 
2953 	/* Create minor node of type DDI_NT_NET_WIFI */
2954 	(void) snprintf(strbuf, sizeof (strbuf), "%s%d",
2955 	    ARN_NODENAME, instance);
2956 	err = ddi_create_minor_node(devinfo, strbuf, S_IFCHR,
2957 	    instance + 1, DDI_NT_NET_WIFI, 0);
2958 	if (err != DDI_SUCCESS)
2959 		ARN_DBG((ARN_DBG_ATTACH, "WARN: arn: arn_attach(): "
2960 		    "Create minor node failed - %d\n", err));
2961 
2962 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
2963 
2964 	sc->sc_promisc = B_FALSE;
2965 	bzero(sc->sc_mcast_refs, sizeof (sc->sc_mcast_refs));
2966 	bzero(sc->sc_mcast_hash, sizeof (sc->sc_mcast_hash));
2967 
2968 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2969 	    "Atheros AR%s MAC/BB Rev:%x "
2970 	    "AR%s RF Rev:%x: mem=0x%lx\n",
2971 	    arn_mac_bb_name(ah->ah_macVersion),
2972 	    ah->ah_macRev,
2973 	    arn_rf_name((ah->ah_analog5GhzRev & AR_RADIO_SREV_MAJOR)),
2974 	    ah->ah_phyRev,
2975 	    (unsigned long)sc->mem));
2976 
2977 	/* XXX: hardware will not be ready until arn_open() is called */
2978 	sc->sc_flags |= SC_OP_INVALID;
2979 	sc->sc_isrunning = 0;
2980 
2981 	return (DDI_SUCCESS);
2982 
2983 attach_fail7:
2984 	ddi_remove_intr(devinfo, 0, sc->sc_iblock);
2985 attach_fail6:
2986 	ddi_remove_softintr(sc->sc_softint_id);
2987 attach_fail5:
2988 	(void) ieee80211_detach(ic);
2989 attach_fail4:
2990 	arn_desc_free(sc);
2991 	if (sc->sc_tq)
2992 		ddi_taskq_destroy(sc->sc_tq);
2993 attach_fail3:
2994 	ath9k_hw_detach(ah);
2995 attach_fail2:
2996 	ddi_regs_map_free(&sc->sc_io_handle);
2997 attach_fail1:
2998 	pci_config_teardown(&sc->sc_cfg_handle);
2999 attach_fail0:
3000 	sc->sc_flags |= SC_OP_INVALID;
3001 	/* cleanup tx queues */
3002 	mutex_destroy(&sc->sc_txbuflock);
3003 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3004 		if (ARN_TXQ_SETUP(sc, i)) {
3005 			/* arn_tx_cleanupq(asc, &asc->sc_txq[i]); */
3006 			mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
3007 		}
3008 	}
3009 	mutex_destroy(&sc->sc_rxbuflock);
3010 	mutex_destroy(&sc->sc_serial_rw);
3011 	mutex_destroy(&sc->sc_genlock);
3012 	mutex_destroy(&sc->sc_resched_lock);
3013 #ifdef ARN_IBSS
3014 	mutex_destroy(&sc->sc_bcbuflock);
3015 #endif
3016 
3017 	ddi_soft_state_free(arn_soft_state_p, instance);
3018 
3019 	return (DDI_FAILURE);
3020 
3021 }
3022 
3023 /*
3024  * Suspend transmit/receive for powerdown
3025  */
3026 static int
3027 arn_suspend(struct arn_softc *sc)
3028 {
3029 	ARN_LOCK(sc);
3030 	arn_close(sc);
3031 	ARN_UNLOCK(sc);
3032 
3033 	return (DDI_SUCCESS);
3034 }
3035 
3036 static int32_t
3037 arn_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
3038 {
3039 	struct arn_softc *sc;
3040 	int i;
3041 
3042 	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
3043 	ASSERT(sc != NULL);
3044 
3045 	switch (cmd) {
3046 	case DDI_DETACH:
3047 		break;
3048 
3049 	case DDI_SUSPEND:
3050 		return (arn_suspend(sc));
3051 
3052 	default:
3053 		return (DDI_FAILURE);
3054 	}
3055 
3056 	if (mac_disable(sc->sc_isc.ic_mach) != 0)
3057 		return (DDI_FAILURE);
3058 
3059 	arn_stop_scantimer(sc);
3060 	arn_stop_caltimer(sc);
3061 
3062 	/* disable interrupts */
3063 	(void) ath9k_hw_set_interrupts(sc->sc_ah, 0);
3064 
3065 	/*
3066 	 * Unregister from the MAC layer subsystem
3067 	 */
3068 	(void) mac_unregister(sc->sc_isc.ic_mach);
3069 
3070 	/* free interrupt resources */
3071 	ddi_remove_intr(devinfo, 0, sc->sc_iblock);
3072 	ddi_remove_softintr(sc->sc_softint_id);
3073 
3074 	/*
3075 	 * NB: the order of these is important:
3076 	 * o call the 802.11 layer before detaching the hal to
3077 	 *   ensure callbacks into the driver to delete global
3078 	 *   key cache entries can be handled
3079 	 * o reclaim the tx queue data structures after calling
3080 	 *   the 802.11 layer as we'll get called back to reclaim
3081 	 *   node state and potentially want to use them
3082 	 * o the hal is called to clean up the tx queues, so
3083 	 *   detach it last
3084 	 */
3085 	ieee80211_detach(&sc->sc_isc);
3086 
3087 	arn_desc_free(sc);
3088 
3089 	ddi_taskq_destroy(sc->sc_tq);
3090 
3091 	if (!(sc->sc_flags & SC_OP_INVALID))
3092 		(void) ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
3093 
3094 	/* cleanup tx queues */
3095 	mutex_destroy(&sc->sc_txbuflock);
3096 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3097 		if (ARN_TXQ_SETUP(sc, i)) {
3098 			arn_tx_cleanupq(sc, &sc->sc_txq[i]);
3099 			mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
3100 		}
3101 	}
3102 
3103 	ath9k_hw_detach(sc->sc_ah);
3104 
3105 	/* free io handle */
3106 	ddi_regs_map_free(&sc->sc_io_handle);
3107 	pci_config_teardown(&sc->sc_cfg_handle);
3108 
3109 	/* destroy locks */
3110 	mutex_destroy(&sc->sc_genlock);
3111 	mutex_destroy(&sc->sc_serial_rw);
3112 	mutex_destroy(&sc->sc_rxbuflock);
3113 	mutex_destroy(&sc->sc_resched_lock);
3114 #ifdef ARN_IBSS
3115 	mutex_destroy(&sc->sc_bcbuflock);
3116 #endif
3117 
3118 	ddi_remove_minor_node(devinfo, NULL);
3119 	ddi_soft_state_free(arn_soft_state_p, ddi_get_instance(devinfo));
3120 
3121 	return (DDI_SUCCESS);
3122 }
3123 
3124 /*
3125  * quiesce(9E) entry point.
3126  *
3127  * This function is called when the system is single-threaded at high
3128  * PIL with preemption disabled. Therefore, this function must not
3129  * block.
3130  *
3131  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
3132  * DDI_FAILURE indicates an error condition and should almost never happen.
3133  */
3134 static int32_t
3135 arn_quiesce(dev_info_t *devinfo)
3136 {
3137 	struct arn_softc *sc;
3138 	int i;
3139 	struct ath_hal *ah;
3140 
3141 	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
3142 
3143 	if (sc == NULL || (ah = sc->sc_ah) == NULL)
3144 		return (DDI_FAILURE);
3145 
3146 	/*
3147 	 * Disable interrupts
3148 	 */
3149 	(void) ath9k_hw_set_interrupts(ah, 0);
3150 
3151 	/*
3152 	 * Disable TX HW
3153 	 */
3154 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3155 		if (ARN_TXQ_SETUP(sc, i))
3156 			(void) ath9k_hw_stoptxdma(ah, sc->sc_txq[i].axq_qnum);
3157 	}
3158 
3159 	/*
3160 	 * Disable RX HW
3161 	 */
3162 	ath9k_hw_stoppcurecv(ah);
3163 	ath9k_hw_setrxfilter(ah, 0);
3164 	(void) ath9k_hw_stopdmarecv(ah);
3165 	drv_usecwait(3000);
3166 
3167 	/*
3168 	 * Power down HW
3169 	 */
3170 	(void) ath9k_hw_phy_disable(ah);
3171 
3172 	return (DDI_SUCCESS);
3173 }
3174 
3175 DDI_DEFINE_STREAM_OPS(arn_dev_ops, nulldev, nulldev, arn_attach, arn_detach,
3176     nodev, NULL, D_MP, NULL, arn_quiesce);
3177 
3178 static struct modldrv arn_modldrv = {
3179 	&mod_driverops, /* Type of module.  This one is a driver */
3180 	"arn-Atheros 9000 series driver:version 1.1", /* short description */
3181 	&arn_dev_ops /* driver specific ops */
3182 };
3183 
3184 static struct modlinkage modlinkage = {
3185 	MODREV_1, (void *)&arn_modldrv, NULL
3186 };
3187 
3188 int
3189 _info(struct modinfo *modinfop)
3190 {
3191 	return (mod_info(&modlinkage, modinfop));
3192 }
3193 
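/*
 * Loadable module plumbing: _init() sets up the soft state framework and
 * the driver log mutex, hooks the mac(9E) callbacks into arn_dev_ops via
 * mac_init_ops(), then registers the module with mod_install(); a failed
 * install and _fini() unwind the same steps in reverse.
 */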
3194 int
3195 _init(void)
3196 {
3197 	int status;
3198 
3199 	status = ddi_soft_state_init
3200 	    (&arn_soft_state_p, sizeof (struct arn_softc), 1);
3201 	if (status != 0)
3202 		return (status);
3203 
3204 	mutex_init(&arn_loglock, NULL, MUTEX_DRIVER, NULL);
3205 	mac_init_ops(&arn_dev_ops, "arn");
3206 	status = mod_install(&modlinkage);
3207 	if (status != 0) {
3208 		mac_fini_ops(&arn_dev_ops);
3209 		mutex_destroy(&arn_loglock);
3210 		ddi_soft_state_fini(&arn_soft_state_p);
3211 	}
3212 
3213 	return (status);
3214 }
3215 
3216 int
3217 _fini(void)
3218 {
3219 	int status;
3220 
3221 	status = mod_remove(&modlinkage);
3222 	if (status == 0) {
3223 		mac_fini_ops(&arn_dev_ops);
3224 		mutex_destroy(&arn_loglock);
3225 		ddi_soft_state_fini(&arn_soft_state_p);
3226 	}
3227 	return (status);
3228 }
3229