1 /*
2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 * Use is subject to license terms.
4 */
5
6 /*
7 * Copyright (c) 2008 Atheros Communications Inc.
8 *
9 * Permission to use, copy, modify, and/or distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22 #include <sys/sysmacros.h>
23 #include <sys/param.h>
24 #include <sys/types.h>
25 #include <sys/signal.h>
26 #include <sys/stream.h>
27 #include <sys/termio.h>
28 #include <sys/errno.h>
29 #include <sys/file.h>
30 #include <sys/cmn_err.h>
31 #include <sys/stropts.h>
32 #include <sys/strsubr.h>
33 #include <sys/strtty.h>
34 #include <sys/kbio.h>
35 #include <sys/cred.h>
36 #include <sys/stat.h>
37 #include <sys/consdev.h>
38 #include <sys/kmem.h>
39 #include <sys/modctl.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/pci.h>
43 #include <sys/errno.h>
44 #include <sys/mac_provider.h>
45 #include <sys/dlpi.h>
46 #include <sys/ethernet.h>
47 #include <sys/list.h>
48 #include <sys/byteorder.h>
49 #include <sys/strsun.h>
50 #include <sys/policy.h>
51 #include <inet/common.h>
52 #include <inet/nd.h>
53 #include <inet/mi.h>
54 #include <inet/wifi_ioctl.h>
55 #include <sys/mac_wifi.h>
56 #include <sys/net80211.h>
57 #include <sys/net80211_proto.h>
58 #include <sys/net80211_ht.h>
59
60
61 #include "arn_ath9k.h"
62 #include "arn_core.h"
63 #include "arn_reg.h"
64 #include "arn_hw.h"
65
66 #define ARN_MAX_RSSI 45 /* max rssi */
67
68 /*
69  * Default 11n rates supported by this station.
70 */
71 extern struct ieee80211_htrateset ieee80211_rateset_11n;
72
73 /*
74 * PIO access attributes for registers
75 */
76 static ddi_device_acc_attr_t arn_reg_accattr = {
77 DDI_DEVICE_ATTR_V0,
78 DDI_STRUCTURE_LE_ACC,
79 DDI_STRICTORDER_ACC,
80 DDI_DEFAULT_ACC
81 };
82
83 /*
84 * DMA access attributes for descriptors: NOT to be byte swapped.
85 */
86 static ddi_device_acc_attr_t arn_desc_accattr = {
87 DDI_DEVICE_ATTR_V0,
88 DDI_STRUCTURE_LE_ACC,
89 DDI_STRICTORDER_ACC,
90 DDI_DEFAULT_ACC
91 };
92
93 /*
94 * Describes the chip's DMA engine
95 */
96 static ddi_dma_attr_t arn_dma_attr = {
97 DMA_ATTR_V0, /* version number */
98 0, /* low address */
99 0xffffffffU, /* high address */
100 0x3ffffU, /* counter register max */
101 1, /* alignment */
102 0xFFF, /* burst sizes */
103 1, /* minimum transfer size */
104 0x3ffffU, /* max transfer size */
105 0xffffffffU, /* address register max */
106 1, /* no scatter-gather */
107 1, /* granularity of device */
108 0, /* DMA flags */
109 };
110
111 static ddi_dma_attr_t arn_desc_dma_attr = {
112 DMA_ATTR_V0, /* version number */
113 0, /* low address */
114 0xffffffffU, /* high address */
115 0xffffffffU, /* counter register max */
116 0x1000, /* alignment */
117 0xFFF, /* burst sizes */
118 1, /* minimum transfer size */
119 0xffffffffU, /* max transfer size */
120 0xffffffffU, /* address register max */
121 1, /* no scatter-gather */
122 1, /* granularity of device */
123 0, /* DMA flags */
124 };
125
126 #define ATH_DEF_CACHE_BYTES 32 /* default cache line size */
127
128 static kmutex_t arn_loglock;
129 static void *arn_soft_state_p = NULL;
130 static int arn_dwelltime = 200;	/* per-channel dwell time (ms) */
131
132 static int arn_m_stat(void *, uint_t, uint64_t *);
133 static int arn_m_start(void *);
134 static void arn_m_stop(void *);
135 static int arn_m_promisc(void *, boolean_t);
136 static int arn_m_multicst(void *, boolean_t, const uint8_t *);
137 static int arn_m_unicst(void *, const uint8_t *);
138 static mblk_t *arn_m_tx(void *, mblk_t *);
139 static void arn_m_ioctl(void *, queue_t *, mblk_t *);
140 static int arn_m_setprop(void *, const char *, mac_prop_id_t,
141 uint_t, const void *);
142 static int arn_m_getprop(void *, const char *, mac_prop_id_t,
143 uint_t, void *);
144 static void arn_m_propinfo(void *, const char *, mac_prop_id_t,
145 mac_prop_info_handle_t);
146
147 /* MAC Callback Functions */
148 static mac_callbacks_t arn_m_callbacks = {
149 MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
150 arn_m_stat,
151 arn_m_start,
152 arn_m_stop,
153 arn_m_promisc,
154 arn_m_multicst,
155 arn_m_unicst,
156 arn_m_tx,
157 NULL,
158 arn_m_ioctl,
159 NULL,
160 NULL,
161 NULL,
162 arn_m_setprop,
163 arn_m_getprop,
164 arn_m_propinfo
165 };
166
167 /*
168 * ARN_DBG_HW
169 * ARN_DBG_REG_IO
170 * ARN_DBG_QUEUE
171 * ARN_DBG_EEPROM
172 * ARN_DBG_XMIT
173 * ARN_DBG_RECV
174 * ARN_DBG_CALIBRATE
175 * ARN_DBG_CHANNEL
176 * ARN_DBG_INTERRUPT
177 * ARN_DBG_REGULATORY
178 * ARN_DBG_ANI
179 * ARN_DBG_POWER_MGMT
180 * ARN_DBG_KEYCACHE
181 * ARN_DBG_BEACON
182 * ARN_DBG_RATE
183 * ARN_DBG_INIT
184 * ARN_DBG_ATTACH
185 * ARN_DBG_DEATCH
186 * ARN_DBG_AGGR
187 * ARN_DBG_RESET
188 * ARN_DBG_FATAL
189 * ARN_DBG_ANY
190 * ARN_DBG_ALL
191 */
192 uint32_t arn_dbg_mask = 0;
193
194 /*
195 * Exception/warning cases not leading to panic.
196 */
197 void
198 arn_problem(const int8_t *fmt, ...)
199 {
200 va_list args;
201
202 mutex_enter(&arn_loglock);
203
204 va_start(args, fmt);
205 vcmn_err(CE_WARN, fmt, args);
206 va_end(args);
207
208 mutex_exit(&arn_loglock);
209 }
210
211 /*
212 * Normal log information independent of debug.
213 */
214 void
215 arn_log(const int8_t *fmt, ...)
216 {
217 va_list args;
218
219 mutex_enter(&arn_loglock);
220
221 va_start(args, fmt);
222 vcmn_err(CE_CONT, fmt, args);
223 va_end(args);
224
225 mutex_exit(&arn_loglock);
226 }
227
228 void
229 arn_dbg(uint32_t dbg_flags, const int8_t *fmt, ...)
230 {
231 va_list args;
232
233 if (dbg_flags & arn_dbg_mask) {
234 mutex_enter(&arn_loglock);
235 va_start(args, fmt);
236 vcmn_err(CE_CONT, fmt, args);
237 va_end(args);
238 mutex_exit(&arn_loglock);
239 }
240 }
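
/*
 * Debug output is gated by arn_dbg_mask above: OR the desired ARN_DBG_*
 * category bits into it to enable the corresponding arn_dbg() messages.
 * A minimal example (assuming only init/attach tracing is wanted):
 *
 *	arn_dbg_mask = ARN_DBG_INIT | ARN_DBG_ATTACH;
 *
 * With the default value of 0, arn_dbg() prints nothing.
 */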
241
242 /*
243  * Reads and writes share the same lock. We do this to serialize
244  * reads and writes on Atheros 802.11n PCI devices only. This is required
245  * as the FIFO on these devices can only sanely accept two requests;
246  * beyond that the device misbehaves. Serializing the reads/writes
247  * prevents this from happening.
248 */
249 void
250 arn_iowrite32(struct ath_hal *ah, uint32_t reg_offset, uint32_t val)
251 {
252 struct arn_softc *sc = ah->ah_sc;
253 if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
254 mutex_enter(&sc->sc_serial_rw);
255 ddi_put32(sc->sc_io_handle,
256 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
257 mutex_exit(&sc->sc_serial_rw);
258 } else {
259 ddi_put32(sc->sc_io_handle,
260 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
261 }
262 }
263
264 unsigned int
265 arn_ioread32(struct ath_hal *ah, uint32_t reg_offset)
266 {
267 uint32_t val;
268 struct arn_softc *sc = ah->ah_sc;
269 if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
270 mutex_enter(&sc->sc_serial_rw);
271 val = ddi_get32(sc->sc_io_handle,
272 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
273 mutex_exit(&sc->sc_serial_rw);
274 } else {
275 val = ddi_get32(sc->sc_io_handle,
276 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
277 }
278
279 return (val);
280 }
281
282 /*
283 * Allocate an area of memory and a DMA handle for accessing it
284 */
285 static int
286 arn_alloc_dma_mem(dev_info_t *devinfo, ddi_dma_attr_t *dma_attr, size_t memsize,
287 ddi_device_acc_attr_t *attr_p, uint_t alloc_flags,
288 uint_t bind_flags, dma_area_t *dma_p)
289 {
290 int err;
291
292 /*
293 * Allocate handle
294 */
295 err = ddi_dma_alloc_handle(devinfo, dma_attr,
296 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
297 if (err != DDI_SUCCESS)
298 return (DDI_FAILURE);
299
300 /*
301 * Allocate memory
302 */
303 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
304 alloc_flags, DDI_DMA_SLEEP, NULL, &dma_p->mem_va,
305 &dma_p->alength, &dma_p->acc_hdl);
306 if (err != DDI_SUCCESS)
307 return (DDI_FAILURE);
308
309 /*
310 * Bind the two together
311 */
312 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
313 dma_p->mem_va, dma_p->alength, bind_flags,
314 DDI_DMA_SLEEP, NULL, &dma_p->cookie, &dma_p->ncookies);
315 if (err != DDI_DMA_MAPPED)
316 return (DDI_FAILURE);
317
318 dma_p->nslots = ~0U;
319 dma_p->size = ~0U;
320 dma_p->token = ~0U;
321 dma_p->offset = 0;
322 return (DDI_SUCCESS);
323 }
324
325 /*
326 * Free one allocated area of DMAable memory
327 */
328 static void
329 arn_free_dma_mem(dma_area_t *dma_p)
330 {
331 if (dma_p->dma_hdl != NULL) {
332 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
333 if (dma_p->acc_hdl != NULL) {
334 ddi_dma_mem_free(&dma_p->acc_hdl);
335 dma_p->acc_hdl = NULL;
336 }
337 ddi_dma_free_handle(&dma_p->dma_hdl);
338 dma_p->ncookies = 0;
339 dma_p->dma_hdl = NULL;
340 }
341 }
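
/*
 * Rough usage sketch for the two helpers above (see arn_desc_alloc()
 * below for the real callers); the names devinfo/len/da here are only
 * illustrative:
 *
 *	dma_area_t da;
 *
 *	if (arn_alloc_dma_mem(devinfo, &arn_dma_attr, len, &arn_desc_accattr,
 *	    DDI_DMA_STREAMING, DDI_DMA_RDWR | DDI_DMA_STREAMING, &da) !=
 *	    DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	...
 *	arn_free_dma_mem(&da);
 */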
342
343 /*
344  * Initialize tx, rx, or beacon buffer list. Allocate DMA memory for
345 * each buffer.
346 */
347 static int
348 arn_buflist_setup(dev_info_t *devinfo,
349 struct arn_softc *sc,
350 list_t *bflist,
351 struct ath_buf **pbf,
352 struct ath_desc **pds,
353 int nbuf,
354 uint_t dmabflags,
355 uint32_t buflen)
356 {
357 int i, err;
358 struct ath_buf *bf = *pbf;
359 struct ath_desc *ds = *pds;
360
361 list_create(bflist, sizeof (struct ath_buf),
362 offsetof(struct ath_buf, bf_node));
363 for (i = 0; i < nbuf; i++, bf++, ds++) {
364 bf->bf_desc = ds;
365 bf->bf_daddr = sc->sc_desc_dma.cookie.dmac_address +
366 ((uintptr_t)ds - (uintptr_t)sc->sc_desc);
367 list_insert_tail(bflist, bf);
368
369 /* alloc DMA memory */
370 err = arn_alloc_dma_mem(devinfo, &arn_dma_attr,
371 buflen, &arn_desc_accattr, DDI_DMA_STREAMING,
372 dmabflags, &bf->bf_dma);
373 if (err != DDI_SUCCESS)
374 return (err);
375 }
376 *pbf = bf;
377 *pds = ds;
378
379 return (DDI_SUCCESS);
380 }
381
382 /*
383 * Destroy tx, rx or beacon buffer list. Free DMA memory.
384 */
385 static void
386 arn_buflist_cleanup(list_t *buflist)
387 {
388 struct ath_buf *bf;
389
390 if (!buflist)
391 return;
392
393 bf = list_head(buflist);
394 while (bf != NULL) {
395 if (bf->bf_m != NULL) {
396 freemsg(bf->bf_m);
397 bf->bf_m = NULL;
398 }
399 /* Free DMA buffer */
400 arn_free_dma_mem(&bf->bf_dma);
401 if (bf->bf_in != NULL) {
402 ieee80211_free_node(bf->bf_in);
403 bf->bf_in = NULL;
404 }
405 list_remove(buflist, bf);
406 bf = list_head(buflist);
407 }
408 list_destroy(buflist);
409 }
410
411 static void
412 arn_desc_free(struct arn_softc *sc)
413 {
414 arn_buflist_cleanup(&sc->sc_txbuf_list);
415 arn_buflist_cleanup(&sc->sc_rxbuf_list);
416 #ifdef ARN_IBSS
417 arn_buflist_cleanup(&sc->sc_bcbuf_list);
418 #endif
419
420 /* Free descriptor DMA buffer */
421 arn_free_dma_mem(&sc->sc_desc_dma);
422
423 kmem_free((void *)sc->sc_vbufptr, sc->sc_vbuflen);
424 sc->sc_vbufptr = NULL;
425 }
426
427 static int
428 arn_desc_alloc(dev_info_t *devinfo, struct arn_softc *sc)
429 {
430 int err;
431 size_t size;
432 struct ath_desc *ds;
433 struct ath_buf *bf;
434
435 #ifdef ARN_IBSS
436 size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF);
437 #else
438 size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF);
439 #endif
440
441 	err = arn_alloc_dma_mem(devinfo, &arn_desc_dma_attr, size,
442 	    &arn_desc_accattr, DDI_DMA_CONSISTENT,
443 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &sc->sc_desc_dma);
	if (err != DDI_SUCCESS)
		return (err);
444
445 /* virtual address of the first descriptor */
446 sc->sc_desc = (struct ath_desc *)sc->sc_desc_dma.mem_va;
447
448 ds = sc->sc_desc;
449 ARN_DBG((ARN_DBG_INIT, "arn: arn_desc_alloc(): DMA map: "
450 "%p (%d) -> %p\n",
451 sc->sc_desc, sc->sc_desc_dma.alength,
452 sc->sc_desc_dma.cookie.dmac_address));
453
454 /* allocate data structures to describe TX/RX DMA buffers */
455 #ifdef ARN_IBSS
456 sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF +
457 ATH_BCBUF);
458 #else
459 sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF);
460 #endif
461 bf = (struct ath_buf *)kmem_zalloc(sc->sc_vbuflen, KM_SLEEP);
462 sc->sc_vbufptr = bf;
463
464 /* DMA buffer size for each TX/RX packet */
465 #ifdef ARN_TX_AGGREGATION
466 sc->tx_dmabuf_size =
467 roundup((IEEE80211_MAX_MPDU_LEN + 3840 * 2),
468 min(sc->sc_cachelsz, (uint16_t)64));
469 #else
470 sc->tx_dmabuf_size =
471 roundup(IEEE80211_MAX_MPDU_LEN, min(sc->sc_cachelsz, (uint16_t)64));
472 #endif
473 sc->rx_dmabuf_size =
474 roundup(IEEE80211_MAX_MPDU_LEN, min(sc->sc_cachelsz, (uint16_t)64));
475
476 /* create RX buffer list */
477 err = arn_buflist_setup(devinfo, sc, &sc->sc_rxbuf_list, &bf, &ds,
478 ATH_RXBUF, DDI_DMA_READ | DDI_DMA_STREAMING, sc->rx_dmabuf_size);
479 if (err != DDI_SUCCESS) {
480 arn_desc_free(sc);
481 return (err);
482 }
483
484 /* create TX buffer list */
485 err = arn_buflist_setup(devinfo, sc, &sc->sc_txbuf_list, &bf, &ds,
486 ATH_TXBUF, DDI_DMA_STREAMING, sc->tx_dmabuf_size);
487 if (err != DDI_SUCCESS) {
488 arn_desc_free(sc);
489 return (err);
490 }
491
492 /* create beacon buffer list */
493 #ifdef ARN_IBSS
494 	err = arn_buflist_setup(devinfo, sc, &sc->sc_bcbuf_list, &bf, &ds,
495 	    ATH_BCBUF, DDI_DMA_STREAMING,
	    sc->tx_dmabuf_size); /* assumed: beacon buffers sized like tx */
496 if (err != DDI_SUCCESS) {
497 arn_desc_free(sc);
498 return (err);
499 }
500 #endif
501
502 return (DDI_SUCCESS);
503 }
504
505 static void
506 arn_setcurmode(struct arn_softc *sc, enum wireless_mode mode)
507 {
508 struct ath_rate_table *rt;
509 int i;
510
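	/*
	 * Start from an "invalid" map: 0xff marks dot11 rate codes that
	 * have no entry in the current hardware rate table; the loop
	 * further below fills in the valid rate indices.
	 */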
511 for (i = 0; i < sizeof (sc->asc_rixmap); i++)
512 sc->asc_rixmap[i] = 0xff;
513
514 rt = sc->hw_rate_table[mode];
515 ASSERT(rt != NULL);
516
517 for (i = 0; i < rt->rate_cnt; i++)
518 sc->asc_rixmap[rt->info[i].dot11rate &
519 IEEE80211_RATE_VAL] = (uint8_t)i; /* LINT */
520
521 sc->sc_currates = rt;
522 sc->sc_curmode = mode;
523
524 /*
525 	 * All protection frames are transmitted at 2Mb/s for
526 * 11g, otherwise at 1Mb/s.
527 * XXX select protection rate index from rate table.
528 */
529 sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
530 }
531
532 static enum wireless_mode
533 arn_chan2mode(struct ath9k_channel *chan)
534 {
535 if (chan->chanmode == CHANNEL_A)
536 return (ATH9K_MODE_11A);
537 else if (chan->chanmode == CHANNEL_G)
538 return (ATH9K_MODE_11G);
539 else if (chan->chanmode == CHANNEL_B)
540 return (ATH9K_MODE_11B);
541 else if (chan->chanmode == CHANNEL_A_HT20)
542 return (ATH9K_MODE_11NA_HT20);
543 else if (chan->chanmode == CHANNEL_G_HT20)
544 return (ATH9K_MODE_11NG_HT20);
545 else if (chan->chanmode == CHANNEL_A_HT40PLUS)
546 return (ATH9K_MODE_11NA_HT40PLUS);
547 else if (chan->chanmode == CHANNEL_A_HT40MINUS)
548 return (ATH9K_MODE_11NA_HT40MINUS);
549 else if (chan->chanmode == CHANNEL_G_HT40PLUS)
550 return (ATH9K_MODE_11NG_HT40PLUS);
551 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
552 return (ATH9K_MODE_11NG_HT40MINUS);
553
554 return (ATH9K_MODE_11B);
555 }
556
557 static void
558 arn_update_txpow(struct arn_softc *sc)
559 {
560 struct ath_hal *ah = sc->sc_ah;
561 uint32_t txpow;
562
563 if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
564 (void) ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
565 /* read back in case value is clamped */
566 (void) ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
567 sc->sc_curtxpow = (uint32_t)txpow;
568 }
569 }
570
571 uint8_t
572 parse_mpdudensity(uint8_t mpdudensity)
573 {
574 /*
575 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
576 * 0 for no restriction
577 * 1 for 1/4 us
578 * 2 for 1/2 us
579 * 3 for 1 us
580 * 4 for 2 us
581 * 5 for 4 us
582 * 6 for 8 us
583 * 7 for 16 us
584 */
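	/*
	 * For example, a density code of 5 (4 us spacing) yields a return
	 * value of 4, while codes 1-3 all collapse to 1 because the lower
	 * layers only work at 1 us granularity (see case 3 below).
	 */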
585 switch (mpdudensity) {
586 case 0:
587 return (0);
588 case 1:
589 case 2:
590 case 3:
591 /*
592 * Our lower layer calculations limit our
593 * precision to 1 microsecond
594 */
595 return (1);
596 case 4:
597 return (2);
598 case 5:
599 return (4);
600 case 6:
601 return (8);
602 case 7:
603 return (16);
604 default:
605 return (0);
606 }
607 }
608
609 static void
610 arn_setup_rates(struct arn_softc *sc, uint32_t mode)
611 {
612 int i, maxrates;
613 struct ath_rate_table *rate_table = NULL;
614 struct ieee80211_rateset *rateset;
615 ieee80211com_t *ic = (ieee80211com_t *)sc;
616
617 /* rate_table = arn_get_ratetable(sc, mode); */
618 switch (mode) {
619 case IEEE80211_MODE_11A:
620 rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
621 break;
622 case IEEE80211_MODE_11B:
623 rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
624 break;
625 case IEEE80211_MODE_11G:
626 rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
627 break;
628 #ifdef ARN_11N
629 case IEEE80211_MODE_11NA_HT20:
630 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
631 break;
632 case IEEE80211_MODE_11NG_HT20:
633 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
634 break;
635 case IEEE80211_MODE_11NA_HT40PLUS:
636 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
637 break;
638 case IEEE80211_MODE_11NA_HT40MINUS:
639 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
640 break;
641 case IEEE80211_MODE_11NG_HT40PLUS:
642 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
643 break;
644 case IEEE80211_MODE_11NG_HT40MINUS:
645 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
646 break;
647 #endif
648 default:
649 ARN_DBG((ARN_DBG_RATE, "arn: arn_get_ratetable(): "
650 "invalid mode %u\n", mode));
651 break;
652 }
653 if (rate_table == NULL)
654 return;
655 if (rate_table->rate_cnt > ATH_RATE_MAX) {
656 ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
657 "rate table too small (%u > %u)\n",
658 		    rate_table->rate_cnt, ATH_RATE_MAX));
659 maxrates = ATH_RATE_MAX;
660 } else
661 maxrates = rate_table->rate_cnt;
662
663 ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
664 "maxrates is %d\n", maxrates));
665
666 rateset = &ic->ic_sup_rates[mode];
667 for (i = 0; i < maxrates; i++) {
668 rateset->ir_rates[i] = rate_table->info[i].dot11rate;
669 ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
670 "%d\n", rate_table->info[i].dot11rate));
671 }
672 rateset->ir_nrates = (uint8_t)maxrates; /* ??? */
673 }
674
675 static int
676 arn_setup_channels(struct arn_softc *sc)
677 {
678 struct ath_hal *ah = sc->sc_ah;
679 ieee80211com_t *ic = (ieee80211com_t *)sc;
680 int nchan, i, index;
681 uint8_t regclassids[ATH_REGCLASSIDS_MAX];
682 uint32_t nregclass = 0;
683 struct ath9k_channel *c;
684
685 /* Fill in ah->ah_channels */
686 if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (uint32_t *)&nchan,
687 regclassids, ATH_REGCLASSIDS_MAX, &nregclass, CTRY_DEFAULT,
688 B_FALSE, 1)) {
689 uint32_t rd = ah->ah_currentRD;
690 ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
691 "unable to collect channel list; "
692 "regdomain likely %u country code %u\n",
693 rd, CTRY_DEFAULT));
694 return (EINVAL);
695 }
696
697 ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
698 	    "number of channels is %d\n", nchan));
699
700 for (i = 0; i < nchan; i++) {
701 c = &ah->ah_channels[i];
702 uint32_t flags;
703 index = ath9k_hw_mhz2ieee(ah, c->channel, c->channelFlags);
704
705 if (index > IEEE80211_CHAN_MAX) {
706 ARN_DBG((ARN_DBG_CHANNEL,
707 "arn: arn_setup_channels(): "
708 "bad hal channel %d (%u/%x) ignored\n",
709 index, c->channel, c->channelFlags));
710 continue;
711 }
712 /* NB: flags are known to be compatible */
713 if (index < 0) {
714 /*
715 * can't handle frequency <2400MHz (negative
716 * channels) right now
717 */
718 ARN_DBG((ARN_DBG_CHANNEL,
719 "arn: arn_setup_channels(): "
720 "hal channel %d (%u/%x) "
721 "cannot be handled, ignored\n",
722 index, c->channel, c->channelFlags));
723 continue;
724 }
725
726 /*
727 * Calculate net80211 flags; most are compatible
728 * but some need massaging. Note the static turbo
729 * conversion can be removed once net80211 is updated
730 * to understand static vs. dynamic turbo.
731 */
732
733 flags = c->channelFlags & (CHANNEL_ALL | CHANNEL_PASSIVE);
734
735 if (ic->ic_sup_channels[index].ich_freq == 0) {
736 ic->ic_sup_channels[index].ich_freq = c->channel;
737 ic->ic_sup_channels[index].ich_flags = flags;
738 } else {
739 /* channels overlap; e.g. 11g and 11b */
740 ic->ic_sup_channels[index].ich_flags |= flags;
741 }
742 if ((c->channelFlags & CHANNEL_G) == CHANNEL_G) {
743 sc->sc_have11g = 1;
744 ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
745 IEEE80211_C_SHSLOT; /* short slot time */
746 }
747 }
748
749 return (0);
750 }
751
752 uint32_t
753 arn_chan2flags(ieee80211com_t *isc, struct ieee80211_channel *chan)
754 {
755 	uint32_t channel_mode = 0;
756 switch (ieee80211_chan2mode(isc, chan)) {
757 case IEEE80211_MODE_11NA:
758 if (chan->ich_flags & IEEE80211_CHAN_HT40U)
759 channel_mode = CHANNEL_A_HT40PLUS;
760 else if (chan->ich_flags & IEEE80211_CHAN_HT40D)
761 channel_mode = CHANNEL_A_HT40MINUS;
762 else
763 channel_mode = CHANNEL_A_HT20;
764 break;
765 case IEEE80211_MODE_11NG:
766 if (chan->ich_flags & IEEE80211_CHAN_HT40U)
767 channel_mode = CHANNEL_G_HT40PLUS;
768 else if (chan->ich_flags & IEEE80211_CHAN_HT40D)
769 channel_mode = CHANNEL_G_HT40MINUS;
770 else
771 channel_mode = CHANNEL_G_HT20;
772 break;
773 case IEEE80211_MODE_TURBO_G:
774 case IEEE80211_MODE_STURBO_A:
775 case IEEE80211_MODE_TURBO_A:
776 channel_mode = 0;
777 break;
778 case IEEE80211_MODE_11A:
779 channel_mode = CHANNEL_A;
780 break;
781 case IEEE80211_MODE_11G:
782 		channel_mode = CHANNEL_G;
783 break;
784 case IEEE80211_MODE_11B:
785 		channel_mode = CHANNEL_B;
786 break;
787 case IEEE80211_MODE_FH:
788 channel_mode = 0;
789 break;
790 default:
791 break;
792 }
793
794 return (channel_mode);
795 }
796
797 /*
798 * Update internal state after a channel change.
799 */
800 void
801 arn_chan_change(struct arn_softc *sc, struct ieee80211_channel *chan)
802 {
803 struct ieee80211com *ic = &sc->sc_isc;
804 enum ieee80211_phymode mode;
805 	enum wireless_mode wlmode = sc->sc_curmode;
806
807 /*
808 * Change channels and update the h/w rate map
809 * if we're switching; e.g. 11a to 11b/g.
810 */
811 mode = ieee80211_chan2mode(ic, chan);
812 switch (mode) {
813 case IEEE80211_MODE_11A:
814 wlmode = ATH9K_MODE_11A;
815 break;
816 case IEEE80211_MODE_11B:
817 wlmode = ATH9K_MODE_11B;
818 break;
819 case IEEE80211_MODE_11G:
820 		wlmode = ATH9K_MODE_11G;
821 break;
822 default:
823 break;
824 }
825 if (wlmode != sc->sc_curmode)
826 arn_setcurmode(sc, wlmode);
827
828 }
829
830 /*
831 * Set/change channels. If the channel is really being changed, it's done
832  * by resetting the chip. To accomplish this we must first clean up any
833  * pending DMA, then restart everything afterwards.
834 */
835 static int
836 arn_set_channel(struct arn_softc *sc, struct ath9k_channel *hchan)
837 {
838 struct ath_hal *ah = sc->sc_ah;
839 ieee80211com_t *ic = &sc->sc_isc;
840 boolean_t fastcc = B_TRUE;
841 boolean_t stopped;
842 struct ieee80211_channel chan;
843 enum wireless_mode curmode;
844
845 if (sc->sc_flags & SC_OP_INVALID)
846 return (EIO);
847
848 if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
849 hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
850 (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
851 (sc->sc_flags & SC_OP_FULL_RESET)) {
852 int status;
853
854 /*
855 * This is only performed if the channel settings have
856 * actually changed.
857 *
858 * To switch channels clear any pending DMA operations;
859 * wait long enough for the RX fifo to drain, reset the
860 * hardware at the new frequency, and then re-enable
861 * the relevant bits of the h/w.
862 */
863 (void) ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
864 arn_draintxq(sc, B_FALSE); /* clear pending tx frames */
865 stopped = arn_stoprecv(sc); /* turn off frame recv */
866
867 /*
868 * XXX: do not flush receive queue here. We don't want
869 * to flush data frames already in queue because of
870 * changing channel.
871 */
872
873 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
874 fastcc = B_FALSE;
875
876 ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_set_channel(): "
877 "(%u MHz) -> (%u MHz), cflags:%x, chanwidth: %d\n",
878 sc->sc_ah->ah_curchan->channel,
879 hchan->channel, hchan->channelFlags, sc->tx_chan_width));
880
881 if (!ath9k_hw_reset(ah, hchan, sc->tx_chan_width,
882 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
883 sc->sc_ht_extprotspacing, fastcc, &status)) {
884 ARN_DBG((ARN_DBG_FATAL, "arn: arn_set_channel(): "
885 "unable to reset channel %u (%uMhz) "
886 "flags 0x%x hal status %u\n",
887 ath9k_hw_mhz2ieee(ah, hchan->channel,
888 hchan->channelFlags),
889 hchan->channel, hchan->channelFlags, status));
890 return (EIO);
891 }
892
893 sc->sc_curchan = *hchan;
894
895 sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
896 sc->sc_flags &= ~SC_OP_FULL_RESET;
897
898 if (arn_startrecv(sc) != 0) {
899 arn_problem("arn: arn_set_channel(): "
900 "unable to restart recv logic\n");
901 return (EIO);
902 }
903
904 chan.ich_freq = hchan->channel;
905 chan.ich_flags = hchan->channelFlags;
906 ic->ic_ibss_chan = &chan;
907
908 /*
909 * Change channels and update the h/w rate map
910 * if we're switching; e.g. 11a to 11b/g.
911 */
912 curmode = arn_chan2mode(hchan);
913 if (curmode != sc->sc_curmode)
914 arn_setcurmode(sc, arn_chan2mode(hchan));
915
916 arn_update_txpow(sc);
917
918 (void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
919 }
920
921 return (0);
922 }
923
924 /*
925 * This routine performs the periodic noise floor calibration function
926 * that is used to adjust and optimize the chip performance. This
927 * takes environmental changes (location, temperature) into account.
928 * When the task is complete, it reschedules itself depending on the
929 * appropriate interval that was calculated.
930 */
931 static void
932 arn_ani_calibrate(void *arg)
933 {
934 ieee80211com_t *ic = (ieee80211com_t *)arg;
935 struct arn_softc *sc = (struct arn_softc *)ic;
936 struct ath_hal *ah = sc->sc_ah;
937 boolean_t longcal = B_FALSE;
938 boolean_t shortcal = B_FALSE;
939 boolean_t aniflag = B_FALSE;
940 unsigned int timestamp = drv_hztousec(ddi_get_lbolt())/1000;
941 uint32_t cal_interval;
942
943 /*
944 * don't calibrate when we're scanning.
945 * we are most likely not on our home channel.
946 */
947 if (ic->ic_state != IEEE80211_S_RUN)
948 goto settimer;
949
950 /* Long calibration runs independently of short calibration. */
951 if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
952 longcal = B_TRUE;
953 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
954 		    "%s: longcal @%u\n", __func__, timestamp));
955 sc->sc_ani.sc_longcal_timer = timestamp;
956 }
957
958 /* Short calibration applies only while sc_caldone is FALSE */
959 if (!sc->sc_ani.sc_caldone) {
960 if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
961 ATH_SHORT_CALINTERVAL) {
962 shortcal = B_TRUE;
963 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
964 			    "%s: shortcal @%u\n",
965 			    __func__, timestamp));
966 sc->sc_ani.sc_shortcal_timer = timestamp;
967 sc->sc_ani.sc_resetcal_timer = timestamp;
968 }
969 } else {
970 if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
971 ATH_RESTART_CALINTERVAL) {
972 ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
973 &sc->sc_ani.sc_caldone);
974 if (sc->sc_ani.sc_caldone)
975 sc->sc_ani.sc_resetcal_timer = timestamp;
976 }
977 }
978
979 /* Verify whether we must check ANI */
980 if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
981 ATH_ANI_POLLINTERVAL) {
982 aniflag = B_TRUE;
983 sc->sc_ani.sc_checkani_timer = timestamp;
984 }
985
986 /* Skip all processing if there's nothing to do. */
987 if (longcal || shortcal || aniflag) {
988 /* Call ANI routine if necessary */
989 if (aniflag)
990 ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
991 ah->ah_curchan);
992
993 /* Perform calibration if necessary */
994 if (longcal || shortcal) {
995 boolean_t iscaldone = B_FALSE;
996
997 if (ath9k_hw_calibrate(ah, ah->ah_curchan,
998 sc->sc_rx_chainmask, longcal, &iscaldone)) {
999 if (longcal)
1000 sc->sc_ani.sc_noise_floor =
1001 ath9k_hw_getchan_noise(ah,
1002 ah->ah_curchan);
1003
1004 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1005 "%s: calibrate chan %u/%x nf: %d\n",
1006 __func__,
1007 ah->ah_curchan->channel,
1008 ah->ah_curchan->channelFlags,
1009 sc->sc_ani.sc_noise_floor));
1010 } else {
1011 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1012 "%s: calibrate chan %u/%x failed\n",
1013 __func__,
1014 ah->ah_curchan->channel,
1015 ah->ah_curchan->channelFlags));
1016 }
1017 sc->sc_ani.sc_caldone = iscaldone;
1018 }
1019 }
1020
1021 settimer:
1022 /*
1023 * Set timer interval based on previous results.
1024 * The interval must be the shortest necessary to satisfy ANI,
1025 * short calibration and long calibration.
1026 */
1027 cal_interval = ATH_LONG_CALINTERVAL;
1028 if (sc->sc_ah->ah_config.enable_ani)
1029 cal_interval =
1030 min(cal_interval, (uint32_t)ATH_ANI_POLLINTERVAL);
1031
1032 if (!sc->sc_ani.sc_caldone)
1033 cal_interval = min(cal_interval,
1034 (uint32_t)ATH_SHORT_CALINTERVAL);
1035
1036 	sc->sc_cal_timer = 0;
1037 	sc->sc_cal_timer = timeout(arn_ani_calibrate, (void *)sc,
1038 drv_usectohz(cal_interval * 1000));
1039 }
1040
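/*
 * arn_ani_calibrate() rearms itself from its own callout, so a single
 * untimeout() can race with the rearm. Loop until the id we cancel
 * matches the one currently recorded before clearing it.
 */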
1041 static void
1042 arn_stop_caltimer(struct arn_softc *sc)
1043 {
1044 timeout_id_t tmp_id = 0;
1045
1046 while ((sc->sc_cal_timer != 0) && (tmp_id != sc->sc_cal_timer)) {
1047 tmp_id = sc->sc_cal_timer;
1048 (void) untimeout(tmp_id);
1049 }
1050 sc->sc_cal_timer = 0;
1051 }
1052
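/*
 * Hardware interrupt handler. Heavy lifting is deferred: receive
 * processing is kicked to the soft interrupt (sc_softint_id) and
 * transmit completion to the driver taskq, while fatal and RX overrun
 * conditions fall through to a full chip reset.
 */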
1053 static uint_t
1054 arn_isr(caddr_t arg)
1055 {
1056 /* LINTED E_BAD_PTR_CAST_ALIGN */
1057 struct arn_softc *sc = (struct arn_softc *)arg;
1058 struct ath_hal *ah = sc->sc_ah;
1059 enum ath9k_int status;
1060 ieee80211com_t *ic = (ieee80211com_t *)sc;
1061
1062 ARN_LOCK(sc);
1063
1064 if (sc->sc_flags & SC_OP_INVALID) {
1065 /*
1066 * The hardware is not ready/present, don't
1067 * touch anything. Note this can happen early
1068 * on if the IRQ is shared.
1069 */
1070 ARN_UNLOCK(sc);
1071 return (DDI_INTR_UNCLAIMED);
1072 }
1073 if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */
1074 ARN_UNLOCK(sc);
1075 return (DDI_INTR_UNCLAIMED);
1076 }
1077
1078 /*
1079 * Figure out the reason(s) for the interrupt. Note
1080 * that the hal returns a pseudo-ISR that may include
1081 * bits we haven't explicitly enabled so we mask the
1082 	 * value to ensure we only process bits we requested.
1083 */
1084 (void) ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
1085
1086 status &= sc->sc_imask; /* discard unasked-for bits */
1087
1088 /*
1089 * If there are no status bits set, then this interrupt was not
1090 * for me (should have been caught above).
1091 */
1092 if (!status) {
1093 ARN_UNLOCK(sc);
1094 return (DDI_INTR_UNCLAIMED);
1095 }
1096
1097 sc->sc_intrstatus = status;
1098
1099 if (status & ATH9K_INT_FATAL) {
1100 /* need a chip reset */
1101 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1102 "ATH9K_INT_FATAL\n"));
1103 goto reset;
1104 } else if (status & ATH9K_INT_RXORN) {
1105 /* need a chip reset */
1106 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1107 "ATH9K_INT_RXORN\n"));
1108 goto reset;
1109 } else {
1110 if (status & ATH9K_INT_RXEOL) {
1111 /*
1112 * NB: the hardware should re-read the link when
1113 * RXE bit is written, but it doesn't work
1114 * at least on older hardware revs.
1115 */
1116 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1117 "ATH9K_INT_RXEOL\n"));
1118 sc->sc_rxlink = NULL;
1119 }
1120 if (status & ATH9K_INT_TXURN) {
1121 /* bump tx trigger level */
1122 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1123 "ATH9K_INT_TXURN\n"));
1124 (void) ath9k_hw_updatetxtriglevel(ah, B_TRUE);
1125 }
1126 /* XXX: optimize this */
1127 if (status & ATH9K_INT_RX) {
1128 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1129 "ATH9K_INT_RX\n"));
1130 sc->sc_rx_pend = 1;
1131 ddi_trigger_softintr(sc->sc_softint_id);
1132 }
1133 if (status & ATH9K_INT_TX) {
1134 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1135 "ATH9K_INT_TX\n"));
1136 if (ddi_taskq_dispatch(sc->sc_tq,
1137 arn_tx_int_proc, sc, DDI_NOSLEEP) !=
1138 DDI_SUCCESS) {
1139 arn_problem("arn: arn_isr(): "
1140 "No memory for tx taskq\n");
1141 }
1142 }
1143 #ifdef ARN_ATH9K_INT_MIB
1144 if (status & ATH9K_INT_MIB) {
1145 /*
1146 * Disable interrupts until we service the MIB
1147 * interrupt; otherwise it will continue to
1148 * fire.
1149 */
1150 (void) ath9k_hw_set_interrupts(ah, 0);
1151 /*
1152 * Let the hal handle the event. We assume
1153 * it will clear whatever condition caused
1154 * the interrupt.
1155 */
1156 ath9k_hw_procmibevent(ah, &sc->sc_halstats);
1157 (void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
1158 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1159 "ATH9K_INT_MIB\n"));
1160 }
1161 #endif
1162
1163 #ifdef ARN_ATH9K_INT_TIM_TIMER
1164 if (status & ATH9K_INT_TIM_TIMER) {
1165 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1166 "ATH9K_INT_TIM_TIMER\n"));
1167 if (!(ah->ah_caps.hw_caps &
1168 ATH9K_HW_CAP_AUTOSLEEP)) {
1169 /*
1170 * Clear RxAbort bit so that we can
1171 * receive frames
1172 */
1173 ath9k_hw_setrxabort(ah, 0);
1174 goto reset;
1175 }
1176 }
1177 #endif
1178
1179 if (status & ATH9K_INT_BMISS) {
1180 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1181 "ATH9K_INT_BMISS\n"));
1182 #ifdef ARN_HW_BEACON_MISS_HANDLE
1183 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1184 			    "handle beacon miss by H/W mechanism\n"));
1185 if (ddi_taskq_dispatch(sc->sc_tq, arn_bmiss_proc,
1186 sc, DDI_NOSLEEP) != DDI_SUCCESS) {
1187 arn_problem("arn: arn_isr(): "
1188 "No memory available for bmiss taskq\n");
1189 }
1190 #else
1191 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1192 			    "handle beacon miss by S/W mechanism\n"));
1193 #endif /* ARN_HW_BEACON_MISS_HANDLE */
1194 }
1195
1196 ARN_UNLOCK(sc);
1197
1198 #ifdef ARN_ATH9K_INT_CST
1199 /* carrier sense timeout */
1200 if (status & ATH9K_INT_CST) {
1201 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1202 "ATH9K_INT_CST\n"));
1203 return (DDI_INTR_CLAIMED);
1204 }
1205 #endif
1206
1207 if (status & ATH9K_INT_SWBA) {
1208 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1209 "ATH9K_INT_SWBA\n"));
1210 /* This will occur only in Host-AP or Ad-Hoc mode */
1211 return (DDI_INTR_CLAIMED);
1212 }
1213 }
1214
1215 return (DDI_INTR_CLAIMED);
1216 reset:
1217 	ARN_DBG((ARN_DBG_INTERRUPT, "Reset for fatal error\n"));
1218 (void) arn_reset(ic);
1219 ARN_UNLOCK(sc);
1220 return (DDI_INTR_CLAIMED);
1221 }
1222
1223 static int
1224 arn_get_channel(struct arn_softc *sc, struct ieee80211_channel *chan)
1225 {
1226 int i;
1227
1228 for (i = 0; i < sc->sc_ah->ah_nchan; i++) {
1229 if (sc->sc_ah->ah_channels[i].channel == chan->ich_freq)
1230 return (i);
1231 }
1232
1233 return (-1);
1234 }
1235
1236 int
1237 arn_reset(ieee80211com_t *ic)
1238 {
1239 struct arn_softc *sc = (struct arn_softc *)ic;
1240 struct ath_hal *ah = sc->sc_ah;
1241 int status;
1242 int error = 0;
1243
1244 (void) ath9k_hw_set_interrupts(ah, 0);
1245 arn_draintxq(sc, 0);
1246 (void) arn_stoprecv(sc);
1247
1248 if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan, sc->tx_chan_width,
1249 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1250 sc->sc_ht_extprotspacing, B_FALSE, &status)) {
1251 ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
1252 "unable to reset hardware; hal status %u\n", status));
1253 error = EIO;
1254 }
1255
1256 if (arn_startrecv(sc) != 0)
1257 ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
1258 "unable to start recv logic\n"));
1259
1260 /*
1261 * We may be doing a reset in response to a request
1262 * that changes the channel so update any state that
1263 * might change as a result.
1264 */
1265 arn_setcurmode(sc, arn_chan2mode(sc->sc_ah->ah_curchan));
1266
1267 arn_update_txpow(sc);
1268
1269 if (sc->sc_flags & SC_OP_BEACONS)
1270 arn_beacon_config(sc); /* restart beacons */
1271
1272 (void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
1273
1274 return (error);
1275 }
1276
1277 int
1278 arn_get_hal_qnum(uint16_t queue, struct arn_softc *sc)
1279 {
1280 int qnum;
1281
1282 switch (queue) {
1283 case WME_AC_VO:
1284 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1285 break;
1286 case WME_AC_VI:
1287 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1288 break;
1289 case WME_AC_BE:
1290 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1291 break;
1292 case WME_AC_BK:
1293 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1294 break;
1295 default:
1296 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1297 break;
1298 }
1299
1300 return (qnum);
1301 }
1302
1303 static struct {
1304 uint32_t version;
1305 const char *name;
1306 } ath_mac_bb_names[] = {
1307 { AR_SREV_VERSION_5416_PCI, "5416" },
1308 { AR_SREV_VERSION_5416_PCIE, "5418" },
1309 { AR_SREV_VERSION_9100, "9100" },
1310 { AR_SREV_VERSION_9160, "9160" },
1311 { AR_SREV_VERSION_9280, "9280" },
1312 { AR_SREV_VERSION_9285, "9285" }
1313 };
1314
1315 static struct {
1316 uint16_t version;
1317 const char *name;
1318 } ath_rf_names[] = {
1319 { 0, "5133" },
1320 { AR_RAD5133_SREV_MAJOR, "5133" },
1321 { AR_RAD5122_SREV_MAJOR, "5122" },
1322 { AR_RAD2133_SREV_MAJOR, "2133" },
1323 { AR_RAD2122_SREV_MAJOR, "2122" }
1324 };
1325
1326 /*
1327 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
1328 */
1329
1330 static const char *
1331 arn_mac_bb_name(uint32_t mac_bb_version)
1332 {
1333 int i;
1334
1335 for (i = 0; i < ARRAY_SIZE(ath_mac_bb_names); i++) {
1336 if (ath_mac_bb_names[i].version == mac_bb_version) {
1337 return (ath_mac_bb_names[i].name);
1338 }
1339 }
1340
1341 return ("????");
1342 }
1343
1344 /*
1345 * Return the RF name. "????" is returned if the RF is unknown.
1346 */
1347
1348 static const char *
1349 arn_rf_name(uint16_t rf_version)
1350 {
1351 int i;
1352
1353 for (i = 0; i < ARRAY_SIZE(ath_rf_names); i++) {
1354 if (ath_rf_names[i].version == rf_version) {
1355 return (ath_rf_names[i].name);
1356 }
1357 }
1358
1359 return ("????");
1360 }
1361
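/*
 * Per-channel scan callout: while the state machine remains in SCAN,
 * step to the next channel every arn_dwelltime milliseconds.
 */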
1362 static void
1363 arn_next_scan(void *arg)
1364 {
1365 ieee80211com_t *ic = arg;
1366 struct arn_softc *sc = (struct arn_softc *)ic;
1367
1368 sc->sc_scan_timer = 0;
1369 if (ic->ic_state == IEEE80211_S_SCAN) {
1370 sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
1371 drv_usectohz(arn_dwelltime * 1000));
1372 ieee80211_next_scan(ic);
1373 }
1374 }
1375
1376 static void
1377 arn_stop_scantimer(struct arn_softc *sc)
1378 {
1379 timeout_id_t tmp_id = 0;
1380
1381 while ((sc->sc_scan_timer != 0) && (tmp_id != sc->sc_scan_timer)) {
1382 tmp_id = sc->sc_scan_timer;
1383 (void) untimeout(tmp_id);
1384 }
1385 sc->sc_scan_timer = 0;
1386 }
1387
1388 static int32_t
1389 arn_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1390 {
1391 struct arn_softc *sc = (struct arn_softc *)ic;
1392 struct ath_hal *ah = sc->sc_ah;
1393 struct ieee80211_node *in;
1394 int32_t i, error;
1395 uint8_t *bssid;
1396 uint32_t rfilt;
1397 enum ieee80211_state ostate;
1398 struct ath9k_channel *channel;
1399 int pos;
1400
1401 /* Should set up & init LED here */
1402
1403 if (sc->sc_flags & SC_OP_INVALID)
1404 return (0);
1405
1406 ostate = ic->ic_state;
1407 ARN_DBG((ARN_DBG_INIT, "arn: arn_newstate(): "
1408 "%x -> %x!\n", ostate, nstate));
1409
1410 ARN_LOCK(sc);
1411
1412 if (nstate != IEEE80211_S_SCAN)
1413 arn_stop_scantimer(sc);
1414 if (nstate != IEEE80211_S_RUN)
1415 arn_stop_caltimer(sc);
1416
1417 /* Should set LED here */
1418
1419 if (nstate == IEEE80211_S_INIT) {
1420 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1421 /*
1422 * Disable interrupts.
1423 */
1424 (void) ath9k_hw_set_interrupts
1425 (ah, sc->sc_imask &~ ATH9K_INT_GLOBAL);
1426
1427 #ifdef ARN_IBSS
1428 if (ic->ic_opmode == IEEE80211_M_IBSS) {
1429 (void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
1430 arn_beacon_return(sc);
1431 }
1432 #endif
1433 ARN_UNLOCK(sc);
1434 ieee80211_stop_watchdog(ic);
1435 goto done;
1436 }
1437 in = ic->ic_bss;
1438
1439 pos = arn_get_channel(sc, ic->ic_curchan);
1440
1441 if (pos == -1) {
1442 ARN_DBG((ARN_DBG_FATAL, "arn: "
1443 "%s: Invalid channel\n", __func__));
1444 error = EINVAL;
1445 ARN_UNLOCK(sc);
1446 goto bad;
1447 }
1448
1449 if (in->in_htcap & IEEE80211_HTCAP_CHWIDTH40) {
1450 arn_update_chainmask(sc);
1451 sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
1452 } else
1453 sc->tx_chan_width = ATH9K_HT_MACMODE_20;
1454
1455 sc->sc_ah->ah_channels[pos].chanmode =
1456 arn_chan2flags(ic, ic->ic_curchan);
1457 channel = &sc->sc_ah->ah_channels[pos];
1458 if (channel == NULL) {
1459 arn_problem("arn_newstate(): channel == NULL");
1460 ARN_UNLOCK(sc);
1461 goto bad;
1462 }
1463 error = arn_set_channel(sc, channel);
1464 if (error != 0) {
1465 if (nstate != IEEE80211_S_SCAN) {
1466 ARN_UNLOCK(sc);
1467 ieee80211_reset_chan(ic);
1468 goto bad;
1469 }
1470 }
1471
1472 /*
1473 * Get the receive filter according to the
1474 * operating mode and state
1475 */
1476 rfilt = arn_calcrxfilter(sc);
1477
1478 if (nstate == IEEE80211_S_SCAN)
1479 bssid = ic->ic_macaddr;
1480 else
1481 bssid = in->in_bssid;
1482
1483 ath9k_hw_setrxfilter(ah, rfilt);
1484
1485 if (nstate == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS)
1486 ath9k_hw_write_associd(ah, bssid, in->in_associd);
1487 else
1488 ath9k_hw_write_associd(ah, bssid, 0);
1489
1490 /* Check for WLAN_CAPABILITY_PRIVACY ? */
1491 if (ic->ic_flags & IEEE80211_F_PRIVACY) {
1492 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1493 if (ath9k_hw_keyisvalid(ah, (uint16_t)i))
1494 (void) ath9k_hw_keysetmac(ah, (uint16_t)i,
1495 bssid);
1496 }
1497 }
1498
1499 if (nstate == IEEE80211_S_RUN) {
1500 switch (ic->ic_opmode) {
1501 #ifdef ARN_IBSS
1502 case IEEE80211_M_IBSS:
1503 /*
1504 * Allocate and setup the beacon frame.
1505 * Stop any previous beacon DMA.
1506 */
1507 (void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
1508 arn_beacon_return(sc);
1509 error = arn_beacon_alloc(sc, in);
1510 if (error != 0) {
1511 ARN_UNLOCK(sc);
1512 goto bad;
1513 }
1514 /*
1515 * If joining an adhoc network defer beacon timer
1516 * configuration to the next beacon frame so we
1517 * have a current TSF to use. Otherwise we're
1518 * starting an ibss/bss so there's no need to delay.
1519 */
1520 if (ic->ic_opmode == IEEE80211_M_IBSS &&
1521 ic->ic_bss->in_tstamp.tsf != 0) {
1522 sc->sc_bsync = 1;
1523 } else {
1524 arn_beacon_config(sc);
1525 }
1526 break;
1527 #endif /* ARN_IBSS */
1528 case IEEE80211_M_STA:
1529 if (ostate != IEEE80211_S_RUN) {
1530 /*
1531 * Defer beacon timer configuration to the next
1532 * beacon frame so we have a current TSF to use.
1533 * Any TSF collected when scanning is likely old
1534 */
1535 #ifdef ARN_IBSS
1536 sc->sc_bsync = 1;
1537 #else
1538 /* Configure the beacon and sleep timers. */
1539 arn_beacon_config(sc);
1540 /* Reset rssi stats */
1541 sc->sc_halstats.ns_avgbrssi =
1542 ATH_RSSI_DUMMY_MARKER;
1543 sc->sc_halstats.ns_avgrssi =
1544 ATH_RSSI_DUMMY_MARKER;
1545 sc->sc_halstats.ns_avgtxrssi =
1546 ATH_RSSI_DUMMY_MARKER;
1547 sc->sc_halstats.ns_avgtxrate =
1548 ATH_RATE_DUMMY_MARKER;
1549 /* end */
1550
1551 #endif /* ARN_IBSS */
1552 }
1553 break;
1554 default:
1555 break;
1556 }
1557 } else {
1558 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1559 (void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
1560 }
1561
1562 /*
1563 * Reset the rate control state.
1564 */
1565 arn_rate_ctl_reset(sc, nstate);
1566
1567 ARN_UNLOCK(sc);
1568 done:
1569 /*
1570 * Invoke the parent method to complete the work.
1571 */
1572 error = sc->sc_newstate(ic, nstate, arg);
1573
1574 /*
1575 * Finally, start any timers.
1576 */
1577 if (nstate == IEEE80211_S_RUN) {
1578 ieee80211_start_watchdog(ic, 1);
1579 ASSERT(sc->sc_cal_timer == 0);
1580 sc->sc_cal_timer = timeout(arn_ani_calibrate, (void *)sc,
1581 drv_usectohz(100 * 1000));
1582 } else if ((nstate == IEEE80211_S_SCAN) && (ostate != nstate)) {
1583 /* start ap/neighbor scan timer */
1584 /* ASSERT(sc->sc_scan_timer == 0); */
1585 if (sc->sc_scan_timer != 0) {
1586 (void) untimeout(sc->sc_scan_timer);
1587 sc->sc_scan_timer = 0;
1588 }
1589 sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
1590 drv_usectohz(arn_dwelltime * 1000));
1591 }
1592
1593 bad:
1594 return (error);
1595 }
1596
1597 static void
1598 arn_watchdog(void *arg)
1599 {
1600 struct arn_softc *sc = arg;
1601 ieee80211com_t *ic = &sc->sc_isc;
1602 int ntimer = 0;
1603
1604 ARN_LOCK(sc);
1605 ic->ic_watchdog_timer = 0;
1606 if (sc->sc_flags & SC_OP_INVALID) {
1607 ARN_UNLOCK(sc);
1608 return;
1609 }
1610
1611 if (ic->ic_state == IEEE80211_S_RUN) {
1612 /*
1613 * Start the background rate control thread if we
1614 * are not configured to use a fixed xmit rate.
1615 */
1616 #ifdef ARN_LEGACY_RC
1617 if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1618 sc->sc_stats.ast_rate_calls ++;
1619 if (ic->ic_opmode == IEEE80211_M_STA)
1620 arn_rate_ctl(ic, ic->ic_bss);
1621 else
1622 ieee80211_iterate_nodes(&ic->ic_sta,
1623 arn_rate_ctl, sc);
1624 }
1625 #endif /* ARN_LEGACY_RC */
1626
1627 #ifdef ARN_HW_BEACON_MISS_HANDLE
1628 /* nothing to do here */
1629 #else
1630 /* currently set 10 seconds as beacon miss threshold */
1631 if (ic->ic_beaconmiss++ > 100) {
1632 			ARN_DBG((ARN_DBG_BEACON, "arn_watchdog(): "
1633 			    "Beacon missed for 10 seconds, run "
1634 			    "ieee80211_new_state(ic, IEEE80211_S_INIT, -1)\n"));
1635 ARN_UNLOCK(sc);
1636 (void) ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1637 return;
1638 }
1639 #endif /* ARN_HW_BEACON_MISS_HANDLE */
1640
1641 ntimer = 1;
1642 }
1643 ARN_UNLOCK(sc);
1644
1645 ieee80211_watchdog(ic);
1646 if (ntimer != 0)
1647 ieee80211_start_watchdog(ic, ntimer);
1648 }
1649
1650 /* ARGSUSED */
1651 static struct ieee80211_node *
1652 arn_node_alloc(ieee80211com_t *ic)
1653 {
1654 struct ath_node *an;
1655 #ifdef ARN_TX_AGGREGATION
1656 struct arn_softc *sc = (struct arn_softc *)ic;
1657 #endif
1658
1659 an = kmem_zalloc(sizeof (struct ath_node), KM_SLEEP);
1660
1661 /* legacy rate control */
1662 #ifdef ARN_LEGACY_RC
1663 arn_rate_update(sc, &an->an_node, 0);
1664 #endif
1665
1666 #ifdef ARN_TX_AGGREGATION
1667 if (sc->sc_flags & SC_OP_TXAGGR) {
1668 arn_tx_node_init(sc, an);
1669 }
1670 #endif /* ARN_TX_AGGREGATION */
1671
1672 an->last_rssi = ATH_RSSI_DUMMY_MARKER;
1673
1674 return ((an != NULL) ? &an->an_node : NULL);
1675 }
1676
1677 static void
1678 arn_node_free(struct ieee80211_node *in)
1679 {
1680 ieee80211com_t *ic = in->in_ic;
1681 struct arn_softc *sc = (struct arn_softc *)ic;
1682 struct ath_buf *bf;
1683 struct ath_txq *txq;
1684 int32_t i;
1685
1686 #ifdef ARN_TX_AGGREGATION
1687 if (sc->sc_flags & SC_OP_TXAGGR)
1688 arn_tx_node_cleanup(sc, in);
1689 #endif /* TX_AGGREGATION */
1690
1691 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1692 if (ARN_TXQ_SETUP(sc, i)) {
1693 txq = &sc->sc_txq[i];
1694 mutex_enter(&txq->axq_lock);
1695 bf = list_head(&txq->axq_list);
1696 while (bf != NULL) {
1697 if (bf->bf_in == in) {
1698 bf->bf_in = NULL;
1699 }
1700 bf = list_next(&txq->axq_list, bf);
1701 }
1702 mutex_exit(&txq->axq_lock);
1703 }
1704 }
1705
1706 ic->ic_node_cleanup(in);
1707
1708 if (in->in_wpa_ie != NULL)
1709 ieee80211_free(in->in_wpa_ie);
1710
1711 if (in->in_wme_ie != NULL)
1712 ieee80211_free(in->in_wme_ie);
1713
1714 if (in->in_htcap_ie != NULL)
1715 ieee80211_free(in->in_htcap_ie);
1716
1717 kmem_free(in, sizeof (struct ath_node));
1718 }
1719
1720 /*
1721 * Allocate tx/rx key slots for TKIP. We allocate one slot for
1722 * each key. MIC is right after the decrypt/encrypt key.
1723 */
1724 static uint16_t
1725 arn_key_alloc_pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1726 ieee80211_keyix *rxkeyix)
1727 {
1728 uint16_t i, keyix;
1729
1730 ASSERT(!sc->sc_splitmic);
1731 for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
1732 uint8_t b = sc->sc_keymap[i];
1733 if (b == 0xff)
1734 continue;
1735 for (keyix = i * NBBY; keyix < (i + 1) * NBBY;
1736 keyix++, b >>= 1) {
1737 if ((b & 1) || is_set(keyix+64, sc->sc_keymap)) {
1738 /* full pair unavailable */
1739 continue;
1740 }
1741 set_bit(keyix, sc->sc_keymap);
1742 set_bit(keyix+64, sc->sc_keymap);
1743 ARN_DBG((ARN_DBG_KEYCACHE,
1744 "arn_key_alloc_pair(): key pair %u,%u\n",
1745 keyix, keyix+64));
1746 *txkeyix = *rxkeyix = keyix;
1747 return (1);
1748 }
1749 }
1750 ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_pair():"
1751 " out of pair space\n"));
1752
1753 return (0);
1754 }
1755
1756 /*
1757 * Allocate tx/rx key slots for TKIP. We allocate two slots for
1758 * each key, one for decrypt/encrypt and the other for the MIC.
1759 */
1760 static int
1761 arn_key_alloc_2pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1762 ieee80211_keyix *rxkeyix)
1763 {
1764 uint16_t i, keyix;
1765
1766 ASSERT(sc->sc_splitmic);
1767 for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
1768 uint8_t b = sc->sc_keymap[i];
1769 if (b != 0xff) {
1770 /*
1771 * One or more slots in this byte are free.
1772 */
1773 keyix = i*NBBY;
1774 while (b & 1) {
1775 again:
1776 keyix++;
1777 b >>= 1;
1778 }
1779 /* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
1780 if (is_set(keyix+32, sc->sc_keymap) ||
1781 is_set(keyix+64, sc->sc_keymap) ||
1782 is_set(keyix+32+64, sc->sc_keymap)) {
1783 /* full pair unavailable */
1784 if (keyix == (i+1)*NBBY) {
1785 /* no slots were appropriate, advance */
1786 continue;
1787 }
1788 goto again;
1789 }
1790 set_bit(keyix, sc->sc_keymap);
1791 set_bit(keyix+64, sc->sc_keymap);
1792 set_bit(keyix+32, sc->sc_keymap);
1793 set_bit(keyix+32+64, sc->sc_keymap);
1794 ARN_DBG((ARN_DBG_KEYCACHE,
1795 "arn_key_alloc_2pair(): key pair %u,%u %u,%u\n",
1796 keyix, keyix+64,
1797 keyix+32, keyix+32+64));
1798 *txkeyix = *rxkeyix = keyix;
1799 return (1);
1800 }
1801 }
1802 ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_2pair(): "
1803 " out of pair space\n"));
1804
1805 return (0);
1806 }
1807 /*
1808 * Allocate a single key cache slot.
1809 */
1810 static int
1811 arn_key_alloc_single(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1812 ieee80211_keyix *rxkeyix)
1813 {
1814 uint16_t i, keyix;
1815
1816 /* try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
1817 for (i = 0; i < ARRAY_SIZE(sc->sc_keymap); i++) {
1818 uint8_t b = sc->sc_keymap[i];
1819
1820 if (b != 0xff) {
1821 /*
1822 * One or more slots are free.
1823 */
1824 keyix = i*NBBY;
1825 while (b & 1)
1826 keyix++, b >>= 1;
1827 set_bit(keyix, sc->sc_keymap);
1828 ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_single(): "
1829 "key %u\n", keyix));
1830 *txkeyix = *rxkeyix = keyix;
1831 return (1);
1832 }
1833 }
1834 return (0);
1835 }
1836
1837 /*
1838 * Allocate one or more key cache slots for a unicast key. The
1839 * key itself is needed only to identify the cipher. For hardware
1840 * TKIP with split cipher+MIC keys we allocate two key cache slot
1841 * pairs so that we can setup separate TX and RX MIC keys. Note
1842 * that the MIC key for a TKIP key at slot i is assumed by the
1843 * hardware to be at slot i+64. This limits TKIP keys to the first
1844 * 64 entries.
1845 */
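/*
 * Key cache slot layout assumed by the allocators below (sketch):
 *
 *	keyix		data (TX) key
 *	keyix + 64	MIC key (TX MIC on split-mic hardware)
 *	keyix + 32	RX key (split-mic hardware only)
 *	keyix + 32+64	RX MIC key (split-mic hardware only)
 */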
1846 /* ARGSUSED */
1847 int
1848 arn_key_alloc(ieee80211com_t *ic, const struct ieee80211_key *k,
1849 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1850 {
1851 struct arn_softc *sc = (struct arn_softc *)ic;
1852
1853 /*
1854 * We allocate two pair for TKIP when using the h/w to do
1855 * the MIC. For everything else, including software crypto,
1856 * we allocate a single entry. Note that s/w crypto requires
1857 * a pass-through slot on the 5211 and 5212. The 5210 does
1858 * not support pass-through cache entries and we map all
1859 * those requests to slot 0.
1860 */
1861 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
1862 return (arn_key_alloc_single(sc, keyix, rxkeyix));
1863 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
1864 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1865 if (sc->sc_splitmic)
1866 return (arn_key_alloc_2pair(sc, keyix, rxkeyix));
1867 else
1868 return (arn_key_alloc_pair(sc, keyix, rxkeyix));
1869 } else {
1870 return (arn_key_alloc_single(sc, keyix, rxkeyix));
1871 }
1872 }
1873
1874 /*
1875 * Delete an entry in the key cache allocated by ath_key_alloc.
1876 */
1877 int
1878 arn_key_delete(ieee80211com_t *ic, const struct ieee80211_key *k)
1879 {
1880 struct arn_softc *sc = (struct arn_softc *)ic;
1881 struct ath_hal *ah = sc->sc_ah;
1882 const struct ieee80211_cipher *cip = k->wk_cipher;
1883 ieee80211_keyix keyix = k->wk_keyix;
1884
1885 ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_delete():"
1886 " delete key %u ic_cipher=0x%x\n", keyix, cip->ic_cipher));
1887
1888 (void) ath9k_hw_keyreset(ah, keyix);
1889 /*
1890 * Handle split tx/rx keying required for TKIP with h/w MIC.
1891 */
1892 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
1893 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
1894 (void) ath9k_hw_keyreset(ah, keyix+32); /* RX key */
1895
1896 if (keyix >= IEEE80211_WEP_NKID) {
1897 /*
1898 * Don't touch keymap entries for global keys so
1899 * they are never considered for dynamic allocation.
1900 */
1901 clr_bit(keyix, sc->sc_keymap);
1902 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
1903 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1904 /*
1905 * If splitmic is true +64 is TX key MIC,
1906 * else +64 is RX key + RX key MIC.
1907 */
1908 clr_bit(keyix+64, sc->sc_keymap);
1909 if (sc->sc_splitmic) {
1910 /* Rx key */
1911 clr_bit(keyix+32, sc->sc_keymap);
1912 /* RX key MIC */
1913 clr_bit(keyix+32+64, sc->sc_keymap);
1914 }
1915 }
1916 }
1917 return (1);
1918 }
1919
1920 /*
1921 * Set a TKIP key into the hardware. This handles the
1922 * potential distribution of key state to multiple key
1923 * cache slots for TKIP.
1924 */
1925 static int
1926 arn_keyset_tkip(struct arn_softc *sc, const struct ieee80211_key *k,
1927 struct ath9k_keyval *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
1928 {
1929 uint8_t *key_rxmic = NULL;
1930 uint8_t *key_txmic = NULL;
1931 uint8_t *key = (uint8_t *)&(k->wk_key[0]);
1932 struct ath_hal *ah = sc->sc_ah;
1933
1934 key_txmic = key + 16;
1935 key_rxmic = key + 24;
1936
1937 if (mac == NULL) {
1938 /* Group key installation */
1939 (void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
1940 return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
1941 mac, B_FALSE));
1942 }
1943 if (!sc->sc_splitmic) {
1944 /*
1945 * data key goes at first index,
1946 * the hal handles the MIC keys at index+64.
1947 */
1948 (void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
1949 (void) memcpy(hk->kv_txmic, key_txmic, sizeof (hk->kv_txmic));
1950 return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
1951 mac, B_FALSE));
1952 }
1953 /*
1954 * TX key goes at first index, RX key at +32.
1955 * The hal handles the MIC keys at index+64.
1956 */
1957 (void) memcpy(hk->kv_mic, key_txmic, sizeof (hk->kv_mic));
1958 if (!(ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, NULL,
1959 B_FALSE))) {
1960 /* Txmic entry failed. No need to proceed further */
1961 ARN_DBG((ARN_DBG_KEYCACHE,
1962 "%s Setting TX MIC Key Failed\n", __func__));
1963 return (0);
1964 }
1965
1966 (void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
1967
1968 /* XXX delete tx key on failure? */
1969 return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, mac, B_FALSE));
1970
1971 }
1972
1973 int
1974 arn_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
1975 const uint8_t mac[IEEE80211_ADDR_LEN])
1976 {
1977 struct arn_softc *sc = (struct arn_softc *)ic;
1978 const struct ieee80211_cipher *cip = k->wk_cipher;
1979 struct ath9k_keyval hk;
1980
1981 /* cipher table */
1982 static const uint8_t ciphermap[] = {
1983 ATH9K_CIPHER_WEP, /* IEEE80211_CIPHER_WEP */
1984 ATH9K_CIPHER_TKIP, /* IEEE80211_CIPHER_TKIP */
1985 ATH9K_CIPHER_AES_OCB, /* IEEE80211_CIPHER_AES_OCB */
1986 ATH9K_CIPHER_AES_CCM, /* IEEE80211_CIPHER_AES_CCM */
1987 ATH9K_CIPHER_CKIP, /* IEEE80211_CIPHER_CKIP */
1988 ATH9K_CIPHER_CLR, /* IEEE80211_CIPHER_NONE */
1989 };
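	/*
	 * NB: ciphermap[] is indexed by the net80211 IEEE80211_CIPHER_*
	 * values, so its ordering must match that enumeration.
	 */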
1990
1991 bzero(&hk, sizeof (hk));
1992
1993 /*
1994 	 * Software crypto uses a "clear key" so that non-crypto
1995 	 * state kept in the key cache is maintained and rx frames
1996 	 * have an entry to match.
1997 */
1998 if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
1999 ASSERT(cip->ic_cipher < 6);
2000 hk.kv_type = ciphermap[cip->ic_cipher];
2001 hk.kv_len = k->wk_keylen;
2002 bcopy(k->wk_key, hk.kv_val, k->wk_keylen);
2003 } else {
2004 hk.kv_type = ATH9K_CIPHER_CLR;
2005 }
2006
2007 if (hk.kv_type == ATH9K_CIPHER_TKIP &&
2008 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2009 return (arn_keyset_tkip(sc, k, &hk, mac));
2010 } else {
2011 return (ath9k_hw_set_keycache_entry(sc->sc_ah,
2012 k->wk_keyix, &hk, mac, B_FALSE));
2013 }
2014 }
2015
2016 /*
2017 * Enable/Disable short slot timing
2018 */
2019 void
2020 arn_set_shortslot(ieee80211com_t *ic, int onoff)
2021 {
2022 struct ath_hal *ah = ((struct arn_softc *)ic)->sc_ah;
2023
2024 if (onoff)
2025 (void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
2026 else
2027 (void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_20);
2028 }
2029
2030 static int
2031 arn_open(struct arn_softc *sc)
2032 {
2033 ieee80211com_t *ic = (ieee80211com_t *)sc;
2034 struct ieee80211_channel *curchan = ic->ic_curchan;
2035 struct ath9k_channel *init_channel;
2036 int error = 0, pos, status;
2037
2038 ARN_LOCK_ASSERT(sc);
2039
2040 pos = arn_get_channel(sc, curchan);
2041 if (pos == -1) {
2042 ARN_DBG((ARN_DBG_FATAL, "arn: "
2043 "%s: Invalid channel\n", __func__));
2044 error = EINVAL;
2045 goto error;
2046 }
2047
2048 sc->tx_chan_width = ATH9K_HT_MACMODE_20;
2049
2050 if (sc->sc_curmode == ATH9K_MODE_11A) {
2051 sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_A;
2052 } else {
2053 sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_G;
2054 }
2055
2056 init_channel = &sc->sc_ah->ah_channels[pos];
2057
2058 /* Reset SERDES registers */
2059 ath9k_hw_configpcipowersave(sc->sc_ah, 0);
2060
2061 /*
2062 * The basic interface to setting the hardware in a good
2063 * state is ``reset''. On return the hardware is known to
2064 * be powered up and with interrupts disabled. This must
2065 * be followed by initialization of the appropriate bits
2066 * and then setup of the interrupt mask.
2067 */
2068 if (!ath9k_hw_reset(sc->sc_ah, init_channel,
2069 sc->tx_chan_width, sc->sc_tx_chainmask,
2070 sc->sc_rx_chainmask, sc->sc_ht_extprotspacing,
2071 B_FALSE, &status)) {
2072 ARN_DBG((ARN_DBG_FATAL, "arn: "
2073 "%s: unable to reset hardware; hal status %u "
2074 "(freq %u flags 0x%x)\n", __func__, status,
2075 init_channel->channel, init_channel->channelFlags));
2076
2077 error = EIO;
2078 goto error;
2079 }
2080
2081 /*
2082 * This is needed only to setup initial state
2083 * but it's best done after a reset.
2084 */
2085 arn_update_txpow(sc);
2086
2087 /*
2088 * Setup the hardware after reset:
2089 * The receive engine is set going.
2090 * Frame transmit is handled entirely
2091 * in the frame output path; there's nothing to do
2092 * here except setup the interrupt mask.
2093 */
2094 if (arn_startrecv(sc) != 0) {
2095 ARN_DBG((ARN_DBG_INIT, "arn: "
2096 "%s: unable to start recv logic\n", __func__));
2097 error = EIO;
2098 goto error;
2099 }
2100
2101 /* Setup our intr mask. */
2102 sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX |
2103 ATH9K_INT_RXEOL | ATH9K_INT_RXORN |
2104 ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
2105 #ifdef ARN_ATH9K_HW_CAP_GTT
2106 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
2107 sc->sc_imask |= ATH9K_INT_GTT;
2108 #endif
2109
2110 #ifdef ARN_ATH9K_HW_CAP_GTT
2111 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
2112 sc->sc_imask |= ATH9K_INT_CST;
2113 #endif
2114
2115 /*
2116 * Enable MIB interrupts when there are hardware phy counters.
2117 	 * Note we only do this (at the moment) for station and IBSS modes.
2118 */
2119 #ifdef ARN_ATH9K_INT_MIB
2120 if (ath9k_hw_phycounters(sc->sc_ah) &&
2121 ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
2122 (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
2123 sc->sc_imask |= ATH9K_INT_MIB;
2124 #endif
2125 /*
2126 * Some hardware processes the TIM IE and fires an
2127 * interrupt when the TIM bit is set. For hardware
2128 * that does, if not overridden by configuration,
2129 * enable the TIM interrupt when operating as station.
2130 */
2131 #ifdef ARN_ATH9K_INT_TIM
2132 if ((sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
2133 (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
2134 !sc->sc_config.swBeaconProcess)
2135 sc->sc_imask |= ATH9K_INT_TIM;
2136 #endif
2137 if (arn_chan2mode(init_channel) != sc->sc_curmode)
2138 arn_setcurmode(sc, arn_chan2mode(init_channel));
2139 ARN_DBG((ARN_DBG_INIT, "arn: "
2140 "%s: current mode after arn_setcurmode is %d\n",
2141 __func__, sc->sc_curmode));
2142
2143 sc->sc_isrunning = 1;
2144
2145 /* Disable BMISS interrupt when we're not associated */
2146 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
2147 (void) ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
2148
2149 return (0);
2150
2151 error:
2152 return (error);
2153 }
2154
2155 static void
2156 arn_close(struct arn_softc *sc)
2157 {
2158 ieee80211com_t *ic = (ieee80211com_t *)sc;
2159 struct ath_hal *ah = sc->sc_ah;
2160
2161 ARN_LOCK_ASSERT(sc);
2162
2163 if (!sc->sc_isrunning)
2164 return;
2165
2166 /*
2167 * Shutdown the hardware and driver
2168 * Note that some of this work is not possible if the
2169 * hardware is gone (invalid).
2170 */
2171 ARN_UNLOCK(sc);
2172 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2173 ieee80211_stop_watchdog(ic);
2174 ARN_LOCK(sc);
2175
2176 /*
2177 * make sure h/w will not generate any interrupt
2178 * before setting the invalid flag.
2179 */
2180 (void) ath9k_hw_set_interrupts(ah, 0);
2181
2182 if (!(sc->sc_flags & SC_OP_INVALID)) {
2183 arn_draintxq(sc, 0);
2184 (void) arn_stoprecv(sc);
2185 (void) ath9k_hw_phy_disable(ah);
2186 } else {
2187 sc->sc_rxlink = NULL;
2188 }
2189
2190 sc->sc_isrunning = 0;
2191 }
2192
2193 /*
2194 * MAC callback functions
2195 */
2196 static int
2197 arn_m_stat(void *arg, uint_t stat, uint64_t *val)
2198 {
2199 struct arn_softc *sc = arg;
2200 ieee80211com_t *ic = (ieee80211com_t *)sc;
2201 struct ieee80211_node *in;
2202 struct ieee80211_rateset *rs;
2203
2204 ARN_LOCK(sc);
2205 switch (stat) {
2206 case MAC_STAT_IFSPEED:
2207 in = ic->ic_bss;
2208 rs = &in->in_rates;
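		/*
		 * ir_rates[] entries are in 500 kb/s units, so halving the
		 * rate value gives Mb/s and the multiply converts to b/s;
		 * e.g. a rate value of 108 (54 Mb/s) yields 54000000.
		 */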
2209 *val = (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) / 2 *
2210 1000000ull;
2211 break;
2212 case MAC_STAT_NOXMTBUF:
2213 *val = sc->sc_stats.ast_tx_nobuf +
2214 sc->sc_stats.ast_tx_nobufmgt;
2215 break;
2216 case MAC_STAT_IERRORS:
2217 *val = sc->sc_stats.ast_rx_tooshort;
2218 break;
2219 case MAC_STAT_RBYTES:
2220 *val = ic->ic_stats.is_rx_bytes;
2221 break;
2222 case MAC_STAT_IPACKETS:
2223 *val = ic->ic_stats.is_rx_frags;
2224 break;
2225 case MAC_STAT_OBYTES:
2226 *val = ic->ic_stats.is_tx_bytes;
2227 break;
2228 case MAC_STAT_OPACKETS:
2229 *val = ic->ic_stats.is_tx_frags;
2230 break;
2231 case MAC_STAT_OERRORS:
2232 case WIFI_STAT_TX_FAILED:
2233 *val = sc->sc_stats.ast_tx_fifoerr +
2234 sc->sc_stats.ast_tx_xretries +
2235 sc->sc_stats.ast_tx_discard;
2236 break;
2237 case WIFI_STAT_TX_RETRANS:
2238 *val = sc->sc_stats.ast_tx_xretries;
2239 break;
2240 case WIFI_STAT_FCS_ERRORS:
2241 *val = sc->sc_stats.ast_rx_crcerr;
2242 break;
2243 case WIFI_STAT_WEP_ERRORS:
2244 *val = sc->sc_stats.ast_rx_badcrypt;
2245 break;
2246 case WIFI_STAT_TX_FRAGS:
2247 case WIFI_STAT_MCAST_TX:
2248 case WIFI_STAT_RTS_SUCCESS:
2249 case WIFI_STAT_RTS_FAILURE:
2250 case WIFI_STAT_ACK_FAILURE:
2251 case WIFI_STAT_RX_FRAGS:
2252 case WIFI_STAT_MCAST_RX:
2253 case WIFI_STAT_RX_DUPS:
2254 ARN_UNLOCK(sc);
2255 return (ieee80211_stat(ic, stat, val));
2256 default:
2257 ARN_UNLOCK(sc);
2258 return (ENOTSUP);
2259 }
2260 ARN_UNLOCK(sc);
2261
2262 return (0);
2263 }
2264
2265 int
2266 arn_m_start(void *arg)
2267 {
2268 struct arn_softc *sc = arg;
2269 int err = 0;
2270
2271 ARN_LOCK(sc);
2272
2273 /*
2274 * Stop anything previously setup. This is safe
2275 * whether this is the first time through or not.
2276 */
2277
2278 arn_close(sc);
2279
2280 if ((err = arn_open(sc)) != 0) {
2281 ARN_UNLOCK(sc);
2282 return (err);
2283 }
2284
2285 	/* H/W is ready now */
2286 sc->sc_flags &= ~SC_OP_INVALID;
2287
2288 ARN_UNLOCK(sc);
2289
2290 return (0);
2291 }
2292
2293 static void
2294 arn_m_stop(void *arg)
2295 {
2296 struct arn_softc *sc = arg;
2297
2298 ARN_LOCK(sc);
2299 arn_close(sc);
2300
2301 /* disable HAL and put h/w to sleep */
2302 (void) ath9k_hw_disable(sc->sc_ah);
2303 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
2304
2305 /* XXX: hardware will not be ready in suspend state */
2306 sc->sc_flags |= SC_OP_INVALID;
2307 ARN_UNLOCK(sc);
2308 }
2309
2310 static int
2311 arn_m_promisc(void *arg, boolean_t on)
2312 {
2313 struct arn_softc *sc = arg;
2314 struct ath_hal *ah = sc->sc_ah;
2315 uint32_t rfilt;
2316
2317 ARN_LOCK(sc);
2318
2319 rfilt = ath9k_hw_getrxfilter(ah);
2320 if (on)
2321 rfilt |= ATH9K_RX_FILTER_PROM;
2322 else
2323 rfilt &= ~ATH9K_RX_FILTER_PROM;
2324 sc->sc_promisc = on;
2325 ath9k_hw_setrxfilter(ah, rfilt);
2326
2327 ARN_UNLOCK(sc);
2328
2329 return (0);
2330 }
2331
2332 static int
2333 arn_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2334 {
2335 struct arn_softc *sc = arg;
2336 struct ath_hal *ah = sc->sc_ah;
2337 uint32_t val, index, bit;
2338 uint8_t pos;
2339 uint32_t *mfilt = sc->sc_mcast_hash;
2340
2341 ARN_LOCK(sc);
2342
2343 /* calculate XOR of eight 6bit values */
2344 val = ARN_LE_READ_32(mca + 0);
2345 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2346 val = ARN_LE_READ_32(mca + 3);
2347 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2348 pos &= 0x3f;
2349 index = pos / 32;
2350 bit = 1 << (pos % 32);
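	/*
	 * The 6-bit hash picks one of 64 filter bits, kept as two 32-bit
	 * words that are programmed into the hardware below.
	 */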
2351
2352 if (add) { /* enable multicast */
2353 sc->sc_mcast_refs[pos]++;
2354 mfilt[index] |= bit;
2355 } else { /* disable multicast */
2356 if (--sc->sc_mcast_refs[pos] == 0)
2357 mfilt[index] &= ~bit;
2358 }
2359 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
2360
2361 ARN_UNLOCK(sc);
2362 return (0);
2363 }
2364
2365 static int
2366 arn_m_unicst(void *arg, const uint8_t *macaddr)
2367 {
2368 struct arn_softc *sc = arg;
2369 struct ath_hal *ah = sc->sc_ah;
2370 ieee80211com_t *ic = (ieee80211com_t *)sc;
2371
2372 	ARN_DBG((ARN_DBG_XMIT, "arn: arn_m_unicst(): "
2373 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
2374 macaddr[0], macaddr[1], macaddr[2],
2375 macaddr[3], macaddr[4], macaddr[5]));
2376
2377 ARN_LOCK(sc);
2378 IEEE80211_ADDR_COPY(sc->sc_isc.ic_macaddr, macaddr);
2379 (void) ath9k_hw_setmac(ah, sc->sc_isc.ic_macaddr);
2380 (void) arn_reset(ic);
2381 ARN_UNLOCK(sc);
2382 return (0);
2383 }
2384
2385 static mblk_t *
2386 arn_m_tx(void *arg, mblk_t *mp)
2387 {
2388 struct arn_softc *sc = arg;
2389 int error = 0;
2390 mblk_t *next;
2391 ieee80211com_t *ic = (ieee80211com_t *)sc;
2392
2393 /*
2394 * No data frames go out unless we're associated; this
2395 * should not happen as the 802.11 layer does not enable
2396 * the xmit queue until we enter the RUN state.
2397 */
2398 if (ic->ic_state != IEEE80211_S_RUN) {
2399 ARN_DBG((ARN_DBG_XMIT, "arn: arn_m_tx(): "
2400 "discard, state %u\n", ic->ic_state));
2401 sc->sc_stats.ast_tx_discard++;
2402 freemsgchain(mp);
2403 return (NULL);
2404 }
2405
2406 while (mp != NULL) {
2407 next = mp->b_next;
2408 mp->b_next = NULL;
2409 error = arn_tx(ic, mp, IEEE80211_FC0_TYPE_DATA);
2410 if (error != 0) {
2411 mp->b_next = next;
2412 if (error == ENOMEM) {
2413 break;
2414 } else {
2415 freemsgchain(mp);
2416 return (NULL);
2417 }
2418 }
2419 mp = next;
2420 }
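	/*
	 * mp is NULL if every frame was sent or dropped; otherwise the
	 * unsent remainder goes back to the MAC layer, which retries it
	 * once the driver signals that tx buffers are available again.
	 */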
2421
2422 return (mp);
2423 }
2424
2425 static void
2426 arn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2427 {
2428 struct arn_softc *sc = arg;
2429 int32_t err;
2430
2431 err = ieee80211_ioctl(&sc->sc_isc, wq, mp);
2432
2433 ARN_LOCK(sc);
2434 if (err == ENETRESET) {
2435 if (!(sc->sc_flags & SC_OP_INVALID)) {
2436 ARN_UNLOCK(sc);
2437
2438 (void) arn_m_start(sc);
2439
2440 (void) ieee80211_new_state(&sc->sc_isc,
2441 IEEE80211_S_SCAN, -1);
2442 ARN_LOCK(sc);
2443 }
2444 }
2445 ARN_UNLOCK(sc);
2446 }
2447
2448 static int
2449 arn_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2450 uint_t wldp_length, const void *wldp_buf)
2451 {
2452 struct arn_softc *sc = arg;
2453 int err;
2454
2455 err = ieee80211_setprop(&sc->sc_isc, pr_name, wldp_pr_num,
2456 wldp_length, wldp_buf);
2457
2458 ARN_LOCK(sc);
2459
2460 if (err == ENETRESET) {
2461 if (!(sc->sc_flags & SC_OP_INVALID)) {
2462 ARN_UNLOCK(sc);
2463 (void) arn_m_start(sc);
2464 (void) ieee80211_new_state(&sc->sc_isc,
2465 IEEE80211_S_SCAN, -1);
2466 ARN_LOCK(sc);
2467 }
2468 err = 0;
2469 }
2470
2471 ARN_UNLOCK(sc);
2472
2473 return (err);
2474 }
2475
2476 /* ARGSUSED */
2477 static int
2478 arn_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2479 uint_t wldp_length, void *wldp_buf)
2480 {
2481 struct arn_softc *sc = arg;
2482 int err = 0;
2483
2484 err = ieee80211_getprop(&sc->sc_isc, pr_name, wldp_pr_num,
2485 wldp_length, wldp_buf);
2486
2487 return (err);
2488 }
2489
2490 static void
2491 arn_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2492 mac_prop_info_handle_t prh)
2493 {
2494 struct arn_softc *sc = arg;
2495
2496 ieee80211_propinfo(&sc->sc_isc, pr_name, wldp_pr_num, prh);
2497 }
2498
2499 /* set up the bus cache line size; sc_cachelsz is stored in bytes */
2500 static void
2501 arn_pci_config_cachesize(struct arn_softc *sc)
2502 {
2503 uint8_t csz;
2504
2505 /*
2506 * Cache line size is used to size and align various
2507 * structures used to communicate with the hardware.
2508 */
2509 csz = pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ);
2510 if (csz == 0) {
2511 /*
2512 * We must have this setup properly for rx buffer
2513 * DMA to work so force a reasonable value here if it
2514 * comes up zero.
2515 */
2516 csz = ATH_DEF_CACHE_BYTES / sizeof (uint32_t);
2517 pci_config_put8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ,
2518 csz);
2519 }
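	/* PCI_CONF_CACHE_LINESZ counts 32-bit words; convert to bytes. */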
2520 sc->sc_cachelsz = csz << 2;
2521 }
2522
2523 static int
2524 arn_pci_setup(struct arn_softc *sc)
2525 {
2526 uint16_t command;
2527
2528 /*
2529 * Enable memory mapping and bus mastering
2530 */
2531 ASSERT(sc != NULL);
2532 command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
2533 command |= PCI_COMM_MAE | PCI_COMM_ME;
2534 pci_config_put16(sc->sc_cfg_handle, PCI_CONF_COMM, command);
2535 command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
2536 if ((command & PCI_COMM_MAE) == 0) {
2537 arn_problem("arn: arn_pci_setup(): "
2538 "failed to enable memory mapping\n");
2539 return (EIO);
2540 }
2541 if ((command & PCI_COMM_ME) == 0) {
2542 arn_problem("arn: arn_pci_setup(): "
2543 "failed to enable bus mastering\n");
2544 return (EIO);
2545 }
2546 ARN_DBG((ARN_DBG_INIT, "arn: arn_pci_setup(): "
2547 "set command reg to 0x%x \n", command));
2548
2549 return (0);
2550 }
2551
2552 static void
2553 arn_get_hw_encap(struct arn_softc *sc)
2554 {
2555 ieee80211com_t *ic;
2556 struct ath_hal *ah;
2557
2558 ic = (ieee80211com_t *)sc;
2559 ah = sc->sc_ah;
2560
2561 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2562 ATH9K_CIPHER_AES_CCM, NULL))
2563 ic->ic_caps |= IEEE80211_C_AES_CCM;
2564 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2565 ATH9K_CIPHER_AES_OCB, NULL))
2566 ic->ic_caps |= IEEE80211_C_AES;
2567 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2568 ATH9K_CIPHER_TKIP, NULL))
2569 ic->ic_caps |= IEEE80211_C_TKIP;
2570 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2571 ATH9K_CIPHER_WEP, NULL))
2572 ic->ic_caps |= IEEE80211_C_WEP;
2573 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2574 ATH9K_CIPHER_MIC, NULL))
2575 ic->ic_caps |= IEEE80211_C_TKIPMIC;
2576 }
2577
2578 static void
2579 arn_setup_ht_cap(struct arn_softc *sc)
2580 {
2581 #define ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
2582 #define ATH9K_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
2583
2584 uint8_t rx_streams;
2585
2586 arn_ht_conf *ht_info = &sc->sc_ht_conf;
2587
2588 ht_info->ht_supported = B_TRUE;
2589
2590 /* Todo: IEEE80211_HTCAP_SMPS */
2591 ht_info->cap = IEEE80211_HTCAP_CHWIDTH40|
2592 IEEE80211_HTCAP_SHORTGI40 |
2593 IEEE80211_HTCAP_DSSSCCK40;
2594
2595 ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
2596 ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;
2597
2598 /* set up supported mcs set */
2599 (void) memset(&ht_info->rx_mcs_mask, 0, sizeof (ht_info->rx_mcs_mask));
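	/*
	 * A power-of-two rx chainmask means only one chain is active,
	 * so a single spatial stream (MCS 0-7) is advertised; otherwise
	 * two streams (MCS 0-15) are supported.
	 */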
2600 rx_streams = ISP2(sc->sc_ah->ah_caps.rx_chainmask) ? 1 : 2;
2601
2602 ht_info->rx_mcs_mask[0] = 0xff;
2603 if (rx_streams >= 2)
2604 ht_info->rx_mcs_mask[1] = 0xff;
2605 }
2606
2607 /* XXX should this be used for HT rate set negotiation? */
2608 static void
2609 arn_overwrite_11n_rateset(struct arn_softc *sc)
2610 {
2611 uint8_t *ht_rs = sc->sc_ht_conf.rx_mcs_mask;
2612 int mcs_idx, mcs_count = 0;
2613 int i, j;
2614
2615 (void) memset(&ieee80211_rateset_11n, 0,
2616 sizeof (ieee80211_rateset_11n));
2617 for (i = 0; i < 10; i++) {
2618 for (j = 0; j < 8; j++) {
2619 if (ht_rs[i] & (1 << j)) {
2620 mcs_idx = i * 8 + j;
2621 if (mcs_idx >= IEEE80211_HTRATE_MAXSIZE) {
2622 break;
2623 }
2624
2625 ieee80211_rateset_11n.rs_rates[mcs_idx] =
2626 (uint8_t)mcs_idx;
2627 mcs_count++;
2628 }
2629 }
2630 }
2631
2632 ieee80211_rateset_11n.rs_nrates = (uint8_t)mcs_count;
2633
2634 ARN_DBG((ARN_DBG_RATE, "arn_overwrite_11n_rateset(): "
2635 "MCS rate set supported by this station is as follows:\n"));
2636
2637 for (i = 0; i < ieee80211_rateset_11n.rs_nrates; i++) {
2638 ARN_DBG((ARN_DBG_RATE, "MCS rate %d is %d\n",
2639 i, ieee80211_rateset_11n.rs_rates[i]));
2640 }
2641
2642 }
2643
2644 /*
2645 * Update WME parameters for a transmit queue.
2646 */
2647 static int
2648 arn_tx_queue_update(struct arn_softc *sc, int ac)
2649 {
2650 #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1)
2651 #define ATH_TXOP_TO_US(v) (v<<5)
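/*
 * WME carries CWmin/CWmax as exponents and the TXOP limit in units of
 * 32 usec; the macros above convert them to the values the hal expects.
 * E.g. wmep_logcwmin = 4 gives CWmin 15, and a TXOP limit of 94 gives
 * 3008 usec.
 */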
2652 ieee80211com_t *ic = (ieee80211com_t *)sc;
2653 struct ath_txq *txq;
2654 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
2655 struct ath_hal *ah = sc->sc_ah;
2656 struct ath9k_tx_queue_info qi;
2657
2658 txq = &sc->sc_txq[arn_get_hal_qnum(ac, sc)];
2659 (void) ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi);
2660
2661 /*
2662 * TXQ_FLAG_TXOKINT_ENABLE = 0x0001
2663 * TXQ_FLAG_TXERRINT_ENABLE = 0x0001
2664 * TXQ_FLAG_TXDESCINT_ENABLE = 0x0002
2665 * TXQ_FLAG_TXEOLINT_ENABLE = 0x0004
2666 * TXQ_FLAG_TXURNINT_ENABLE = 0x0008
2667 * TXQ_FLAG_BACKOFF_DISABLE = 0x0010
2668 * TXQ_FLAG_COMPRESSION_ENABLE = 0x0020
2669 * TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040
2670 * TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080
2671 */
2672
2673 /* xxx should update these flags here? */
2674 #if 0
2675 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
2676 TXQ_FLAG_TXERRINT_ENABLE |
2677 TXQ_FLAG_TXDESCINT_ENABLE |
2678 TXQ_FLAG_TXURNINT_ENABLE;
2679 #endif
2680
2681 qi.tqi_aifs = wmep->wmep_aifsn;
2682 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2683 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2684 qi.tqi_readyTime = 0;
2685 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
2686
2687 ARN_DBG((ARN_DBG_INIT,
2688 "%s:"
2689 "Q%u"
2690 "qflags 0x%x"
2691 "aifs %u"
2692 "cwmin %u"
2693 "cwmax %u"
2694 "burstTime %u\n",
2695 __func__,
2696 txq->axq_qnum,
2697 qi.tqi_qflags,
2698 qi.tqi_aifs,
2699 qi.tqi_cwmin,
2700 qi.tqi_cwmax,
2701 qi.tqi_burstTime));
2702
2703 if (!ath9k_hw_set_txq_props(ah, txq->axq_qnum, &qi)) {
2704 arn_problem("unable to update hardware queue "
2705 "parameters for %s traffic!\n",
2706 ieee80211_wme_acnames[ac]);
2707 return (0);
2708 } else {
2709 /* push to H/W */
2710 (void) ath9k_hw_resettxqueue(ah, txq->axq_qnum);
2711 return (1);
2712 }
2713
2714 #undef ATH_TXOP_TO_US
2715 #undef ATH_EXPONENT_TO_VALUE
2716 }
2717
2718 /* Update WME parameters */
2719 static int
2720 arn_wme_update(ieee80211com_t *ic)
2721 {
2722 struct arn_softc *sc = (struct arn_softc *)ic;
2723
2724 	/* update all four WME access categories */
2725 return (!arn_tx_queue_update(sc, WME_AC_BE) ||
2726 !arn_tx_queue_update(sc, WME_AC_BK) ||
2727 !arn_tx_queue_update(sc, WME_AC_VI) ||
2728 !arn_tx_queue_update(sc, WME_AC_VO) ? EIO : 0);
2729 }
2730
2731 /*
2732 * Update tx/rx chainmask. For legacy association,
2733 * hard code chainmask to 1x1, for 11n association, use
2734 * the chainmask configuration.
2735 */
2736 void
2737 arn_update_chainmask(struct arn_softc *sc)
2738 {
2739 boolean_t is_ht = B_FALSE;
2740 sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
2741
2742 is_ht = sc->sc_ht_conf.ht_supported;
2743 if (is_ht) {
2744 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
2745 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
2746 } else {
2747 sc->sc_tx_chainmask = 1;
2748 sc->sc_rx_chainmask = 1;
2749 }
2750
2751 	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_update_chainmask(): "
2752 "tx_chainmask = %d, rx_chainmask = %d\n",
2753 sc->sc_tx_chainmask, sc->sc_rx_chainmask));
2754 }
2755
2756 static int
2757 arn_resume(dev_info_t *devinfo)
2758 {
2759 struct arn_softc *sc;
2760 int ret = DDI_SUCCESS;
2761
2762 sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
2763 if (sc == NULL) {
2764 		ARN_DBG((ARN_DBG_INIT, "arn: arn_resume(): "
2765 "failed to get soft state\n"));
2766 return (DDI_FAILURE);
2767 }
2768
2769 ARN_LOCK(sc);
2770 /*
2771 * Set up config space command register(s). Refuse
2772 * to resume on failure.
2773 */
2774 if (arn_pci_setup(sc) != 0) {
2775 		ARN_DBG((ARN_DBG_INIT, "arn: arn_resume(): "
2776 		    "arn_pci_setup() failed\n"));
2777 ARN_UNLOCK(sc);
2778 return (DDI_FAILURE);
2779 }
2780
2781 if (!(sc->sc_flags & SC_OP_INVALID))
2782 ret = arn_open(sc);
2783 ARN_UNLOCK(sc);
2784
2785 return (ret);
2786 }
2787
2788 static int
2789 arn_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2790 {
2791 struct arn_softc *sc;
2792 int instance;
2793 int status;
2794 int32_t err;
2795 uint16_t vendor_id;
2796 uint16_t device_id;
2797 uint32_t i;
2798 uint32_t val;
2799 char strbuf[32];
2800 ieee80211com_t *ic;
2801 struct ath_hal *ah;
2802 wifi_data_t wd = { 0 };
2803 mac_register_t *macp;
2804
2805 switch (cmd) {
2806 case DDI_ATTACH:
2807 break;
2808 case DDI_RESUME:
2809 return (arn_resume(devinfo));
2810 default:
2811 return (DDI_FAILURE);
2812 }
2813
2814 instance = ddi_get_instance(devinfo);
2815 if (ddi_soft_state_zalloc(arn_soft_state_p, instance) != DDI_SUCCESS) {
2816 ARN_DBG((ARN_DBG_ATTACH, "arn: "
2817 "%s: Unable to alloc softstate\n", __func__));
2818 return (DDI_FAILURE);
2819 }
2820
2821 sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
2822 ic = (ieee80211com_t *)sc;
2823 sc->sc_dev = devinfo;
2824
2825 mutex_init(&sc->sc_genlock, NULL, MUTEX_DRIVER, NULL);
2826 mutex_init(&sc->sc_serial_rw, NULL, MUTEX_DRIVER, NULL);
2827 mutex_init(&sc->sc_txbuflock, NULL, MUTEX_DRIVER, NULL);
2828 mutex_init(&sc->sc_rxbuflock, NULL, MUTEX_DRIVER, NULL);
2829 mutex_init(&sc->sc_resched_lock, NULL, MUTEX_DRIVER, NULL);
2830 #ifdef ARN_IBSS
2831 mutex_init(&sc->sc_bcbuflock, NULL, MUTEX_DRIVER, NULL);
2832 #endif
2833
2834 sc->sc_flags |= SC_OP_INVALID;
2835
2836 err = pci_config_setup(devinfo, &sc->sc_cfg_handle);
2837 if (err != DDI_SUCCESS) {
2838 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2839 "pci_config_setup() failed"));
2840 goto attach_fail0;
2841 }
2842
2843 if (arn_pci_setup(sc) != 0)
2844 goto attach_fail1;
2845
2846 /* Cache line size set up */
2847 arn_pci_config_cachesize(sc);
2848
2849 vendor_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_VENID);
2850 device_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_DEVID);
2851 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): vendor 0x%x, "
2852 "device id 0x%x, cache size %d\n",
2853 vendor_id, device_id,
2854 pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ)));
2855
2856 pci_config_put8(sc->sc_cfg_handle, PCI_CONF_LATENCY_TIMER, 0xa8);
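	/*
	 * Clearing bits 15:8 of config offset 0x40 zeroes the retry
	 * timeout at offset 0x41; this mirrors the workaround other
	 * Atheros drivers use to keep PCI retries from interfering
	 * with power-save states.
	 */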
2857 val = pci_config_get32(sc->sc_cfg_handle, 0x40);
2858 if ((val & 0x0000ff00) != 0)
2859 pci_config_put32(sc->sc_cfg_handle, 0x40, val & 0xffff00ff);
2860
2861 err = ddi_regs_map_setup(devinfo, 1,
2862 &sc->mem, 0, 0, &arn_reg_accattr, &sc->sc_io_handle);
2863 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2864 "regs map1 = %x err=%d\n", sc->mem, err));
2865 if (err != DDI_SUCCESS) {
2866 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2867 "ddi_regs_map_setup() failed"));
2868 goto attach_fail1;
2869 }
2870
2871 ah = ath9k_hw_attach(device_id, sc, sc->mem, &status);
2872 if (ah == NULL) {
2873 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2874 "unable to attach hw: H/W status %u\n",
2875 status));
2876 goto attach_fail2;
2877 }
2878 sc->sc_ah = ah;
2879
2880 ath9k_hw_getmac(ah, ic->ic_macaddr);
2881
2882 /* Get the hardware key cache size. */
2883 sc->sc_keymax = ah->ah_caps.keycache_size;
2884 if (sc->sc_keymax > ATH_KEYMAX) {
2885 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2886 "Warning, using only %u entries in %u key cache\n",
2887 ATH_KEYMAX, sc->sc_keymax));
2888 sc->sc_keymax = ATH_KEYMAX;
2889 }
2890
2891 /*
2892 * Reset the key cache since some parts do not
2893 * reset the contents on initial power up.
2894 */
2895 for (i = 0; i < sc->sc_keymax; i++)
2896 (void) ath9k_hw_keyreset(ah, (uint16_t)i);
2897 /*
2898 * Mark key cache slots associated with global keys
2899 * as in use. If we knew TKIP was not to be used we
2900 * could leave the +32, +64, and +32+64 slots free.
2901 * XXX only for splitmic.
2902 */
2903 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
2904 set_bit(i, sc->sc_keymap);
2905 set_bit(i + 32, sc->sc_keymap);
2906 set_bit(i + 64, sc->sc_keymap);
2907 set_bit(i + 32 + 64, sc->sc_keymap);
2908 }
2909
2910 /* Collect the channel list using the default country code */
2911 err = arn_setup_channels(sc);
2912 if (err == EINVAL) {
2913 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2914 "ERR:arn_setup_channels\n"));
2915 goto attach_fail3;
2916 }
2917
2918 /* default to STA mode */
2919 sc->sc_ah->ah_opmode = ATH9K_M_STA;
2920
2921 /* Setup rate tables */
2922 arn_rate_attach(sc);
2923 arn_setup_rates(sc, IEEE80211_MODE_11A);
2924 arn_setup_rates(sc, IEEE80211_MODE_11B);
2925 arn_setup_rates(sc, IEEE80211_MODE_11G);
2926
2927 /* Setup current mode here */
2928 arn_setcurmode(sc, ATH9K_MODE_11G);
2929
2930 /* 802.11g features */
2931 if (sc->sc_have11g)
2932 ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
2933 IEEE80211_C_SHSLOT; /* short slot time */
2934
2935 /* Temp workaround */
2936 sc->sc_mrretry = 1;
2937 sc->sc_config.ath_aggr_prot = 0;
2938
2939 /* Setup tx/rx descriptors */
2940 err = arn_desc_alloc(devinfo, sc);
2941 if (err != DDI_SUCCESS) {
2942 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2943 "failed to allocate descriptors: %d\n", err));
2944 goto attach_fail3;
2945 }
2946
2947 if ((sc->sc_tq = ddi_taskq_create(devinfo, "ath_taskq", 1,
2948 TASKQ_DEFAULTPRI, 0)) == NULL) {
2949 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2950 "ERR:ddi_taskq_create\n"));
2951 goto attach_fail4;
2952 }
2953
2954 /*
2955 * Allocate hardware transmit queues: one queue for
2956 * beacon frames and one data queue for each QoS
2957 	 * priority. Note that the hal handles resetting
2958 * these queues at the needed time.
2959 */
2960 #ifdef ARN_IBSS
2961 sc->sc_beaconq = arn_beaconq_setup(ah);
2962 if (sc->sc_beaconq == (-1)) {
2963 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2964 "unable to setup a beacon xmit queue\n"));
2965 goto attach_fail4;
2966 }
2967 #endif
2968 #ifdef ARN_HOSTAP
2969 sc->sc_cabq = arn_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
2970 if (sc->sc_cabq == NULL) {
2971 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2972 "unable to setup CAB xmit queue\n"));
2973 goto attach_fail4;
2974 }
2975
2976 sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
2977 ath_cabq_update(sc);
2978 #endif
2979
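	/*
	 * sc_haltype2q[] maps a hal tx queue subtype to its h/w queue
	 * number; -1 marks an entry unset until the arn_tx_setup()
	 * calls below assign the real queues.
	 */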
2980 for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
2981 sc->sc_haltype2q[i] = -1;
2982
2983 /* Setup data queues */
2984 /* NB: ensure BK queue is the lowest priority h/w queue */
2985 if (!arn_tx_setup(sc, ATH9K_WME_AC_BK)) {
2986 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2987 "unable to setup xmit queue for BK traffic\n"));
2988 goto attach_fail4;
2989 }
2990 if (!arn_tx_setup(sc, ATH9K_WME_AC_BE)) {
2991 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2992 "unable to setup xmit queue for BE traffic\n"));
2993 goto attach_fail4;
2994 }
2995 if (!arn_tx_setup(sc, ATH9K_WME_AC_VI)) {
2996 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2997 "unable to setup xmit queue for VI traffic\n"));
2998 goto attach_fail4;
2999 }
3000 if (!arn_tx_setup(sc, ATH9K_WME_AC_VO)) {
3001 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3002 "unable to setup xmit queue for VO traffic\n"));
3003 goto attach_fail4;
3004 }
3005
3006 /*
3007 	 * Initialize the noise floor to a reasonable default value.
3008 	 * It will be updated later during ANI processing.
3009 */
3010
3011 sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;
3012
3013
3014 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3015 ATH9K_CIPHER_TKIP, NULL)) {
3016 /*
3017 * Whether we should enable h/w TKIP MIC.
3018 * XXX: if we don't support WME TKIP MIC, then we wouldn't
3019 * report WMM capable, so it's always safe to turn on
3020 * TKIP MIC in this case.
3021 */
3022 (void) ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
3023 0, 1, NULL);
3024 }
3025
3026 	/* Get cipher-related capability information */
3027 arn_get_hw_encap(sc);
3028
3029 /*
3030 * Check whether the separate key cache entries
3031 * are required to handle both tx+rx MIC keys.
3032 * With split mic keys the number of stations is limited
3033 * to 27 otherwise 59.
3034 */
3035 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3036 ATH9K_CIPHER_TKIP, NULL) &&
3037 ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3038 ATH9K_CIPHER_MIC, NULL) &&
3039 ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
3040 0, NULL))
3041 sc->sc_splitmic = 1;
3042
3043 /* turn on mcast key search if possible */
3044 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
3045 (void) ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
3046 1, NULL);
3047
3048 sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
3049 sc->sc_config.txpowlimit_override = 0;
3050
3051 /* 11n Capabilities */
3052 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
3053 sc->sc_flags |= SC_OP_TXAGGR;
3054 sc->sc_flags |= SC_OP_RXAGGR;
3055 arn_setup_ht_cap(sc);
3056 arn_overwrite_11n_rateset(sc);
3057 }
3058
3059 sc->sc_tx_chainmask = 1;
3060 sc->sc_rx_chainmask = 1;
3061 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3062 "tx_chainmask = %d, rx_chainmask = %d\n",
3063 sc->sc_tx_chainmask, sc->sc_rx_chainmask));
3064
3065 /* arn_update_chainmask(sc); */
3066
3067 (void) ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, B_TRUE, NULL);
3068 sc->sc_defant = ath9k_hw_getdefantenna(ah);
3069
3070 ath9k_hw_getmac(ah, sc->sc_myaddr);
3071 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
3072 ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
3073 ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
3074 (void) ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
3075 }
3076
3077 /* set default value to short slot time */
3078 sc->sc_slottime = ATH9K_SLOT_TIME_9;
3079 (void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
3080
3081 /* initialize beacon slots */
3082 for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
3083 sc->sc_bslot[i] = ATH_IF_ID_ANY;
3084
3085 /* Save MISC configurations */
3086 sc->sc_config.swBeaconProcess = 1;
3087
3088 /* Support QoS/WME */
3089 ic->ic_caps |= IEEE80211_C_WME;
3090 ic->ic_wme.wme_update = arn_wme_update;
3091
3092 /* Support 802.11n/HT */
3093 if (sc->sc_ht_conf.ht_supported) {
3094 ic->ic_htcaps =
3095 IEEE80211_HTCAP_CHWIDTH40 |
3096 IEEE80211_HTCAP_SHORTGI40 |
3097 IEEE80211_HTCAP_DSSSCCK40 |
3098 IEEE80211_HTCAP_MAXAMSDU_7935 |
3099 IEEE80211_HTC_HT |
3100 IEEE80211_HTC_AMSDU |
3101 IEEE80211_HTCAP_RXSTBC_2STREAM;
3102
3103 #ifdef ARN_TX_AGGREGATION
3104 ic->ic_htcaps |= IEEE80211_HTC_AMPDU;
3105 #endif
3106 }
3107
3108 /* Header padding requested by driver */
3109 ic->ic_flags |= IEEE80211_F_DATAPAD;
3110 /* Support WPA/WPA2 */
3111 ic->ic_caps |= IEEE80211_C_WPA;
3112 #if 0
3113 ic->ic_caps |= IEEE80211_C_TXFRAG; /* handle tx frags */
3114 ic->ic_caps |= IEEE80211_C_BGSCAN; /* capable of bg scanning */
3115 #endif
3116 ic->ic_phytype = IEEE80211_T_HT;
3117 ic->ic_opmode = IEEE80211_M_STA;
3118 ic->ic_state = IEEE80211_S_INIT;
3119 ic->ic_maxrssi = ARN_MAX_RSSI;
3120 ic->ic_set_shortslot = arn_set_shortslot;
3121 ic->ic_xmit = arn_tx;
3122 ieee80211_attach(ic);
3123
3124 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3125 "ic->ic_curchan->ich_freq: %d\n", ic->ic_curchan->ich_freq));
3126
3127 /* different instance has different WPA door */
3128 (void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
3129 ddi_driver_name(devinfo),
3130 ddi_get_instance(devinfo));
3131
3132 if (sc->sc_ht_conf.ht_supported) {
3133 sc->sc_recv_action = ic->ic_recv_action;
3134 ic->ic_recv_action = arn_ampdu_recv_action;
3135 // sc->sc_send_action = ic->ic_send_action;
3136 // ic->ic_send_action = arn_ampdu_send_action;
3137
3138 ic->ic_ampdu_rxmax = sc->sc_ht_conf.ampdu_factor;
3139 ic->ic_ampdu_density = sc->sc_ht_conf.ampdu_density;
3140 ic->ic_ampdu_limit = ic->ic_ampdu_rxmax;
3141 }
3142
3143 /* Override 80211 default routines */
3144 sc->sc_newstate = ic->ic_newstate;
3145 ic->ic_newstate = arn_newstate;
3146 #ifdef ARN_IBSS
3147 sc->sc_recv_mgmt = ic->ic_recv_mgmt;
3148 ic->ic_recv_mgmt = arn_recv_mgmt;
3149 #endif
3150 ic->ic_watchdog = arn_watchdog;
3151 ic->ic_node_alloc = arn_node_alloc;
3152 ic->ic_node_free = arn_node_free;
3153 ic->ic_crypto.cs_key_alloc = arn_key_alloc;
3154 ic->ic_crypto.cs_key_delete = arn_key_delete;
3155 ic->ic_crypto.cs_key_set = arn_key_set;
3156
3157 ieee80211_media_init(ic);
3158
3159 /*
3160 * initialize default tx key
3161 */
3162 ic->ic_def_txkey = 0;
3163
3164 sc->sc_rx_pend = 0;
3165 (void) ath9k_hw_set_interrupts(sc->sc_ah, 0);
3166 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW,
3167 &sc->sc_softint_id, NULL, 0, arn_softint_handler, (caddr_t)sc);
3168 if (err != DDI_SUCCESS) {
3169 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3170 "ddi_add_softintr() failed....\n"));
3171 goto attach_fail5;
3172 }
3173
3174 if (ddi_get_iblock_cookie(devinfo, 0, &sc->sc_iblock)
3175 != DDI_SUCCESS) {
3176 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3177 "Can not get iblock cookie for INT\n"));
3178 goto attach_fail6;
3179 }
3180
3181 if (ddi_add_intr(devinfo, 0, NULL, NULL, arn_isr,
3182 (caddr_t)sc) != DDI_SUCCESS) {
3183 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3184 "Can not set intr for ARN driver\n"));
3185 goto attach_fail6;
3186 }
3187
3188 /*
3189 * Provide initial settings for the WiFi plugin; whenever this
3190 * information changes, we need to call mac_plugindata_update()
3191 */
3192 wd.wd_opmode = ic->ic_opmode;
3193 wd.wd_secalloc = WIFI_SEC_NONE;
3194 IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid);
3195
3196 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3197 "IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid)"
3198 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
3199 wd.wd_bssid[0], wd.wd_bssid[1], wd.wd_bssid[2],
3200 wd.wd_bssid[3], wd.wd_bssid[4], wd.wd_bssid[5]));
3201
3202 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
3203 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3204 "MAC version mismatch\n"));
3205 goto attach_fail7;
3206 }
3207
3208 macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
3209 macp->m_driver = sc;
3210 macp->m_dip = devinfo;
3211 macp->m_src_addr = ic->ic_macaddr;
3212 macp->m_callbacks = &arn_m_callbacks;
3213 macp->m_min_sdu = 0;
3214 macp->m_max_sdu = IEEE80211_MTU;
3215 macp->m_pdata = &wd;
3216 macp->m_pdata_size = sizeof (wd);
3217
3218 err = mac_register(macp, &ic->ic_mach);
3219 mac_free(macp);
3220 if (err != 0) {
3221 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3222 "mac_register err %x\n", err));
3223 goto attach_fail7;
3224 }
3225
3226 /* Create minor node of type DDI_NT_NET_WIFI */
3227 (void) snprintf(strbuf, sizeof (strbuf), "%s%d",
3228 ARN_NODENAME, instance);
3229 err = ddi_create_minor_node(devinfo, strbuf, S_IFCHR,
3230 instance + 1, DDI_NT_NET_WIFI, 0);
3231 if (err != DDI_SUCCESS)
3232 ARN_DBG((ARN_DBG_ATTACH, "WARN: arn: arn_attach(): "
3233 "Create minor node failed - %d\n", err));
3234
3235 /* Notify link is down now */
3236 mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
3237
3238 sc->sc_promisc = B_FALSE;
3239 bzero(sc->sc_mcast_refs, sizeof (sc->sc_mcast_refs));
3240 bzero(sc->sc_mcast_hash, sizeof (sc->sc_mcast_hash));
3241
3242 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3243 "Atheros AR%s MAC/BB Rev:%x "
3244 "AR%s RF Rev:%x: mem=0x%lx\n",
3245 arn_mac_bb_name(ah->ah_macVersion),
3246 ah->ah_macRev,
3247 arn_rf_name((ah->ah_analog5GhzRev & AR_RADIO_SREV_MAJOR)),
3248 ah->ah_phyRev,
3249 (unsigned long)sc->mem));
3250
3251 	/* XXX: hardware will not be ready until arn_open() is called */
3252 sc->sc_flags |= SC_OP_INVALID;
3253 sc->sc_isrunning = 0;
3254
3255 return (DDI_SUCCESS);
3256
3257 attach_fail7:
3258 ddi_remove_intr(devinfo, 0, sc->sc_iblock);
3259 attach_fail6:
3260 ddi_remove_softintr(sc->sc_softint_id);
3261 attach_fail5:
3262 (void) ieee80211_detach(ic);
3263 attach_fail4:
3264 arn_desc_free(sc);
3265 if (sc->sc_tq)
3266 ddi_taskq_destroy(sc->sc_tq);
3267 attach_fail3:
3268 ath9k_hw_detach(ah);
3269 attach_fail2:
3270 ddi_regs_map_free(&sc->sc_io_handle);
3271 attach_fail1:
3272 pci_config_teardown(&sc->sc_cfg_handle);
3273 attach_fail0:
3274 sc->sc_flags |= SC_OP_INVALID;
3275 /* cleanup tx queues */
3276 mutex_destroy(&sc->sc_txbuflock);
3277 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3278 if (ARN_TXQ_SETUP(sc, i)) {
3279 /* arn_tx_cleanupq(asc, &asc->sc_txq[i]); */
3280 mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
3281 }
3282 }
3283 mutex_destroy(&sc->sc_rxbuflock);
3284 mutex_destroy(&sc->sc_serial_rw);
3285 mutex_destroy(&sc->sc_genlock);
3286 mutex_destroy(&sc->sc_resched_lock);
3287 #ifdef ARN_IBSS
3288 mutex_destroy(&sc->sc_bcbuflock);
3289 #endif
3290
3291 ddi_soft_state_free(arn_soft_state_p, instance);
3292
3293 return (DDI_FAILURE);
3294
3295 }
3296
3297 /*
3298 * Suspend transmit/receive for powerdown
3299 */
3300 static int
3301 arn_suspend(struct arn_softc *sc)
3302 {
3303 ARN_LOCK(sc);
3304 arn_close(sc);
3305 ARN_UNLOCK(sc);
3306
3307 return (DDI_SUCCESS);
3308 }
3309
3310 static int32_t
3311 arn_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
3312 {
3313 struct arn_softc *sc;
3314 int i;
3315
3316 sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
3317 ASSERT(sc != NULL);
3318
3319 switch (cmd) {
3320 case DDI_DETACH:
3321 break;
3322
3323 case DDI_SUSPEND:
3324 return (arn_suspend(sc));
3325
3326 default:
3327 return (DDI_FAILURE);
3328 }
3329
3330 if (mac_disable(sc->sc_isc.ic_mach) != 0)
3331 return (DDI_FAILURE);
3332
3333 arn_stop_scantimer(sc);
3334 arn_stop_caltimer(sc);
3335
3336 /* disable interrupts */
3337 (void) ath9k_hw_set_interrupts(sc->sc_ah, 0);
3338
3339 /*
3340 * Unregister from the MAC layer subsystem
3341 */
3342 (void) mac_unregister(sc->sc_isc.ic_mach);
3343
3344 	/* free interrupt resources */
3345 ddi_remove_intr(devinfo, 0, sc->sc_iblock);
3346 ddi_remove_softintr(sc->sc_softint_id);
3347
3348 /*
3349 * NB: the order of these is important:
3350 * o call the 802.11 layer before detaching the hal to
3351 	 *    ensure callbacks into the driver to delete global
3352 * key cache entries can be handled
3353 * o reclaim the tx queue data structures after calling
3354 * the 802.11 layer as we'll get called back to reclaim
3355 * node state and potentially want to use them
3356 * o to cleanup the tx queues the hal is called, so detach
3357 * it last
3358 */
3359 ieee80211_detach(&sc->sc_isc);
3360
3361 arn_desc_free(sc);
3362
3363 ddi_taskq_destroy(sc->sc_tq);
3364
3365 if (!(sc->sc_flags & SC_OP_INVALID))
3366 (void) ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
3367
3368 /* cleanup tx queues */
3369 mutex_destroy(&sc->sc_txbuflock);
3370 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3371 if (ARN_TXQ_SETUP(sc, i)) {
3372 arn_tx_cleanupq(sc, &sc->sc_txq[i]);
3373 mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
3374 }
3375 }
3376
3377 ath9k_hw_detach(sc->sc_ah);
3378
3379 /* free io handle */
3380 ddi_regs_map_free(&sc->sc_io_handle);
3381 pci_config_teardown(&sc->sc_cfg_handle);
3382
3383 /* destroy locks */
3384 mutex_destroy(&sc->sc_genlock);
3385 mutex_destroy(&sc->sc_serial_rw);
3386 mutex_destroy(&sc->sc_rxbuflock);
3387 mutex_destroy(&sc->sc_resched_lock);
3388 #ifdef ARN_IBSS
3389 mutex_destroy(&sc->sc_bcbuflock);
3390 #endif
3391
3392 ddi_remove_minor_node(devinfo, NULL);
3393 ddi_soft_state_free(arn_soft_state_p, ddi_get_instance(devinfo));
3394
3395 return (DDI_SUCCESS);
3396 }
3397
3398 /*
3399 * quiesce(9E) entry point.
3400 *
3401 * This function is called when the system is single-threaded at high
3402  * PIL with preemption disabled. Therefore, this function must not
3403  * block.
3404 *
3405 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
3406 * DDI_FAILURE indicates an error condition and should almost never happen.
3407 */
3408 static int32_t
3409 arn_quiesce(dev_info_t *devinfo)
3410 {
3411 struct arn_softc *sc;
3412 int i;
3413 struct ath_hal *ah;
3414
3415 sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
3416
3417 if (sc == NULL || (ah = sc->sc_ah) == NULL)
3418 return (DDI_FAILURE);
3419
3420 /*
3421 * Disable interrupts
3422 */
3423 (void) ath9k_hw_set_interrupts(ah, 0);
3424
3425 /*
3426 * Disable TX HW
3427 */
3428 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3429 if (ARN_TXQ_SETUP(sc, i))
3430 (void) ath9k_hw_stoptxdma(ah, sc->sc_txq[i].axq_qnum);
3431 }
3432
3433 /*
3434 * Disable RX HW
3435 */
3436 ath9k_hw_stoppcurecv(ah);
3437 ath9k_hw_setrxfilter(ah, 0);
3438 (void) ath9k_hw_stopdmarecv(ah);
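	/* Allow a few milliseconds for in-flight rx DMA to drain. */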
3439 drv_usecwait(3000);
3440
3441 /*
3442 * Power down HW
3443 */
3444 (void) ath9k_hw_phy_disable(ah);
3445
3446 return (DDI_SUCCESS);
3447 }
3448
3449 DDI_DEFINE_STREAM_OPS(arn_dev_ops, nulldev, nulldev, arn_attach, arn_detach,
3450 nodev, NULL, D_MP, NULL, arn_quiesce);
3451
3452 static struct modldrv arn_modldrv = {
3453 &mod_driverops, /* Type of module. This one is a driver */
3454 "Atheros 9000 series driver", /* short description */
3455 &arn_dev_ops /* driver specific ops */
3456 };
3457
3458 static struct modlinkage modlinkage = {
3459 MODREV_1, (void *)&arn_modldrv, NULL
3460 };
3461
3462 int
3463 _info(struct modinfo *modinfop)
3464 {
3465 return (mod_info(&modlinkage, modinfop));
3466 }
3467
3468 int
3469 _init(void)
3470 {
3471 int status;
3472
3473 status = ddi_soft_state_init
3474 (&arn_soft_state_p, sizeof (struct arn_softc), 1);
3475 if (status != 0)
3476 return (status);
3477
3478 mutex_init(&arn_loglock, NULL, MUTEX_DRIVER, NULL);
3479 mac_init_ops(&arn_dev_ops, "arn");
3480 status = mod_install(&modlinkage);
3481 if (status != 0) {
3482 mac_fini_ops(&arn_dev_ops);
3483 mutex_destroy(&arn_loglock);
3484 ddi_soft_state_fini(&arn_soft_state_p);
3485 }
3486
3487 return (status);
3488 }
3489
3490 int
3491 _fini(void)
3492 {
3493 int status;
3494
3495 status = mod_remove(&modlinkage);
3496 if (status == 0) {
3497 mac_fini_ops(&arn_dev_ops);
3498 mutex_destroy(&arn_loglock);
3499 ddi_soft_state_fini(&arn_soft_state_p);
3500 }
3501 return (status);
3502 }
3503