1 /*
2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 * Use is subject to license terms.
4 */
5
6 /*
7 * Copyright (c) 2009, Intel Corporation
8 * All rights reserved.
9 */
10
11 /*
12 * Copyright (c) 2006
13 * Copyright (c) 2007
14 * Damien Bergamini <damien.bergamini@free.fr>
15 *
16 * Permission to use, copy, modify, and distribute this software for any
17 * purpose with or without fee is hereby granted, provided that the above
18 * copyright notice and this permission notice appear in all copies.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27 */
28
29 /*
30 * Intel(R) WiFi Link 5100/5300 Driver
31 */
32
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac_provider.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/net80211_ht.h>
56 #include <sys/varargs.h>
57 #include <sys/policy.h>
58 #include <sys/pci.h>
59
60 #include "iwh_calibration.h"
61 #include "iwh_hw.h"
62 #include "iwh_eeprom.h"
63 #include "iwh_var.h"
64 #include <inet/wifi_ioctl.h>
65
66 #ifdef DEBUG
67 #define IWH_DEBUG_80211 (1 << 0)
68 #define IWH_DEBUG_CMD (1 << 1)
69 #define IWH_DEBUG_DMA (1 << 2)
70 #define IWH_DEBUG_EEPROM (1 << 3)
71 #define IWH_DEBUG_FW (1 << 4)
72 #define IWH_DEBUG_HW (1 << 5)
73 #define IWH_DEBUG_INTR (1 << 6)
74 #define IWH_DEBUG_MRR (1 << 7)
75 #define IWH_DEBUG_PIO (1 << 8)
76 #define IWH_DEBUG_RX (1 << 9)
77 #define IWH_DEBUG_SCAN (1 << 10)
78 #define IWH_DEBUG_TX (1 << 11)
79 #define IWH_DEBUG_RATECTL (1 << 12)
80 #define IWH_DEBUG_RADIO (1 << 13)
81 #define IWH_DEBUG_RESUME (1 << 14)
82 #define IWH_DEBUG_CALIBRATION (1 << 15)
83 #define IWH_DEBUG_BA (1 << 16)
84 #define IWH_DEBUG_RXON (1 << 17)
85 #define IWH_DEBUG_HWRATE (1 << 18)
86 #define IWH_DEBUG_HTRATE (1 << 19)
87 #define IWH_DEBUG_QOS (1 << 20)
88 /*
89 * To see debug messages for a given section, set this flag to one
90 * (or a bitwise OR) of the above values.
91 */
92 uint32_t iwh_dbg_flags = 0;
93 #define IWH_DBG(x) \
94 iwh_dbg x
95 #else
96 #define IWH_DBG(x)
97 #endif
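/*
 * Illustrative note (not part of the original sources): on a DEBUG
 * build, debug output for one or more sections can be enabled by
 * setting iwh_dbg_flags to a bitwise OR of the flags above, e.g.
 * via /etc/system:
 *
 *	set iwh:iwh_dbg_flags = 0xc00	(IWH_DEBUG_TX | IWH_DEBUG_SCAN)
 *
 * after which calls such as
 *	IWH_DBG((IWH_DEBUG_TX, "iwh_send(): frame queued\n"));
 * (a hypothetical message) are printed through vcmn_err(CE_NOTE, ...)
 * by iwh_dbg() below.
 */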
98
99 #define MS(v, f) (((v) & f) >> f##_S)
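/*
 * Illustrative example (assumed names): MS() extracts a register field
 * given a mask "f" and a companion shift constant "f_S".  With
 * hypothetical definitions
 *	#define	FOO	0x0f00
 *	#define	FOO_S	8
 * MS(0x1234, FOO) evaluates to ((0x1234 & 0x0f00) >> 8) == 0x2.
 */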
100
101 static void *iwh_soft_state_p = NULL;
102
103 /*
104 * ucode will be compiled into driver image
105 */
106 static uint8_t iwh_fw_5000_bin[] = {
107 #include "fw-iw/fw_5000/iwh_5000.ucode"
108 };
109
110 static uint8_t iwh_fw_5150_bin[] = {
111 #include "fw-iw/fw_5150/iwh_5150.ucode"
112 };
113
114 /*
115 * DMA attributes for a shared page
116 */
117 static ddi_dma_attr_t sh_dma_attr = {
118 DMA_ATTR_V0, /* version of this structure */
119 0, /* lowest usable address */
120 0xffffffffU, /* highest usable address */
121 0xffffffffU, /* maximum DMAable byte count */
122 0x1000, /* alignment in bytes */
123 0x1000, /* burst sizes (any?) */
124 1, /* minimum transfer */
125 0xffffffffU, /* maximum transfer */
126 0xffffffffU, /* maximum segment length */
127 1, /* maximum number of segments */
128 1, /* granularity */
129 0, /* flags (reserved) */
130 };
131
132 /*
133 * DMA attributes for a keep warm DRAM descriptor
134 */
135 static ddi_dma_attr_t kw_dma_attr = {
136 DMA_ATTR_V0, /* version of this structure */
137 0, /* lowest usable address */
138 0xffffffffU, /* highest usable address */
139 0xffffffffU, /* maximum DMAable byte count */
140 0x1000, /* alignment in bytes */
141 0x1000, /* burst sizes (any?) */
142 1, /* minimum transfer */
143 0xffffffffU, /* maximum transfer */
144 0xffffffffU, /* maximum segment length */
145 1, /* maximum number of segments */
146 1, /* granularity */
147 0, /* flags (reserved) */
148 };
149
150 /*
151 * DMA attributes for a ring descriptor
152 */
153 static ddi_dma_attr_t ring_desc_dma_attr = {
154 DMA_ATTR_V0, /* version of this structure */
155 0, /* lowest usable address */
156 0xffffffffU, /* highest usable address */
157 0xffffffffU, /* maximum DMAable byte count */
158 0x100, /* alignment in bytes */
159 0x100, /* burst sizes (any?) */
160 1, /* minimum transfer */
161 0xffffffffU, /* maximum transfer */
162 0xffffffffU, /* maximum segment length */
163 1, /* maximum number of segments */
164 1, /* granularity */
165 0, /* flags (reserved) */
166 };
167
168 /*
169 * DMA attributes for a cmd
170 */
171 static ddi_dma_attr_t cmd_dma_attr = {
172 DMA_ATTR_V0, /* version of this structure */
173 0, /* lowest usable address */
174 0xffffffffU, /* highest usable address */
175 0xffffffffU, /* maximum DMAable byte count */
176 4, /* alignment in bytes */
177 0x100, /* burst sizes (any?) */
178 1, /* minimum transfer */
179 0xffffffffU, /* maximum transfer */
180 0xffffffffU, /* maximum segment length */
181 1, /* maximum number of segments */
182 1, /* granularity */
183 0, /* flags (reserved) */
184 };
185
186 /*
187 * DMA attributes for a rx buffer
188 */
189 static ddi_dma_attr_t rx_buffer_dma_attr = {
190 DMA_ATTR_V0, /* version of this structure */
191 0, /* lowest usable address */
192 0xffffffffU, /* highest usable address */
193 0xffffffffU, /* maximum DMAable byte count */
194 0x100, /* alignment in bytes */
195 0x100, /* burst sizes (any?) */
196 1, /* minimum transfer */
197 0xffffffffU, /* maximum transfer */
198 0xffffffffU, /* maximum segment length */
199 1, /* maximum number of segments */
200 1, /* granularity */
201 0, /* flags (reserved) */
202 };
203
204 /*
205 * DMA attributes for a tx buffer.
206 * The hardware supports up to 4 segments per TX buffer.  Currently
207 * all the wifi drivers put the whole frame in a single descriptor,
208 * so we define the maximum number of segments as 1, the same as for
209 * the rx buffer.  We may leverage the hardware's multi-segment
210 * capability in the future, which is why rx_buffer_dma_attr and
211 * tx_buffer_dma_attr are kept separate.
212 */
213 static ddi_dma_attr_t tx_buffer_dma_attr = {
214 DMA_ATTR_V0, /* version of this structure */
215 0, /* lowest usable address */
216 0xffffffffU, /* highest usable address */
217 0xffffffffU, /* maximum DMAable byte count */
218 4, /* alignment in bytes */
219 0x100, /* burst sizes (any?) */
220 1, /* minimum transfer */
221 0xffffffffU, /* maximum transfer */
222 0xffffffffU, /* maximum segment length */
223 1, /* maximum number of segments */
224 1, /* granularity */
225 0, /* flags (reserved) */
226 };
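/*
 * Sketch only (assumption, not enabled): to exploit the hardware's
 * 4-segment TX capability mentioned above, the "maximum number of
 * segments" field of tx_buffer_dma_attr would be raised from 1 to 4,
 * and the TX path would then have to program one descriptor segment
 * per DMA cookie returned by ddi_dma_addr_bind_handle().
 */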
227
228 /*
229 * DMA attributes for text and data part in the firmware
230 */
231 static ddi_dma_attr_t fw_dma_attr = {
232 DMA_ATTR_V0, /* version of this structure */
233 0, /* lowest usable address */
234 0xffffffffU, /* highest usable address */
235 0x7fffffff, /* maximum DMAable byte count */
236 0x10, /* alignment in bytes */
237 0x100, /* burst sizes (any?) */
238 1, /* minimum transfer */
239 0xffffffffU, /* maximum transfer */
240 0xffffffffU, /* maximum segment length */
241 1, /* maximum number of segments */
242 1, /* granularity */
243 0, /* flags (reserved) */
244 };
245
246 /*
247 * regs access attributes
248 */
249 static ddi_device_acc_attr_t iwh_reg_accattr = {
250 DDI_DEVICE_ATTR_V0,
251 DDI_STRUCTURE_LE_ACC,
252 DDI_STRICTORDER_ACC,
253 DDI_DEFAULT_ACC
254 };
255
256 /*
257 * DMA access attributes for descriptor
258 */
259 static ddi_device_acc_attr_t iwh_dma_descattr = {
260 DDI_DEVICE_ATTR_V0,
261 DDI_STRUCTURE_LE_ACC,
262 DDI_STRICTORDER_ACC,
263 DDI_DEFAULT_ACC
264 };
265
266 /*
267 * DMA access attributes
268 */
269 static ddi_device_acc_attr_t iwh_dma_accattr = {
270 DDI_DEVICE_ATTR_V0,
271 DDI_NEVERSWAP_ACC,
272 DDI_STRICTORDER_ACC,
273 DDI_DEFAULT_ACC
274 };
275
276 static int iwh_ring_init(iwh_sc_t *);
277 static void iwh_ring_free(iwh_sc_t *);
278 static int iwh_alloc_shared(iwh_sc_t *);
279 static void iwh_free_shared(iwh_sc_t *);
280 static int iwh_alloc_kw(iwh_sc_t *);
281 static void iwh_free_kw(iwh_sc_t *);
282 static int iwh_alloc_fw_dma(iwh_sc_t *);
283 static void iwh_free_fw_dma(iwh_sc_t *);
284 static int iwh_alloc_rx_ring(iwh_sc_t *);
285 static void iwh_reset_rx_ring(iwh_sc_t *);
286 static void iwh_free_rx_ring(iwh_sc_t *);
287 static int iwh_alloc_tx_ring(iwh_sc_t *, iwh_tx_ring_t *,
288 int, int);
289 static void iwh_reset_tx_ring(iwh_sc_t *, iwh_tx_ring_t *);
290 static void iwh_free_tx_ring(iwh_tx_ring_t *);
291 static ieee80211_node_t *iwh_node_alloc(ieee80211com_t *);
292 static void iwh_node_free(ieee80211_node_t *);
293 static int iwh_newstate(ieee80211com_t *, enum ieee80211_state, int);
294 static void iwh_mac_access_enter(iwh_sc_t *);
295 static void iwh_mac_access_exit(iwh_sc_t *);
296 static uint32_t iwh_reg_read(iwh_sc_t *, uint32_t);
297 static void iwh_reg_write(iwh_sc_t *, uint32_t, uint32_t);
298 static int iwh_load_init_firmware(iwh_sc_t *);
299 static int iwh_load_run_firmware(iwh_sc_t *);
300 static void iwh_tx_intr(iwh_sc_t *, iwh_rx_desc_t *);
301 static void iwh_cmd_intr(iwh_sc_t *, iwh_rx_desc_t *);
302 static uint_t iwh_intr(caddr_t, caddr_t);
303 static int iwh_eep_load(iwh_sc_t *);
304 static void iwh_get_mac_from_eep(iwh_sc_t *);
305 static int iwh_eep_sem_down(iwh_sc_t *);
306 static void iwh_eep_sem_up(iwh_sc_t *);
307 static uint_t iwh_rx_softintr(caddr_t, caddr_t);
308 static uint8_t iwh_rate_to_plcp(int);
309 static int iwh_cmd(iwh_sc_t *, int, const void *, int, int);
310 static void iwh_set_led(iwh_sc_t *, uint8_t, uint8_t, uint8_t);
311 static int iwh_hw_set_before_auth(iwh_sc_t *);
312 static int iwh_scan(iwh_sc_t *);
313 static int iwh_config(iwh_sc_t *);
314 static void iwh_stop_master(iwh_sc_t *);
315 static int iwh_power_up(iwh_sc_t *);
316 static int iwh_preinit(iwh_sc_t *);
317 static int iwh_init(iwh_sc_t *);
318 static void iwh_stop(iwh_sc_t *);
319 static int iwh_quiesce(dev_info_t *t);
320 static void iwh_amrr_init(iwh_amrr_t *);
321 static void iwh_amrr_timeout(iwh_sc_t *);
322 static void iwh_amrr_ratectl(void *, ieee80211_node_t *);
323 static void iwh_ucode_alive(iwh_sc_t *, iwh_rx_desc_t *);
324 static void iwh_rx_phy_intr(iwh_sc_t *, iwh_rx_desc_t *);
325 static void iwh_rx_mpdu_intr(iwh_sc_t *, iwh_rx_desc_t *);
326 static void iwh_release_calib_buffer(iwh_sc_t *);
327 static int iwh_init_common(iwh_sc_t *);
328 static uint8_t *iwh_eep_addr_trans(iwh_sc_t *, uint32_t);
329 static int iwh_put_seg_fw(iwh_sc_t *, uint32_t, uint32_t, uint32_t);
330 static int iwh_alive_common(iwh_sc_t *);
331 static void iwh_save_calib_result(iwh_sc_t *, iwh_rx_desc_t *);
332 static int iwh_tx_power_table(iwh_sc_t *, int);
333 static int iwh_attach(dev_info_t *, ddi_attach_cmd_t);
334 static int iwh_detach(dev_info_t *, ddi_detach_cmd_t);
335 static void iwh_destroy_locks(iwh_sc_t *);
336 static int iwh_send(ieee80211com_t *, mblk_t *, uint8_t);
337 static void iwh_thread(iwh_sc_t *);
338 static int iwh_run_state_config(iwh_sc_t *);
339 static int iwh_fast_recover(iwh_sc_t *);
340 static int iwh_wme_update(ieee80211com_t *);
341 static int iwh_qosparam_to_hw(iwh_sc_t *, int);
342 static int iwh_wme_to_qos_ac(int);
343 static uint16_t iwh_cw_e_to_cw(uint8_t);
344 static int iwh_wmeparam_check(struct wmeParams *);
345 static inline int iwh_wme_tid_qos_ac(int);
346 static inline int iwh_qos_ac_to_txq(int);
347 static int iwh_wme_tid_to_txq(int);
348 static void iwh_init_ht_conf(iwh_sc_t *);
349 static void iwh_overwrite_11n_rateset(iwh_sc_t *);
350 static void iwh_overwrite_ic_default(iwh_sc_t *);
351 static void iwh_config_rxon_chain(iwh_sc_t *);
352 static int iwh_add_ap_sta(iwh_sc_t *);
353 static int iwh_ap_lq(iwh_sc_t *);
354 static void iwh_recv_action(struct ieee80211_node *,
355 const uint8_t *, const uint8_t *);
356 static int iwh_send_action(struct ieee80211_node *,
357 int, int, uint16_t[4]);
358 static int iwh_is_max_rate(ieee80211_node_t *);
359 static int iwh_is_min_rate(ieee80211_node_t *);
360 static void iwh_increase_rate(ieee80211_node_t *);
361 static void iwh_decrease_rate(ieee80211_node_t *);
362 static int iwh_alloc_dma_mem(iwh_sc_t *, size_t,
363 ddi_dma_attr_t *, ddi_device_acc_attr_t *,
364 uint_t, iwh_dma_t *);
365 static void iwh_free_dma_mem(iwh_dma_t *);
366 static int iwh_reset_hw(iwh_sc_t *);
367
368 /*
369 * GLD specific operations
370 */
371 static int iwh_m_stat(void *, uint_t, uint64_t *);
372 static int iwh_m_start(void *);
373 static void iwh_m_stop(void *);
374 static int iwh_m_unicst(void *, const uint8_t *);
375 static int iwh_m_multicst(void *, boolean_t, const uint8_t *);
376 static int iwh_m_promisc(void *, boolean_t);
377 static mblk_t *iwh_m_tx(void *, mblk_t *);
378 static void iwh_m_ioctl(void *, queue_t *, mblk_t *);
379 static int iwh_m_setprop(void *arg, const char *pr_name,
380 mac_prop_id_t wldp_pr_num, uint_t wldp_length, const void *wldp_buf);
381 static int iwh_m_getprop(void *arg, const char *pr_name,
382 mac_prop_id_t wldp_pr_num, uint_t wldp_length,
383 void *wldp_buf);
384 static void iwh_m_propinfo(void *arg, const char *pr_name,
385 mac_prop_id_t wldp_pr_num, mac_prop_info_handle_t mph);
386
387 /*
388 * Supported rates for 802.11b/g modes (in 500Kbps unit).
389 */
390 static const struct ieee80211_rateset iwh_rateset_11b =
391 { 4, { 2, 4, 11, 22 } };
392
393 static const struct ieee80211_rateset iwh_rateset_11g =
394 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
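/*
 * In 500Kbps units, e.g. 2 => 1 Mbps, 22 => 11 Mbps, 108 => 54 Mbps.
 */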
395
396 /*
397 * Default 11n rates supported by this station.
398 */
399 extern struct ieee80211_htrateset ieee80211_rateset_11n;
400
401 /*
402 * For mfthread only
403 */
404 extern pri_t minclsyspri;
405
406 #define DRV_NAME_SP "iwh"
407
408 /*
409 * Module Loading Data & Entry Points
410 */
411 DDI_DEFINE_STREAM_OPS(iwh_devops, nulldev, nulldev, iwh_attach,
412 iwh_detach, nodev, NULL, D_MP, NULL, iwh_quiesce);
413
414 static struct modldrv iwh_modldrv = {
415 &mod_driverops,
416 "Intel(R) ShirleyPeak/EchoPeak driver(N)",
417 &iwh_devops
418 };
419
420 static struct modlinkage iwh_modlinkage = {
421 MODREV_1,
422 &iwh_modldrv,
423 NULL
424 };
425
426 int
427 _init(void)
428 {
429 int status;
430
431 status = ddi_soft_state_init(&iwh_soft_state_p,
432 sizeof (iwh_sc_t), 1);
433 if (status != DDI_SUCCESS) {
434 return (status);
435 }
436
437 mac_init_ops(&iwh_devops, DRV_NAME_SP);
438 status = mod_install(&iwh_modlinkage);
439 if (status != DDI_SUCCESS) {
440 mac_fini_ops(&iwh_devops);
441 ddi_soft_state_fini(&iwh_soft_state_p);
442 }
443
444 return (status);
445 }
446
447 int
448 _fini(void)
449 {
450 int status;
451
452 status = mod_remove(&iwh_modlinkage);
453 if (DDI_SUCCESS == status) {
454 mac_fini_ops(&iwh_devops);
455 ddi_soft_state_fini(&iwh_soft_state_p);
456 }
457
458 return (status);
459 }
460
461 int
462 _info(struct modinfo *mip)
463 {
464 return (mod_info(&iwh_modlinkage, mip));
465 }
466
467 /*
468 * Mac Call Back entries
469 */
470 mac_callbacks_t iwh_m_callbacks = {
471 MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
472 iwh_m_stat,
473 iwh_m_start,
474 iwh_m_stop,
475 iwh_m_promisc,
476 iwh_m_multicst,
477 iwh_m_unicst,
478 iwh_m_tx,
479 NULL,
480 iwh_m_ioctl,
481 NULL,
482 NULL,
483 NULL,
484 iwh_m_setprop,
485 iwh_m_getprop,
486 iwh_m_propinfo
487 };
488
489 #ifdef DEBUG
490 void
491 iwh_dbg(uint32_t flags, const char *fmt, ...)
492 {
493 va_list ap;
494
495 if (flags & iwh_dbg_flags) {
496 va_start(ap, fmt);
497 vcmn_err(CE_NOTE, fmt, ap);
498 va_end(ap);
499 }
500 }
501 #endif /* DEBUG */
502
503 /*
504 * device operations
505 */
506 int
507 iwh_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
508 {
509 iwh_sc_t *sc;
510 ieee80211com_t *ic;
511 int instance, i;
512 char strbuf[32];
513 wifi_data_t wd = { 0 };
514 mac_register_t *macp;
515 int intr_type;
516 int intr_count;
517 int intr_actual;
518 int err = DDI_FAILURE;
519
520 switch (cmd) {
521 case DDI_ATTACH:
522 break;
523
524 case DDI_RESUME:
525 instance = ddi_get_instance(dip);
526 sc = ddi_get_soft_state(iwh_soft_state_p,
527 instance);
528 ASSERT(sc != NULL);
529
530 if (sc->sc_flags & IWH_F_RUNNING) {
531 (void) iwh_init(sc);
532 }
533
534 atomic_and_32(&sc->sc_flags, ~IWH_F_SUSPEND);
535
536 IWH_DBG((IWH_DEBUG_RESUME, "iwh_attach(): "
537 "resume\n"));
538 return (DDI_SUCCESS);
539
540 default:
541 goto attach_fail1;
542 }
543
544 instance = ddi_get_instance(dip);
545 err = ddi_soft_state_zalloc(iwh_soft_state_p, instance);
546 if (err != DDI_SUCCESS) {
547 cmn_err(CE_WARN, "iwh_attach(): "
548 "failed to allocate soft state\n");
549 goto attach_fail1;
550 }
551
552 sc = ddi_get_soft_state(iwh_soft_state_p, instance);
553 ASSERT(sc != NULL);
554
555 sc->sc_dip = dip;
556
557 /*
558 * map configuration space
559 */
560 err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
561 &iwh_reg_accattr, &sc->sc_cfg_handle);
562 if (err != DDI_SUCCESS) {
563 cmn_err(CE_WARN, "iwh_attach(): "
564 "failed to map config spaces regs\n");
565 goto attach_fail2;
566 }
567
568 sc->sc_dev_id = ddi_get16(sc->sc_cfg_handle,
569 (uint16_t *)(sc->sc_cfg_base + PCI_CONF_DEVID));
570 if ((sc->sc_dev_id != 0x4232) &&
571 (sc->sc_dev_id != 0x4235) &&
572 (sc->sc_dev_id != 0x4236) &&
573 (sc->sc_dev_id != 0x4237) &&
574 (sc->sc_dev_id != 0x423a) &&
575 (sc->sc_dev_id != 0x423b) &&
576 (sc->sc_dev_id != 0x423c) &&
577 (sc->sc_dev_id != 0x423d)) {
578 cmn_err(CE_WARN, "iwh_attach(): "
579 "Do not support this device\n");
580 goto attach_fail3;
581 }
582
583 iwh_init_ht_conf(sc);
584 iwh_overwrite_11n_rateset(sc);
585
586 sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
587 (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
588
589 /*
590 * keep from disturbing C3 state of CPU
591 */
592 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base +
593 PCI_CFG_RETRY_TIMEOUT), 0);
594
595 /*
596 * determine the size of the buffer for frames and commands sent to the ucode
597 */
598 sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
599 (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
600 if (!sc->sc_clsz) {
601 sc->sc_clsz = 16;
602 }
603 sc->sc_clsz = (sc->sc_clsz << 2);
604
605 sc->sc_dmabuf_sz = roundup(0x2000 + sizeof (struct ieee80211_frame) +
606 IEEE80211_MTU + IEEE80211_CRC_LEN +
607 (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
608 IEEE80211_WEP_CRCLEN), sc->sc_clsz);
609
610 /*
611 * Map operating registers
612 */
613 err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
614 0, 0, &iwh_reg_accattr, &sc->sc_handle);
615 if (err != DDI_SUCCESS) {
616 cmn_err(CE_WARN, "iwh_attach(): "
617 "failed to map device regs\n");
618 goto attach_fail3;
619 }
620
621 /*
622 * this is used to differentiate the type of hardware
623 */
624 sc->sc_hw_rev = IWH_READ(sc, CSR_HW_REV);
625
626 err = ddi_intr_get_supported_types(dip, &intr_type);
627 if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
628 cmn_err(CE_WARN, "iwh_attach(): "
629 "fixed type interrupt is not supported\n");
630 goto attach_fail4;
631 }
632
633 err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
634 if ((err != DDI_SUCCESS) || (intr_count != 1)) {
635 cmn_err(CE_WARN, "iwh_attach(): "
636 "no fixed interrupts\n");
637 goto attach_fail4;
638 }
639
640 sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
641
642 err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
643 intr_count, &intr_actual, 0);
644 if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
645 cmn_err(CE_WARN, "iwh_attach(): "
646 "ddi_intr_alloc() failed 0x%x\n", err);
647 goto attach_fail5;
648 }
649
650 err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
651 if (err != DDI_SUCCESS) {
652 cmn_err(CE_WARN, "iwh_attach(): "
653 "ddi_intr_get_pri() failed 0x%x\n", err);
654 goto attach_fail6;
655 }
656
657 mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
658 DDI_INTR_PRI(sc->sc_intr_pri));
659 mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
660 DDI_INTR_PRI(sc->sc_intr_pri));
661 mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
662 DDI_INTR_PRI(sc->sc_intr_pri));
663
664 cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
665 cv_init(&sc->sc_put_seg_cv, NULL, CV_DRIVER, NULL);
666 cv_init(&sc->sc_ucode_cv, NULL, CV_DRIVER, NULL);
667
668 /*
669 * initialize the mf thread
670 */
671 cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
672 sc->sc_mf_thread = NULL;
673 sc->sc_mf_thread_switch = 0;
674
675 /*
676 * Allocate shared buffer for communication between driver and ucode.
677 */
678 err = iwh_alloc_shared(sc);
679 if (err != DDI_SUCCESS) {
680 cmn_err(CE_WARN, "iwh_attach(): "
681 "failed to allocate shared page\n");
682 goto attach_fail7;
683 }
684
685 (void) memset(sc->sc_shared, 0, sizeof (iwh_shared_t));
686
687 /*
688 * Allocate keep warm page.
689 */
690 err = iwh_alloc_kw(sc);
691 if (err != DDI_SUCCESS) {
692 cmn_err(CE_WARN, "iwh_attach(): "
693 "failed to allocate keep warm page\n");
694 goto attach_fail8;
695 }
696
697 err = iwh_reset_hw(sc);
698 if (err != IWH_SUCCESS) {
699 cmn_err(CE_WARN, "iwh_attach(): "
700 "failed to reset hardware\n");
701 goto attach_fail9;
702 }
703
704 /*
705 * Do some necessary hardware initializations.
706 */
707 err = iwh_preinit(sc);
708 if (err != IWH_SUCCESS) {
709 cmn_err(CE_WARN, "iwh_attach(): "
710 "failed to initialize hardware\n");
711 goto attach_fail9;
712 }
713
714 /*
715 * get hardware configurations from eeprom
716 */
717 err = iwh_eep_load(sc);
718 if (err != IWH_SUCCESS) {
719 cmn_err(CE_WARN, "iwh_attach(): "
720 "failed to load eeprom\n");
721 goto attach_fail9;
722 }
723
724 if (IWH_READ_EEP_SHORT(sc, EEP_VERSION) < 0x011a) {
725 IWH_DBG((IWH_DEBUG_EEPROM, "iwh_attach(): "
726 "unsupported eeprom detected\n"));
727 goto attach_fail9;
728 }
729
730 /*
731 * get MAC address of this chipset
732 */
733 iwh_get_mac_from_eep(sc);
734
735 /*
736 * calibration information from EEPROM
737 */
738 sc->sc_eep_calib = (struct iwh_eep_calibration *)
739 iwh_eep_addr_trans(sc, EEP_CALIBRATION);
740
741 /*
742 * initialize TX and RX ring buffers
743 */
744 err = iwh_ring_init(sc);
745 if (err != DDI_SUCCESS) {
746 cmn_err(CE_WARN, "iwh_attach(): "
747 "failed to allocate and initialize ring\n");
748 goto attach_fail9;
749 }
750
751 if ((0x423c == sc->sc_dev_id) || (0x423d == sc->sc_dev_id)) {
752 sc->sc_hdr = (iwh_firmware_hdr_t *)iwh_fw_5150_bin;
753 } else {
754 sc->sc_hdr = (iwh_firmware_hdr_t *)iwh_fw_5000_bin;
755 }
756
757 /*
758 * copy ucode to dma buffer
759 */
760 err = iwh_alloc_fw_dma(sc);
761 if (err != DDI_SUCCESS) {
762 cmn_err(CE_WARN, "iwh_attach(): "
763 "failed to allocate firmware dma\n");
764 goto attach_fail10;
765 }
766
767 /*
768 * Initialize the wifi part, which will be used by
769 * 802.11 module
770 */
771 ic = &sc->sc_ic;
772 ic->ic_phytype = IEEE80211_T_HT;
773 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
774 ic->ic_state = IEEE80211_S_INIT;
775 ic->ic_maxrssi = 100; /* experimental number */
776 ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
777 IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
778
779 /*
780 * Support WPA/WPA2
781 */
782 ic->ic_caps |= IEEE80211_C_WPA;
783
784 /*
785 * Support QoS/WME
786 */
787 ic->ic_caps |= IEEE80211_C_WME;
788 ic->ic_wme.wme_update = iwh_wme_update;
789
790 /*
791 * Support 802.11n/HT
792 */
793 if (sc->sc_ht_conf.ht_support) {
794 ic->ic_htcaps = IEEE80211_HTC_HT |
795 IEEE80211_HTC_AMSDU;
796 ic->ic_htcaps |= IEEE80211_HTCAP_MAXAMSDU_7935;
797 }
798
799 /*
800 * set supported .11b and .11g rates
801 */
802 ic->ic_sup_rates[IEEE80211_MODE_11B] = iwh_rateset_11b;
803 ic->ic_sup_rates[IEEE80211_MODE_11G] = iwh_rateset_11g;
804
805 /*
806 * set supported .11b and .11g channels (1 through 11)
807 */
808 for (i = 1; i <= 11; i++) {
809 ic->ic_sup_channels[i].ich_freq =
810 ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
811 ic->ic_sup_channels[i].ich_flags =
812 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
813 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
814 IEEE80211_CHAN_PASSIVE;
815
816 if (sc->sc_ht_conf.cap & HT_CAP_SUP_WIDTH) {
817 ic->ic_sup_channels[i].ich_flags |=
818 IEEE80211_CHAN_HT40;
819 } else {
820 ic->ic_sup_channels[i].ich_flags |=
821 IEEE80211_CHAN_HT20;
822 }
823 }
824
825 ic->ic_ibss_chan = &ic->ic_sup_channels[0];
826 ic->ic_xmit = iwh_send;
827
828 /*
829 * attach to 802.11 module
830 */
831 ieee80211_attach(ic);
832
833 /*
834 * each instance has its own WPA door
835 */
836 (void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
837 ddi_driver_name(dip),
838 ddi_get_instance(dip));
839
840 /*
841 * Overwrite 80211 default configurations.
842 */
843 iwh_overwrite_ic_default(sc);
844
845 /*
846 * initialize 802.11 module
847 */
848 ieee80211_media_init(ic);
849
850 /*
851 * initialize default tx key
852 */
853 ic->ic_def_txkey = 0;
854
855 err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
856 iwh_rx_softintr, (caddr_t)sc);
857 if (err != DDI_SUCCESS) {
858 cmn_err(CE_WARN, "iwh_attach(): "
859 "add soft interrupt failed\n");
860 goto attach_fail12;
861 }
862
863 err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwh_intr,
864 (caddr_t)sc, NULL);
865 if (err != DDI_SUCCESS) {
866 cmn_err(CE_WARN, "iwh_attach(): "
867 "ddi_intr_add_handle() failed\n");
868 goto attach_fail13;
869 }
870
871 err = ddi_intr_enable(sc->sc_intr_htable[0]);
872 if (err != DDI_SUCCESS) {
873 cmn_err(CE_WARN, "iwh_attach(): "
874 "ddi_intr_enable() failed\n");
875 goto attach_fail14;
876 }
877
878 /*
879 * Initialize pointer to device specific functions
880 */
881 wd.wd_secalloc = WIFI_SEC_NONE;
882 wd.wd_opmode = ic->ic_opmode;
883 IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
884
885 /*
886 * create relation to GLD
887 */
888 macp = mac_alloc(MAC_VERSION);
889 if (NULL == macp) {
890 cmn_err(CE_WARN, "iwh_attach(): "
891 "failed to do mac_alloc()\n");
892 goto attach_fail15;
893 }
894
895 macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
896 macp->m_driver = sc;
897 macp->m_dip = dip;
898 macp->m_src_addr = ic->ic_macaddr;
899 macp->m_callbacks = &iwh_m_callbacks;
900 macp->m_min_sdu = 0;
901 macp->m_max_sdu = IEEE80211_MTU;
902 macp->m_pdata = &wd;
903 macp->m_pdata_size = sizeof (wd);
904
905 /*
906 * Register macp with the MAC layer
907 */
908 err = mac_register(macp, &ic->ic_mach);
909 mac_free(macp);
910 if (err != DDI_SUCCESS) {
911 cmn_err(CE_WARN, "iwh_attach(): "
912 "failed to do mac_register()\n");
913 goto attach_fail15;
914 }
915
916 /*
917 * Create minor node of type DDI_NT_NET_WIFI
918 */
919 (void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_SP"%d", instance);
920 err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
921 instance + 1, DDI_NT_NET_WIFI, 0);
922 if (err != DDI_SUCCESS) {
923 cmn_err(CE_WARN, "iwh_attach(): "
924 "failed to do ddi_create_minor_node()\n");
925 }
926
927 /*
928 * Notify link is down now
929 */
930 mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
931
932 /*
933 * create the mf thread to handle link status updates,
934 * fatal error recovery, etc.
935 */
936 sc->sc_mf_thread_switch = 1;
937 if (NULL == sc->sc_mf_thread) {
938 sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
939 iwh_thread, sc, 0, &p0, TS_RUN, minclsyspri);
940 }
941
942 atomic_or_32(&sc->sc_flags, IWH_F_ATTACHED);
943
944 return (DDI_SUCCESS);
945
946 attach_fail15:
947 (void) ddi_intr_disable(sc->sc_intr_htable[0]);
948
949 attach_fail14:
950 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
951
952 attach_fail13:
953 (void) ddi_intr_remove_softint(sc->sc_soft_hdl);
954 sc->sc_soft_hdl = NULL;
955
956 attach_fail12:
957 ieee80211_detach(ic);
958
959 attach_fail11:
960 iwh_free_fw_dma(sc);
961
962 attach_fail10:
963 iwh_ring_free(sc);
964
965 attach_fail9:
966 iwh_free_kw(sc);
967
968 attach_fail8:
969 iwh_free_shared(sc);
970
971 attach_fail7:
972 iwh_destroy_locks(sc);
973
974 attach_fail6:
975 (void) ddi_intr_free(sc->sc_intr_htable[0]);
976
977 attach_fail5:
978 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
979
980 attach_fail4:
981 ddi_regs_map_free(&sc->sc_handle);
982
983 attach_fail3:
984 ddi_regs_map_free(&sc->sc_cfg_handle);
985
986 attach_fail2:
987 ddi_soft_state_free(iwh_soft_state_p, instance);
988
989 attach_fail1:
990 return (DDI_FAILURE);
991 }
992
993 int
994 iwh_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
995 {
996 iwh_sc_t *sc;
997 ieee80211com_t *ic;
998 int err;
999
1000 sc = ddi_get_soft_state(iwh_soft_state_p, ddi_get_instance(dip));
1001 ASSERT(sc != NULL);
1002 ic = &sc->sc_ic;
1003
1004 switch (cmd) {
1005 case DDI_DETACH:
1006 break;
1007
1008 case DDI_SUSPEND:
1009 atomic_and_32(&sc->sc_flags, ~IWH_F_HW_ERR_RECOVER);
1010 atomic_and_32(&sc->sc_flags, ~IWH_F_RATE_AUTO_CTL);
1011
1012 atomic_or_32(&sc->sc_flags, IWH_F_SUSPEND);
1013
1014 if (sc->sc_flags & IWH_F_RUNNING) {
1015 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1016 iwh_stop(sc);
1017 }
1018
1019 IWH_DBG((IWH_DEBUG_RESUME, "iwh_detach(): "
1020 "suspend\n"));
1021 return (DDI_SUCCESS);
1022
1023 default:
1024 return (DDI_FAILURE);
1025 }
1026
1027 if (!(sc->sc_flags & IWH_F_ATTACHED)) {
1028 return (DDI_FAILURE);
1029 }
1030
1031 /*
1032 * Destroy the mf_thread
1033 */
1034 sc->sc_mf_thread_switch = 0;
1035
1036 mutex_enter(&sc->sc_mt_lock);
1037 while (sc->sc_mf_thread != NULL) {
1038 if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) {
1039 break;
1040 }
1041 }
1042 mutex_exit(&sc->sc_mt_lock);
1043
1044 err = mac_disable(sc->sc_ic.ic_mach);
1045 if (err != DDI_SUCCESS) {
1046 return (err);
1047 }
1048
1049 /*
1050 * stop chipset
1051 */
1052 iwh_stop(sc);
1053
1054 DELAY(500000);
1055
1056 /*
1057 * release buffer for calibration
1058 */
1059 iwh_release_calib_buffer(sc);
1060
1061 /*
1062 * Unregister from GLD
1063 */
1064 (void) mac_unregister(sc->sc_ic.ic_mach);
1065
1066 mutex_enter(&sc->sc_glock);
1067 iwh_free_fw_dma(sc);
1068 iwh_ring_free(sc);
1069 iwh_free_kw(sc);
1070 iwh_free_shared(sc);
1071 mutex_exit(&sc->sc_glock);
1072
1073 (void) ddi_intr_disable(sc->sc_intr_htable[0]);
1074 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
1075 (void) ddi_intr_free(sc->sc_intr_htable[0]);
1076 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
1077
1078 (void) ddi_intr_remove_softint(sc->sc_soft_hdl);
1079 sc->sc_soft_hdl = NULL;
1080
1081 /*
1082 * detach from 80211 module
1083 */
1084 ieee80211_detach(&sc->sc_ic);
1085
1086 iwh_destroy_locks(sc);
1087
1088 ddi_regs_map_free(&sc->sc_handle);
1089 ddi_regs_map_free(&sc->sc_cfg_handle);
1090 ddi_remove_minor_node(dip, NULL);
1091 ddi_soft_state_free(iwh_soft_state_p, ddi_get_instance(dip));
1092
1093 return (DDI_SUCCESS);
1094 }
1095
1096 /*
1097 * destroy all locks
1098 */
1099 static void
1100 iwh_destroy_locks(iwh_sc_t *sc)
1101 {
1102 cv_destroy(&sc->sc_mt_cv);
1103 cv_destroy(&sc->sc_cmd_cv);
1104 cv_destroy(&sc->sc_put_seg_cv);
1105 cv_destroy(&sc->sc_ucode_cv);
1106 mutex_destroy(&sc->sc_mt_lock);
1107 mutex_destroy(&sc->sc_tx_lock);
1108 mutex_destroy(&sc->sc_glock);
1109 }
1110
1111 /*
1112 * Allocate an area of memory and a DMA handle for accessing it
1113 */
1114 static int
1115 iwh_alloc_dma_mem(iwh_sc_t *sc, size_t memsize,
1116 ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
1117 uint_t dma_flags, iwh_dma_t *dma_p)
1118 {
1119 caddr_t vaddr;
1120 int err = DDI_FAILURE;
1121
1122 /*
1123 * Allocate handle
1124 */
1125 err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
1126 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
1127 if (err != DDI_SUCCESS) {
1128 dma_p->dma_hdl = NULL;
1129 return (DDI_FAILURE);
1130 }
1131
1132 /*
1133 * Allocate memory
1134 */
1135 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
1136 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
1137 DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
1138 if (err != DDI_SUCCESS) {
1139 ddi_dma_free_handle(&dma_p->dma_hdl);
1140 dma_p->dma_hdl = NULL;
1141 dma_p->acc_hdl = NULL;
1142 return (DDI_FAILURE);
1143 }
1144
1145 /*
1146 * Bind the two together
1147 */
1148 dma_p->mem_va = vaddr;
1149 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
1150 vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
1151 &dma_p->cookie, &dma_p->ncookies);
1152 if (err != DDI_DMA_MAPPED) {
1153 ddi_dma_mem_free(&dma_p->acc_hdl);
1154 ddi_dma_free_handle(&dma_p->dma_hdl);
1155 dma_p->acc_hdl = NULL;
1156 dma_p->dma_hdl = NULL;
1157 return (DDI_FAILURE);
1158 }
1159
1160 dma_p->nslots = ~0U;
1161 dma_p->size = ~0U;
1162 dma_p->token = ~0U;
1163 dma_p->offset = 0;
1164 return (DDI_SUCCESS);
1165 }
1166
1167 /*
1168 * Free one allocated area of DMAable memory
1169 */
1170 static void
1171 iwh_free_dma_mem(iwh_dma_t *dma_p)
1172 {
1173 if (dma_p->dma_hdl != NULL) {
1174 if (dma_p->ncookies) {
1175 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1176 dma_p->ncookies = 0;
1177 }
1178 ddi_dma_free_handle(&dma_p->dma_hdl);
1179 dma_p->dma_hdl = NULL;
1180 }
1181
1182 if (dma_p->acc_hdl != NULL) {
1183 ddi_dma_mem_free(&dma_p->acc_hdl);
1184 dma_p->acc_hdl = NULL;
1185 }
1186 }
1187
1188 /*
1189 * copy ucode into dma buffers
1190 */
1191 static int
1192 iwh_alloc_fw_dma(iwh_sc_t *sc)
1193 {
1194 int err = DDI_FAILURE;
1195 iwh_dma_t *dma_p;
1196 char *t;
1197
1198 /*
1199 * firmware image layout:
1200 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
1201 */
1202
1203 /*
1204 * copy text of runtime ucode
1205 */
1206 t = (char *)(sc->sc_hdr + 1);
1207 err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1208 &fw_dma_attr, &iwh_dma_accattr,
1209 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1210 &sc->sc_dma_fw_text);
1211 if (err != DDI_SUCCESS) {
1212 cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1213 "failed to allocate text dma memory.\n");
1214 goto fail;
1215 }
1216
1217 dma_p = &sc->sc_dma_fw_text;
1218
1219 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1220 "text[ncookies:%d addr:%lx size:%lx]\n",
1221 dma_p->ncookies, dma_p->cookie.dmac_address,
1222 dma_p->cookie.dmac_size));
1223
1224 bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->textsz));
1225
1226 /*
1227 * copy data and backup data of runtime ucode
1228 */
1229 t += LE_32(sc->sc_hdr->textsz);
1230 err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1231 &fw_dma_attr, &iwh_dma_accattr,
1232 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1233 &sc->sc_dma_fw_data);
1234 if (err != DDI_SUCCESS) {
1235 cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1236 "failed to allocate data dma memory\n");
1237 goto fail;
1238 }
1239
1240 dma_p = &sc->sc_dma_fw_data;
1241
1242 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1243 "data[ncookies:%d addr:%lx size:%lx]\n",
1244 dma_p->ncookies, dma_p->cookie.dmac_address,
1245 dma_p->cookie.dmac_size));
1246
1247 bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->datasz));
1248
1249 err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1250 &fw_dma_attr, &iwh_dma_accattr,
1251 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1252 &sc->sc_dma_fw_data_bak);
1253 if (err != DDI_SUCCESS) {
1254 cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1255 "failed to allocate data bakup dma memory\n");
1256 goto fail;
1257 }
1258
1259 dma_p = &sc->sc_dma_fw_data_bak;
1260
1261 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1262 "data_bak[ncookies:%d addr:%lx "
1263 "size:%lx]\n",
1264 dma_p->ncookies, dma_p->cookie.dmac_address,
1265 dma_p->cookie.dmac_size));
1266
1267 bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->datasz));
1268
1269 /*
1270 * copy text of init ucode
1271 */
1272 t += LE_32(sc->sc_hdr->datasz);
1273 err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1274 &fw_dma_attr, &iwh_dma_accattr,
1275 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1276 &sc->sc_dma_fw_init_text);
1277 if (err != DDI_SUCCESS) {
1278 cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1279 "failed to allocate init text dma memory\n");
1280 goto fail;
1281 }
1282
1283 dma_p = &sc->sc_dma_fw_init_text;
1284
1285 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1286 "init_text[ncookies:%d addr:%lx "
1287 "size:%lx]\n",
1288 dma_p->ncookies, dma_p->cookie.dmac_address,
1289 dma_p->cookie.dmac_size));
1290
1291 bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->init_textsz));
1292
1293 /*
1294 * copy data of init ucode
1295 */
1296 t += LE_32(sc->sc_hdr->init_textsz);
1297 err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1298 &fw_dma_attr, &iwh_dma_accattr,
1299 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1300 &sc->sc_dma_fw_init_data);
1301 if (err != DDI_SUCCESS) {
1302 cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1303 "failed to allocate init data dma memory\n");
1304 goto fail;
1305 }
1306
1307 dma_p = &sc->sc_dma_fw_init_data;
1308
1309 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1310 "init_data[ncookies:%d addr:%lx "
1311 "size:%lx]\n",
1312 dma_p->ncookies, dma_p->cookie.dmac_address,
1313 dma_p->cookie.dmac_size));
1314
1315 bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->init_datasz));
1316
1317 sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1318
1319 fail:
1320 return (err);
1321 }
1322
1323 static void
1324 iwh_free_fw_dma(iwh_sc_t *sc)
1325 {
1326 iwh_free_dma_mem(&sc->sc_dma_fw_text);
1327 iwh_free_dma_mem(&sc->sc_dma_fw_data);
1328 iwh_free_dma_mem(&sc->sc_dma_fw_data_bak);
1329 iwh_free_dma_mem(&sc->sc_dma_fw_init_text);
1330 iwh_free_dma_mem(&sc->sc_dma_fw_init_data);
1331 }
1332
1333 /*
1334 * Allocate a shared buffer between host and NIC.
1335 */
1336 static int
1337 iwh_alloc_shared(iwh_sc_t *sc)
1338 {
1339 #ifdef DEBUG
1340 iwh_dma_t *dma_p;
1341 #endif
1342 int err = DDI_FAILURE;
1343
1344 /*
1345 * must be aligned on a 4K-page boundary
1346 */
1347 err = iwh_alloc_dma_mem(sc, sizeof (iwh_shared_t),
1348 &sh_dma_attr, &iwh_dma_descattr,
1349 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1350 &sc->sc_dma_sh);
1351 if (err != DDI_SUCCESS) {
1352 goto fail;
1353 }
1354
1355 sc->sc_shared = (iwh_shared_t *)sc->sc_dma_sh.mem_va;
1356
1357 #ifdef DEBUG
1358 dma_p = &sc->sc_dma_sh;
1359 #endif
1360 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_shared(): "
1361 "sh[ncookies:%d addr:%lx size:%lx]\n",
1362 dma_p->ncookies, dma_p->cookie.dmac_address,
1363 dma_p->cookie.dmac_size));
1364
1365 return (err);
1366
1367 fail:
1368 iwh_free_shared(sc);
1369 return (err);
1370 }
1371
1372 static void
1373 iwh_free_shared(iwh_sc_t *sc)
1374 {
1375 iwh_free_dma_mem(&sc->sc_dma_sh);
1376 }
1377
1378 /*
1379 * Allocate a keep warm page.
1380 */
1381 static int
1382 iwh_alloc_kw(iwh_sc_t *sc)
1383 {
1384 #ifdef DEBUG
1385 iwh_dma_t *dma_p;
1386 #endif
1387 int err = DDI_FAILURE;
1388
1389 /*
1390 * must be aligned on a 4K-page boundary
1391 */
1392 err = iwh_alloc_dma_mem(sc, IWH_KW_SIZE,
1393 &kw_dma_attr, &iwh_dma_descattr,
1394 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1395 &sc->sc_dma_kw);
1396 if (err != DDI_SUCCESS) {
1397 goto fail;
1398 }
1399
1400 #ifdef DEBUG
1401 dma_p = &sc->sc_dma_kw;
1402 #endif
1403 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_kw(): "
1404 "kw[ncookies:%d addr:%lx size:%lx]\n",
1405 dma_p->ncookies, dma_p->cookie.dmac_address,
1406 dma_p->cookie.dmac_size));
1407
1408 return (err);
1409
1410 fail:
1411 iwh_free_kw(sc);
1412 return (err);
1413 }
1414
1415 static void
1416 iwh_free_kw(iwh_sc_t *sc)
1417 {
1418 iwh_free_dma_mem(&sc->sc_dma_kw);
1419 }
1420
1421 /*
1422 * initialize RX ring buffers
1423 */
1424 static int
1425 iwh_alloc_rx_ring(iwh_sc_t *sc)
1426 {
1427 iwh_rx_ring_t *ring;
1428 iwh_rx_data_t *data;
1429 #ifdef DEBUG
1430 iwh_dma_t *dma_p;
1431 #endif
1432 int i, err = DDI_FAILURE;
1433
1434 ring = &sc->sc_rxq;
1435 ring->cur = 0;
1436
1437 /*
1438 * allocate RX descriptor ring buffer
1439 */
1440 err = iwh_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
1441 &ring_desc_dma_attr, &iwh_dma_descattr,
1442 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1443 &ring->dma_desc);
1444 if (err != DDI_SUCCESS) {
1445 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1446 "dma alloc rx ring desc "
1447 "failed\n"));
1448 goto fail;
1449 }
1450
1451 ring->desc = (uint32_t *)ring->dma_desc.mem_va;
1452 #ifdef DEBUG
1453 dma_p = &ring->dma_desc;
1454 #endif
1455 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1456 "rx bd[ncookies:%d addr:%lx size:%lx]\n",
1457 dma_p->ncookies, dma_p->cookie.dmac_address,
1458 dma_p->cookie.dmac_size));
1459
1460 /*
1461 * Allocate Rx frame buffers.
1462 */
1463 for (i = 0; i < RX_QUEUE_SIZE; i++) {
1464 data = &ring->data[i];
1465 err = iwh_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1466 &rx_buffer_dma_attr, &iwh_dma_accattr,
1467 DDI_DMA_READ | DDI_DMA_STREAMING,
1468 &data->dma_data);
1469 if (err != DDI_SUCCESS) {
1470 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1471 "dma alloc rx ring "
1472 "buf[%d] failed\n", i));
1473 goto fail;
1474 }
1475 /*
1476 * physical address bits [8:36] are used here,
1477 * instead of bits [0:31] as on the 3945.
1478 */
1479 ring->desc[i] = (uint32_t)
1480 (data->dma_data.cookie.dmac_address >> 8);
1481 }
1482
1483 #ifdef DEBUG
1484 dma_p = &ring->data[0].dma_data;
1485 #endif
1486 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1487 "rx buffer[0][ncookies:%d addr:%lx "
1488 "size:%lx]\n",
1489 dma_p->ncookies, dma_p->cookie.dmac_address,
1490 dma_p->cookie.dmac_size));
1491
1492 IWH_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1493
1494 return (err);
1495
1496 fail:
1497 iwh_free_rx_ring(sc);
1498 return (err);
1499 }
1500
1501 /*
1502 * disable RX ring
1503 */
1504 static void
1505 iwh_reset_rx_ring(iwh_sc_t *sc)
1506 {
1507 int n;
1508
1509 iwh_mac_access_enter(sc);
1510 IWH_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1511 for (n = 0; n < 2000; n++) {
1512 if (IWH_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) {
1513 break;
1514 }
1515 DELAY(1000);
1516 }
1517 #ifdef DEBUG
1518 if (2000 == n) {
1519 IWH_DBG((IWH_DEBUG_DMA, "iwh_reset_rx_ring(): "
1520 "timeout resetting Rx ring\n"));
1521 }
1522 #endif
1523 iwh_mac_access_exit(sc);
1524
1525 sc->sc_rxq.cur = 0;
1526 }
1527
1528 static void
1529 iwh_free_rx_ring(iwh_sc_t *sc)
1530 {
1531 int i;
1532
1533 for (i = 0; i < RX_QUEUE_SIZE; i++) {
1534 if (sc->sc_rxq.data[i].dma_data.dma_hdl) {
1535 IWH_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1536 DDI_DMA_SYNC_FORCPU);
1537 }
1538
1539 iwh_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1540 }
1541
1542 if (sc->sc_rxq.dma_desc.dma_hdl) {
1543 IWH_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1544 }
1545
1546 iwh_free_dma_mem(&sc->sc_rxq.dma_desc);
1547 }
1548
1549 /*
1550 * initialize TX ring buffers
1551 */
1552 static int
1553 iwh_alloc_tx_ring(iwh_sc_t *sc, iwh_tx_ring_t *ring,
1554 int slots, int qid)
1555 {
1556 iwh_tx_data_t *data;
1557 iwh_tx_desc_t *desc_h;
1558 uint32_t paddr_desc_h;
1559 iwh_cmd_t *cmd_h;
1560 uint32_t paddr_cmd_h;
1561 #ifdef DEBUG
1562 iwh_dma_t *dma_p;
1563 #endif
1564 int i, err = DDI_FAILURE;
1565
1566 ring->qid = qid;
1567 ring->count = TFD_QUEUE_SIZE_MAX;
1568 ring->window = slots;
1569 ring->queued = 0;
1570 ring->cur = 0;
1571 ring->desc_cur = 0;
1572
1573 /*
1574 * allocate buffer for TX descriptor ring
1575 */
1576 err = iwh_alloc_dma_mem(sc,
1577 TFD_QUEUE_SIZE_MAX * sizeof (iwh_tx_desc_t),
1578 &ring_desc_dma_attr, &iwh_dma_descattr,
1579 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1580 &ring->dma_desc);
1581 if (err != DDI_SUCCESS) {
1582 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1583 "dma alloc tx ring desc[%d] "
1584 "failed\n", qid));
1585 goto fail;
1586 }
1587
1588 #ifdef DEBUG
1589 dma_p = &ring->dma_desc;
1590 #endif
1591 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1592 "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1593 dma_p->ncookies, dma_p->cookie.dmac_address,
1594 dma_p->cookie.dmac_size));
1595
1596 desc_h = (iwh_tx_desc_t *)ring->dma_desc.mem_va;
1597 paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1598
1599 /*
1600 * allocate buffer for ucode command
1601 */
1602 err = iwh_alloc_dma_mem(sc,
1603 TFD_QUEUE_SIZE_MAX * sizeof (iwh_cmd_t),
1604 &cmd_dma_attr, &iwh_dma_accattr,
1605 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1606 &ring->dma_cmd);
1607 if (err != DDI_SUCCESS) {
1608 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1609 "dma alloc tx ring cmd[%d]"
1610 " failed\n", qid));
1611 goto fail;
1612 }
1613
1614 #ifdef DEBUG
1615 dma_p = &ring->dma_cmd;
1616 #endif
1617 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1618 "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1619 dma_p->ncookies, dma_p->cookie.dmac_address,
1620 dma_p->cookie.dmac_size));
1621
1622 cmd_h = (iwh_cmd_t *)ring->dma_cmd.mem_va;
1623 paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1624
1625 /*
1626 * Allocate Tx frame buffers.
1627 */
1628 ring->data = kmem_zalloc(sizeof (iwh_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1629 KM_NOSLEEP);
1630 if (NULL == ring->data) {
1631 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1632 "could not allocate "
1633 "tx data slots\n"));
1634 goto fail;
1635 }
1636
1637 for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1638 data = &ring->data[i];
1639 err = iwh_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1640 &tx_buffer_dma_attr, &iwh_dma_accattr,
1641 DDI_DMA_WRITE | DDI_DMA_STREAMING,
1642 &data->dma_data);
1643 if (err != DDI_SUCCESS) {
1644 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1645 "dma alloc tx "
1646 "ring buf[%d] failed\n", i));
1647 goto fail;
1648 }
1649
1650 data->desc = desc_h + i;
1651 data->paddr_desc = paddr_desc_h +
1652 _PTRDIFF(data->desc, desc_h);
1653 data->cmd = cmd_h + i;
1654 data->paddr_cmd = paddr_cmd_h +
1655 _PTRDIFF(data->cmd, cmd_h);
1656 }
1657 #ifdef DEBUG
1658 dma_p = &ring->data[0].dma_data;
1659 #endif
1660 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1661 "tx buffer[0][ncookies:%d addr:%lx "
1662 "size:%lx]\n",
1663 dma_p->ncookies, dma_p->cookie.dmac_address,
1664 dma_p->cookie.dmac_size));
1665
1666 return (err);
1667
1668 fail:
1669 iwh_free_tx_ring(ring);
1670
1671 return (err);
1672 }
1673
1674 /*
1675 * disable TX ring
1676 */
1677 static void
1678 iwh_reset_tx_ring(iwh_sc_t *sc, iwh_tx_ring_t *ring)
1679 {
1680 iwh_tx_data_t *data;
1681 int i, n;
1682
1683 iwh_mac_access_enter(sc);
1684
1685 IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1686 for (n = 0; n < 200; n++) {
1687 if (IWH_READ(sc, IWH_FH_TSSR_TX_STATUS_REG) &
1688 IWH_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) {
1689 break;
1690 }
1691 DELAY(10);
1692 }
1693
1694 #ifdef DEBUG
1695 if (200 == n) {
1696 IWH_DBG((IWH_DEBUG_DMA, "iwh_reset_tx_ring(): "
1697 "timeout reset tx ring %d\n",
1698 ring->qid));
1699 }
1700 #endif
1701
1702 iwh_mac_access_exit(sc);
1703
1704 /*
1705 * bypass the DMA sync if the device is quiesced
1706 */
1707 if (!(sc->sc_flags & IWH_F_QUIESCED)) {
1708 for (i = 0; i < ring->count; i++) {
1709 data = &ring->data[i];
1710 IWH_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1711 }
1712 }
1713
1714 ring->queued = 0;
1715 ring->cur = 0;
1716 ring->desc_cur = 0;
1717 }
1718
1719 static void
1720 iwh_free_tx_ring(iwh_tx_ring_t *ring)
1721 {
1722 int i;
1723
1724 if (ring->dma_desc.dma_hdl != NULL) {
1725 IWH_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1726 }
1727 iwh_free_dma_mem(&ring->dma_desc);
1728
1729 if (ring->dma_cmd.dma_hdl != NULL) {
1730 IWH_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1731 }
1732 iwh_free_dma_mem(&ring->dma_cmd);
1733
1734 if (ring->data != NULL) {
1735 for (i = 0; i < ring->count; i++) {
1736 if (ring->data[i].dma_data.dma_hdl) {
1737 IWH_DMA_SYNC(ring->data[i].dma_data,
1738 DDI_DMA_SYNC_FORDEV);
1739 }
1740 iwh_free_dma_mem(&ring->data[i].dma_data);
1741 }
1742 kmem_free(ring->data, ring->count * sizeof (iwh_tx_data_t));
1743 }
1744 }
1745
1746 /*
1747 * initialize TX and RX ring
1748 */
1749 static int
1750 iwh_ring_init(iwh_sc_t *sc)
1751 {
1752 int i, err = DDI_FAILURE;
1753
1754 for (i = 0; i < IWH_NUM_QUEUES; i++) {
1755 if (IWH_CMD_QUEUE_NUM == i) {
1756 continue;
1757 }
1758
1759 err = iwh_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1760 i);
1761 if (err != DDI_SUCCESS) {
1762 goto fail;
1763 }
1764 }
1765
1766 /*
1767 * initialize command queue
1768 */
1769 err = iwh_alloc_tx_ring(sc, &sc->sc_txq[IWH_CMD_QUEUE_NUM],
1770 TFD_CMD_SLOTS, IWH_CMD_QUEUE_NUM);
1771 if (err != DDI_SUCCESS) {
1772 goto fail;
1773 }
1774
1775 err = iwh_alloc_rx_ring(sc);
1776 if (err != DDI_SUCCESS) {
1777 goto fail;
1778 }
1779
1780 fail:
1781 return (err);
1782 }
1783
1784 static void
1785 iwh_ring_free(iwh_sc_t *sc)
1786 {
1787 int i = IWH_NUM_QUEUES;
1788
1789 iwh_free_rx_ring(sc);
1790 while (--i >= 0) {
1791 iwh_free_tx_ring(&sc->sc_txq[i]);
1792 }
1793 }
1794
1795 /* ARGSUSED */
1796 static ieee80211_node_t *
1797 iwh_node_alloc(ieee80211com_t *ic)
1798 {
1799 iwh_amrr_t *amrr;
1800
1801 amrr = kmem_zalloc(sizeof (iwh_amrr_t), KM_SLEEP);
1802 if (NULL == amrr) {
1803 cmn_err(CE_WARN, "iwh_node_alloc(): "
1804 "failed to allocate memory for amrr structure\n");
1805 return (NULL);
1806 }
1807
1808 iwh_amrr_init(amrr);
1809
1810 return (&amrr->in);
1811 }
1812
1813 static void
1814 iwh_node_free(ieee80211_node_t *in)
1815 {
1816 ieee80211com_t *ic;
1817
1818 if ((NULL == in) ||
1819 (NULL == in->in_ic)) {
1820 cmn_err(CE_WARN, "iwh_node_free() "
1821 "Got a NULL point from Net80211 module\n");
1822 return;
1823 }
1824 ic = in->in_ic;
1825
1826 if (ic->ic_node_cleanup != NULL) {
1827 ic->ic_node_cleanup(in);
1828 }
1829
1830 if (in->in_wpa_ie != NULL) {
1831 ieee80211_free(in->in_wpa_ie);
1832 }
1833
1834 if (in->in_wme_ie != NULL) {
1835 ieee80211_free(in->in_wme_ie);
1836 }
1837
1838 if (in->in_htcap_ie != NULL) {
1839 ieee80211_free(in->in_htcap_ie);
1840 }
1841
1842 kmem_free(in, sizeof (iwh_amrr_t));
1843 }
1844
1845 /*
1846 * Change the station's state.  This function is invoked by the net80211
1847 * module when the station's state needs to change.
1848 */
1849 static int
1850 iwh_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1851 {
1852 iwh_sc_t *sc;
1853 ieee80211_node_t *in;
1854 enum ieee80211_state ostate;
1855 iwh_add_sta_t node;
1856 iwh_amrr_t *amrr;
1857 uint8_t r;
1858 int i, err = IWH_FAIL;
1859
1860 if (NULL == ic) {
1861 return (err);
1862 }
1863 sc = (iwh_sc_t *)ic;
1864 in = ic->ic_bss;
1865 ostate = ic->ic_state;
1866
1867 mutex_enter(&sc->sc_glock);
1868
1869 switch (nstate) {
1870 case IEEE80211_S_SCAN:
1871 switch (ostate) {
1872 case IEEE80211_S_INIT:
1873 atomic_or_32(&sc->sc_flags, IWH_F_SCANNING);
1874 iwh_set_led(sc, 2, 10, 2);
1875
1876 /*
1877 * clear association to receive beacons from
1878 * all BSS'es
1879 */
1880 sc->sc_config.assoc_id = 0;
1881 sc->sc_config.filter_flags &=
1882 ~LE_32(RXON_FILTER_ASSOC_MSK);
1883
1884 IWH_DBG((IWH_DEBUG_80211, "iwh_newstate(): "
1885 "config chan %d "
1886 "flags %x filter_flags %x\n",
1887 LE_16(sc->sc_config.chan),
1888 LE_32(sc->sc_config.flags),
1889 LE_32(sc->sc_config.filter_flags)));
1890
1891 err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
1892 sizeof (iwh_rxon_cmd_t), 1);
1893 if (err != IWH_SUCCESS) {
1894 cmn_err(CE_WARN, "iwh_newstate(): "
1895 "could not clear association\n");
1896 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1897 mutex_exit(&sc->sc_glock);
1898 return (err);
1899 }
1900
1901 /*
1902 * add broadcast node to send probe request
1903 */
1904 (void) memset(&node, 0, sizeof (node));
1905 (void) memset(&node.sta.addr, 0xff, IEEE80211_ADDR_LEN);
1906 node.sta.sta_id = IWH_BROADCAST_ID;
1907 err = iwh_cmd(sc, REPLY_ADD_STA, &node,
1908 sizeof (node), 1);
1909 if (err != IWH_SUCCESS) {
1910 cmn_err(CE_WARN, "iwh_newstate(): "
1911 "could not add broadcast node\n");
1912 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1913 mutex_exit(&sc->sc_glock);
1914 return (err);
1915 }
1916 break;
1917 case IEEE80211_S_SCAN:
1918 mutex_exit(&sc->sc_glock);
1919 /* step to next channel before actual FW scan */
1920 err = sc->sc_newstate(ic, nstate, arg);
1921 mutex_enter(&sc->sc_glock);
1922 if ((err != 0) || ((err = iwh_scan(sc)) != 0)) {
1923 cmn_err(CE_WARN, "iwh_newstate(): "
1924 "could not initiate scan\n");
1925 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1926 ieee80211_cancel_scan(ic);
1927 }
1928 mutex_exit(&sc->sc_glock);
1929 return (err);
1930 default:
1931 break;
1932 }
1933 sc->sc_clk = 0;
1934 break;
1935
1936 case IEEE80211_S_AUTH:
1937 if (ostate == IEEE80211_S_SCAN) {
1938 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1939 }
1940
1941 /*
1942 * reset state to handle reassociations correctly
1943 */
1944 sc->sc_config.assoc_id = 0;
1945 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1946
1947 /*
1948 * before sending authentication and association request frames,
1949 * we need to prepare the hardware, e.g. set the channel to
1950 * match that of the target AP.
1951 */
1952 if ((err = iwh_hw_set_before_auth(sc)) != 0) {
1953 IWH_DBG((IWH_DEBUG_80211, "iwh_newstate(): "
1954 "could not send authentication request\n"));
1955 mutex_exit(&sc->sc_glock);
1956 return (err);
1957 }
1958 break;
1959
1960 case IEEE80211_S_RUN:
1961 if (ostate == IEEE80211_S_SCAN) {
1962 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1963 }
1964
1965 if (IEEE80211_M_MONITOR == ic->ic_opmode) {
1966 /*
1967 * let LED blink when monitoring
1968 */
1969 iwh_set_led(sc, 2, 10, 10);
1970 break;
1971 }
1972
1973 IWH_DBG((IWH_DEBUG_80211, "iwh_newstate(): "
1974 "associated.\n"));
1975
1976 err = iwh_run_state_config(sc);
1977 if (err != IWH_SUCCESS) {
1978 cmn_err(CE_WARN, "iwh_newstate(): "
1979 "failed to set up association\n");
1980 mutex_exit(&sc->sc_glock);
1981 return (err);
1982 }
1983
1984 /*
1985 * start automatic rate control
1986 */
1987 if ((in->in_flags & IEEE80211_NODE_HT) &&
1988 (sc->sc_ht_conf.ht_support) &&
1989 (in->in_htrates.rs_nrates > 0) &&
1990 (in->in_htrates.rs_nrates <= IEEE80211_HTRATE_MAXSIZE)) {
1991 amrr = (iwh_amrr_t *)in;
1992
1993 for (i = in->in_htrates.rs_nrates - 1; i > 0; i--) {
1994
1995 r = in->in_htrates.rs_rates[i] &
1996 IEEE80211_RATE_VAL;
1997 if ((r != 0) && (r <= 0xd) &&
1998 (sc->sc_ht_conf.tx_support_mcs[r/8] &
1999 (1 << (r%8)))) {
2000 amrr->ht_mcs_idx = r;
2001 atomic_or_32(&sc->sc_flags,
2002 IWH_F_RATE_AUTO_CTL);
2003 break;
2004 }
2005 }
2006 } else {
2007 if (IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) {
2008 atomic_or_32(&sc->sc_flags,
2009 IWH_F_RATE_AUTO_CTL);
2010
2011 /*
2012 * set rate to some reasonable initial value
2013 */
2014 i = in->in_rates.ir_nrates - 1;
2015 while (i > 0 && IEEE80211_RATE(i) > 72) {
2016 i--;
2017 }
2018 in->in_txrate = i;
2019
2020 } else {
2021 atomic_and_32(&sc->sc_flags,
2022 ~IWH_F_RATE_AUTO_CTL);
2023 }
2024 }
2025
2026 /*
2027 * set LED on after associated
2028 */
2029 iwh_set_led(sc, 2, 0, 1);
2030 break;
2031
2032 case IEEE80211_S_INIT:
2033 if (ostate == IEEE80211_S_SCAN) {
2034 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
2035 }
2036 /*
2037 * set LED off after init
2038 */
2039 iwh_set_led(sc, 2, 1, 0);
2040 break;
2041
2042 case IEEE80211_S_ASSOC:
2043 if (ostate == IEEE80211_S_SCAN) {
2044 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
2045 }
2046 break;
2047 }
2048
2049 mutex_exit(&sc->sc_glock);
2050
2051 return (sc->sc_newstate(ic, nstate, arg));
2052 }
2053
2054 /*
2055 * begin exclusive access to the MAC.
2056 */
2057 static void
2058 iwh_mac_access_enter(iwh_sc_t *sc)
2059 {
2060 uint32_t tmp;
2061 int n;
2062
2063 tmp = IWH_READ(sc, CSR_GP_CNTRL);
2064 IWH_WRITE(sc, CSR_GP_CNTRL,
2065 tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2066
2067 /*
2068 * wait until we succeed
2069 */
2070 for (n = 0; n < 1000; n++) {
2071 if ((IWH_READ(sc, CSR_GP_CNTRL) &
2072 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
2073 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
2074 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) {
2075 break;
2076 }
2077 DELAY(10);
2078 }
2079
2080 #ifdef DEBUG
2081 if (1000 == n) {
2082 IWH_DBG((IWH_DEBUG_PIO, "iwh_mac_access_enter(): "
2083 "could not lock memory\n"));
2084 }
2085 #endif
2086 }
2087
2088 /*
2089 * exclusive access to mac end.
2090 */
2091 static void
2092 iwh_mac_access_exit(iwh_sc_t *sc)
2093 {
2094 uint32_t tmp = IWH_READ(sc, CSR_GP_CNTRL);
2095 IWH_WRITE(sc, CSR_GP_CNTRL,
2096 tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2097 }
2098
2099 /*
2100  * this function is defined here for future use.
2101 * static uint32_t
2102 * iwh_mem_read(iwh_sc_t *sc, uint32_t addr)
2103 * {
2104 * IWH_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
2105 * return (IWH_READ(sc, HBUS_TARG_MEM_RDAT));
2106 * }
2107 */
2108
2109 /*
2110 * write mac memory
2111 */
2112 static void
2113 iwh_mem_write(iwh_sc_t *sc, uint32_t addr, uint32_t data)
2114 {
2115 IWH_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
2116 IWH_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
2117 }
2118
2119 /*
2120 * read mac register
2121 */
2122 static uint32_t
2123 iwh_reg_read(iwh_sc_t *sc, uint32_t addr)
2124 {
2125 IWH_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
2126 return (IWH_READ(sc, HBUS_TARG_PRPH_RDAT));
2127 }
2128
2129 /*
2130 * write mac register
2131 */
2132 static void
2133 iwh_reg_write(iwh_sc_t *sc, uint32_t addr, uint32_t data)
2134 {
2135 IWH_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
2136 IWH_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
2137 }
2138
2139
2140 /*
2141 * steps of loading ucode:
2142 * load init ucode=>init alive=>calibrate=>
2143 * receive calibration result=>reinitialize NIC=>
2144 * load runtime ucode=>runtime alive=>
2145 * send calibration result=>running.
2146 */
2147 static int
2148 iwh_load_init_firmware(iwh_sc_t *sc)
2149 {
2150 int err = IWH_FAIL;
2151 clock_t clk;
2152
2153 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2154
2155 /*
2156 * load init_text section of uCode to hardware
2157 */
2158 err = iwh_put_seg_fw(sc, sc->sc_dma_fw_init_text.cookie.dmac_address,
2159 RTC_INST_LOWER_BOUND, sc->sc_dma_fw_init_text.cookie.dmac_size);
2160 if (err != IWH_SUCCESS) {
2161 cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2162 "failed to write init uCode.\n");
2163 return (err);
2164 }
2165
2166 clk = ddi_get_lbolt() + drv_usectohz(1000000);
2167
2168 /*
2169 	 * wait until loading of init_text completes or times out
2170 */
2171 while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2172 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2173 break;
2174 }
2175 }
2176
2177 if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2178 cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2179 "timeout waiting for init uCode load.\n");
2180 return (IWH_FAIL);
2181 }
2182
2183 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2184
2185 /*
2186 * load init_data section of uCode to hardware
2187 */
2188 err = iwh_put_seg_fw(sc, sc->sc_dma_fw_init_data.cookie.dmac_address,
2189 RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_init_data.cookie.dmac_size);
2190 if (err != IWH_SUCCESS) {
2191 cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2192 "failed to write init_data uCode.\n");
2193 return (err);
2194 }
2195
2196 clk = ddi_get_lbolt() + drv_usectohz(1000000);
2197
2198 /*
2199 	 * wait until loading of init_data completes or times out
2200 */
2201 while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2202 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2203 break;
2204 }
2205 }
2206
2207 if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2208 cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2209 "timeout waiting for init_data uCode load.\n");
2210 return (IWH_FAIL);
2211 }
2212
2213 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2214
2215 return (err);
2216 }
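/*
 * Both segment loads in iwh_load_init_firmware() and iwh_load_run_firmware()
 * below follow the same clear-flag / DMA-the-segment / wait-for-FH-TX-interrupt
 * handshake.  A hypothetical helper capturing that pattern (a sketch only,
 * assuming sc_glock is held as in the callers of these routines) could look
 * like:
 *
 *	static int
 *	iwh_put_seg_and_wait(iwh_sc_t *sc, uint32_t dma_addr,
 *	    uint32_t dst, uint32_t size)
 *	{
 *		clock_t clk;
 *
 *		atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
 *
 *		if (iwh_put_seg_fw(sc, dma_addr, dst, size) != IWH_SUCCESS) {
 *			return (IWH_FAIL);
 *		}
 *
 *		clk = ddi_get_lbolt() + drv_usectohz(1000000);
 *		while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
 *			if (cv_timedwait(&sc->sc_put_seg_cv,
 *			    &sc->sc_glock, clk) < 0) {
 *				break;
 *			}
 *		}
 *
 *		return ((sc->sc_flags & IWH_F_PUT_SEG) ?
 *		    IWH_SUCCESS : IWH_FAIL);
 *	}
 */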
2217
2218 static int
2219 iwh_load_run_firmware(iwh_sc_t *sc)
2220 {
2221 int err = IWH_FAIL;
2222 clock_t clk;
2223
2224 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2225
2226 /*
2227 	 * load run_text section of uCode to hardware
2228 */
2229 err = iwh_put_seg_fw(sc, sc->sc_dma_fw_text.cookie.dmac_address,
2230 RTC_INST_LOWER_BOUND, sc->sc_dma_fw_text.cookie.dmac_size);
2231 if (err != IWH_SUCCESS) {
2232 cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2233 "failed to write run uCode.\n");
2234 return (err);
2235 }
2236
2237 clk = ddi_get_lbolt() + drv_usectohz(1000000);
2238
2239 /*
2240 	 * wait until loading of run_text completes or times out
2241 */
2242 while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2243 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2244 break;
2245 }
2246 }
2247
2248 if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2249 cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2250 "timeout waiting for run uCode load.\n");
2251 return (IWH_FAIL);
2252 }
2253
2254 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2255
2256 /*
2257 * load run_data section of uCode to hardware
2258 */
2259 err = iwh_put_seg_fw(sc, sc->sc_dma_fw_data_bak.cookie.dmac_address,
2260 RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_data.cookie.dmac_size);
2261 if (err != IWH_SUCCESS) {
2262 cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2263 "failed to write run_data uCode.\n");
2264 return (err);
2265 }
2266
2267 clk = ddi_get_lbolt() + drv_usectohz(1000000);
2268
2269 /*
2270 	 * wait until loading of run_data completes or times out
2271 */
2272 while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2273 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2274 break;
2275 }
2276 }
2277
2278 if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2279 cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2280 "timeout waiting for run_data uCode load.\n");
2281 return (IWH_FAIL);
2282 }
2283
2284 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2285
2286 return (err);
2287 }
2288
2289 /*
2290 * this function will be invoked to receive phy information
2291 * when a frame is received.
2292 */
2293 static void
2294 iwh_rx_phy_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2295 {
2296
2297 sc->sc_rx_phy_res.flag = 1;
2298
2299 bcopy((uint8_t *)(desc + 1), sc->sc_rx_phy_res.buf,
2300 sizeof (iwh_rx_phy_res_t));
2301 }
2302
2303 /*
2304  * this function will be invoked to receive the body of a frame
2305  * when one is received.
2306 */
2307 static void
2308 iwh_rx_mpdu_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2309 {
2310 ieee80211com_t *ic = &sc->sc_ic;
2311 #ifdef DEBUG
2312 iwh_rx_ring_t *ring = &sc->sc_rxq;
2313 #endif
2314 struct ieee80211_frame *wh;
2315 struct iwh_rx_non_cfg_phy *phyinfo;
2316 struct iwh_rx_mpdu_body_size *mpdu_size;
2317 mblk_t *mp;
2318 int16_t t;
2319 uint16_t len, rssi, agc;
2320 uint32_t temp, crc, *tail;
2321 uint32_t arssi, brssi, crssi, mrssi;
2322 iwh_rx_phy_res_t *stat;
2323 ieee80211_node_t *in;
2324
2325 /*
2326 	 * assuming non-11n here; 11n will be handled in phase II
2327 */
2328 mpdu_size = (struct iwh_rx_mpdu_body_size *)(desc + 1);
2329 stat = (iwh_rx_phy_res_t *)sc->sc_rx_phy_res.buf;
2330 if (stat->cfg_phy_cnt > 20) {
2331 return;
2332 }
2333
2334 phyinfo = (struct iwh_rx_non_cfg_phy *)stat->non_cfg_phy;
2335 temp = LE_32(phyinfo->non_cfg_phy[IWH_RX_RES_AGC_IDX]);
2336 agc = (temp & IWH_OFDM_AGC_MSK) >> IWH_OFDM_AGC_BIT_POS;
2337
2338 temp = LE_32(phyinfo->non_cfg_phy[IWH_RX_RES_RSSI_AB_IDX]);
2339 arssi = (temp & IWH_OFDM_RSSI_A_MSK) >> IWH_OFDM_RSSI_A_BIT_POS;
2340 brssi = (temp & IWH_OFDM_RSSI_B_MSK) >> IWH_OFDM_RSSI_B_BIT_POS;
2341
2342 temp = LE_32(phyinfo->non_cfg_phy[IWH_RX_RES_RSSI_C_IDX]);
2343 crssi = (temp & IWH_OFDM_RSSI_C_MSK) >> IWH_OFDM_RSSI_C_BIT_POS;
2344
2345 mrssi = MAX(arssi, brssi);
2346 mrssi = MAX(mrssi, crssi);
2347
2348 t = mrssi - agc - IWH_RSSI_OFFSET;
2349 /*
2350 * convert dBm to percentage
2351 */
2352 rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
2353 / (75 * 75);
2354 if (rssi > 100) {
2355 rssi = 100;
2356 }
2357 if (rssi < 1) {
2358 rssi = 1;
2359 }
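	/*
	 * Worked example for the conversion above (illustrative numbers
	 * only): for t = -65 dBm, (-20 - t) is 45, so
	 * rssi = (562500 - 45 * (1125 + 62 * 45)) / 5625 = 68, i.e. about
	 * 68%.  Very strong signals near -20 dBm come out around 100% and
	 * are capped by the checks above.
	 */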
2360
2361 /*
2362 	 * size of frame, not including FCS
2363 */
2364 len = LE_16(mpdu_size->byte_count);
2365 tail = (uint32_t *)((uint8_t *)(desc + 1) +
2366 sizeof (struct iwh_rx_mpdu_body_size) + len);
2367 bcopy(tail, &crc, 4);
2368
2369 IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2370 "rx intr: idx=%d phy_len=%x len=%d "
2371 "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
2372 "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
2373 len, stat->rate.r.s.rate, stat->channel,
2374 LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
2375 stat->cfg_phy_cnt, LE_32(crc)));
2376
2377 if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
2378 IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2379 "rx frame oversize\n"));
2380 return;
2381 }
2382
2383 /*
2384 * discard Rx frames with bad CRC
2385 */
2386 if ((LE_32(crc) &
2387 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
2388 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2389 IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2390 "rx crc error tail: %x\n",
2391 LE_32(crc)));
2392 sc->sc_rx_err++;
2393 return;
2394 }
2395
2396 wh = (struct ieee80211_frame *)
2397 ((uint8_t *)(desc + 1)+ sizeof (struct iwh_rx_mpdu_body_size));
2398
2399 if (IEEE80211_FC0_SUBTYPE_ASSOC_RESP == *(uint8_t *)wh) {
2400 sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
2401 IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2402 "rx : association id = %x\n",
2403 sc->sc_assoc_id));
2404 }
2405
2406 #ifdef DEBUG
2407 if (iwh_dbg_flags & IWH_DEBUG_RX) {
2408 ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
2409 }
2410 #endif
2411
2412 in = ieee80211_find_rxnode(ic, wh);
2413 mp = allocb(len, BPRI_MED);
2414 if (mp) {
2415 bcopy(wh, mp->b_wptr, len);
2416 mp->b_wptr += len;
2417
2418 /*
2419 * send the frame to the 802.11 layer
2420 */
2421 (void) ieee80211_input(ic, mp, in, rssi, 0);
2422 } else {
2423 sc->sc_rx_nobuf++;
2424 IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2425 "alloc rx buf failed\n"));
2426 }
2427
2428 /*
2429 * release node reference
2430 */
2431 ieee80211_free_node(in);
2432 }
2433
2434 /*
2435  * handle follow-up work after a frame has been sent.
2436 */
2437 static void
2438 iwh_tx_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2439 {
2440 ieee80211com_t *ic = &sc->sc_ic;
2441 iwh_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2442 iwh_tx_stat_t *stat = (iwh_tx_stat_t *)(desc + 1);
2443 iwh_amrr_t *amrr;
2444
2445 if (NULL == ic->ic_bss) {
2446 return;
2447 }
2448
2449 amrr = (iwh_amrr_t *)ic->ic_bss;
2450
2451 amrr->txcnt++;
2452 IWH_DBG((IWH_DEBUG_RATECTL, "iwh_tx_intr(): "
2453 "tx: %d cnt\n", amrr->txcnt));
2454
2455 if (stat->ntries > 0) {
2456 amrr->retrycnt++;
2457 sc->sc_tx_retries++;
2458 IWH_DBG((IWH_DEBUG_TX, "iwh_tx_intr(): "
2459 "tx: %d retries\n",
2460 sc->sc_tx_retries));
2461 }
2462
2463 mutex_enter(&sc->sc_mt_lock);
2464 sc->sc_tx_timer = 0;
2465 mutex_exit(&sc->sc_mt_lock);
2466
2467 mutex_enter(&sc->sc_tx_lock);
2468
2469 ring->queued--;
2470 if (ring->queued < 0) {
2471 ring->queued = 0;
2472 }
2473
2474 if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count >> 3))) {
2475 sc->sc_need_reschedule = 0;
2476 mutex_exit(&sc->sc_tx_lock);
2477 mac_tx_update(ic->ic_mach);
2478 mutex_enter(&sc->sc_tx_lock);
2479 }
2480
2481 mutex_exit(&sc->sc_tx_lock);
2482 }
2483
2484 /*
2485  * signal that a given command has been executed
2486 */
2487 static void
2488 iwh_cmd_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2489 {
2490 if ((desc->hdr.qid & 7) != 4) {
2491 return;
2492 }
2493
2494 if (sc->sc_cmd_accum > 0) {
2495 sc->sc_cmd_accum--;
2496 return;
2497 }
2498
2499 mutex_enter(&sc->sc_glock);
2500
2501 sc->sc_cmd_flag = SC_CMD_FLG_DONE;
2502
2503 cv_signal(&sc->sc_cmd_cv);
2504
2505 mutex_exit(&sc->sc_glock);
2506
2507 IWH_DBG((IWH_DEBUG_CMD, "iwh_cmd_intr(): "
2508 "qid=%x idx=%d flags=%x type=0x%x\n",
2509 desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2510 desc->hdr.type));
2511 }
2512
2513 /*
2514  * this function will be invoked when an alive notification occurs.
2515 */
2516 static void
2517 iwh_ucode_alive(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2518 {
2519 uint32_t rv;
2520 struct iwh_calib_cfg_cmd cmd;
2521 struct iwh_alive_resp *ar =
2522 (struct iwh_alive_resp *)(desc + 1);
2523 struct iwh_calib_results *res_p = &sc->sc_calib_results;
2524
2525 /*
2526 * the microcontroller is ready
2527 */
2528 IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2529 "microcode alive notification minor: %x major: %x type: "
2530 "%x subtype: %x\n",
2531 	    ar->ucode_minor, ar->ucode_major, ar->ver_type, ar->ver_subtype));
2532
2533 #ifdef DEBUG
2534 if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2535 IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2536 "microcontroller initialization failed\n"));
2537 }
2538 #endif
2539
2540 /*
2541 * determine if init alive or runtime alive.
2542 */
2543 if (INITIALIZE_SUBTYPE == ar->ver_subtype) {
2544 IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2545 "initialization alive received.\n"));
2546
2547 bcopy(ar, &sc->sc_card_alive_init,
2548 sizeof (struct iwh_init_alive_resp));
2549
2550 /*
2551 * necessary configuration to NIC
2552 */
2553 mutex_enter(&sc->sc_glock);
2554
2555 rv = iwh_alive_common(sc);
2556 if (rv != IWH_SUCCESS) {
2557 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2558 "common alive process failed in init alive.\n");
2559 mutex_exit(&sc->sc_glock);
2560 return;
2561 }
2562
2563 (void) memset(&cmd, 0, sizeof (cmd));
2564
2565 cmd.ucd_calib_cfg.once.is_enable = IWH_CALIB_INIT_CFG_ALL;
2566 cmd.ucd_calib_cfg.once.start = IWH_CALIB_INIT_CFG_ALL;
2567 cmd.ucd_calib_cfg.once.send_res = IWH_CALIB_INIT_CFG_ALL;
2568 cmd.ucd_calib_cfg.flags = IWH_CALIB_INIT_CFG_ALL;
2569
2570 /*
2571 		 * ask uCode to execute calibration
2572 */
2573 rv = iwh_cmd(sc, CALIBRATION_CFG_CMD, &cmd, sizeof (cmd), 1);
2574 if (rv != IWH_SUCCESS) {
2575 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2576 "failed to send calibration configure command.\n");
2577 mutex_exit(&sc->sc_glock);
2578 return;
2579 }
2580
2581 mutex_exit(&sc->sc_glock);
2582
2583 } else { /* runtime alive */
2584
2585 IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2586 "runtime alive received.\n"));
2587
2588 bcopy(ar, &sc->sc_card_alive_run,
2589 sizeof (struct iwh_alive_resp));
2590
2591 mutex_enter(&sc->sc_glock);
2592
2593 /*
2594 * necessary configuration to NIC
2595 */
2596 rv = iwh_alive_common(sc);
2597 if (rv != IWH_SUCCESS) {
2598 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2599 "common alive process failed in run alive.\n");
2600 mutex_exit(&sc->sc_glock);
2601 return;
2602 }
2603
2604 /*
2605 		 * send the result of local oscillator calibration to uCode.
2606 */
2607 if (res_p->lo_res != NULL) {
2608 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2609 res_p->lo_res, res_p->lo_res_len, 1);
2610 if (rv != IWH_SUCCESS) {
2611 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2612 				    "failed to send local "
2613 				    "oscillator calibration command.\n");
2614 mutex_exit(&sc->sc_glock);
2615 return;
2616 }
2617
2618 DELAY(1000);
2619 }
2620
2621 /*
2622 * send the result of TX IQ calibration to uCode.
2623 */
2624 if (res_p->tx_iq_res != NULL) {
2625 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2626 res_p->tx_iq_res, res_p->tx_iq_res_len, 1);
2627 if (rv != IWH_SUCCESS) {
2628 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2629 				    "failed to send TX IQ "
2630 "calibration command.\n");
2631 mutex_exit(&sc->sc_glock);
2632 return;
2633 }
2634
2635 DELAY(1000);
2636 }
2637
2638 /*
2639 		 * send the result of TX IQ periodic calibration to uCode.
2640 */
2641 if (res_p->tx_iq_perd_res != NULL) {
2642 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2643 res_p->tx_iq_perd_res,
2644 res_p->tx_iq_perd_res_len, 1);
2645 if (rv != IWH_SUCCESS) {
2646 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2647 				    "failed to send TX IQ periodic "
2648 "calibration command.\n");
2649 mutex_exit(&sc->sc_glock);
2650 return;
2651 }
2652
2653 DELAY(1000);
2654 }
2655
2656 /*
2657 * send the result of DC calibration to uCode.
2658 */
2659 if (res_p->dc_res != NULL) {
2660 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2661 res_p->dc_res,
2662 res_p->dc_res_len, 1);
2663 if (rv != IWH_SUCCESS) {
2664 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2665 				    "failed to send DC "
2666 "calibration command.\n");
2667 mutex_exit(&sc->sc_glock);
2668 return;
2669 }
2670
2671 DELAY(1000);
2672 }
2673
2674 /*
2675 * send the result of BASE BAND calibration to uCode.
2676 */
2677 if (res_p->base_band_res != NULL) {
2678 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2679 res_p->base_band_res,
2680 res_p->base_band_res_len, 1);
2681 if (rv != IWH_SUCCESS) {
2682 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2683 				    "failed to send BASE BAND "
2684 "calibration command.\n");
2685 mutex_exit(&sc->sc_glock);
2686 return;
2687 }
2688
2689 DELAY(1000);
2690 }
2691
2692 atomic_or_32(&sc->sc_flags, IWH_F_FW_INIT);
2693 cv_signal(&sc->sc_ucode_cv);
2694
2695 mutex_exit(&sc->sc_glock);
2696 }
2697
2698 }
2699
2700 /*
2701 * deal with receiving frames, command response
2702 * and all notifications from ucode.
2703 */
2704 /* ARGSUSED */
2705 static uint_t
2706 iwh_rx_softintr(caddr_t arg, caddr_t unused)
2707 {
2708 iwh_sc_t *sc;
2709 ieee80211com_t *ic;
2710 iwh_rx_desc_t *desc;
2711 iwh_rx_data_t *data;
2712 uint32_t index;
2713
2714 if (NULL == arg) {
2715 return (DDI_INTR_UNCLAIMED);
2716 }
2717 sc = (iwh_sc_t *)arg;
2718 ic = &sc->sc_ic;
2719
2720 /*
2721 	 * the firmware has advanced the index of the rx queue; the driver
2722 	 * reads it and processes the new entries.
2723 */
2724 index = (sc->sc_shared->val0) & 0xfff;
2725
2726 while (sc->sc_rxq.cur != index) {
2727 data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2728 desc = (iwh_rx_desc_t *)data->dma_data.mem_va;
2729
2730 IWH_DBG((IWH_DEBUG_INTR, "iwh_rx_softintr(): "
2731 "rx notification index = %d"
2732 " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2733 index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2734 desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2735
2736 /*
2737 		 * a command other than scan and tx needs to be acknowledged
2738 */
2739 if (!(desc->hdr.qid & 0x80) &&
2740 (desc->hdr.type != REPLY_SCAN_CMD) &&
2741 (desc->hdr.type != REPLY_TX)) {
2742 iwh_cmd_intr(sc, desc);
2743 }
2744
2745 switch (desc->hdr.type) {
2746 case REPLY_RX_PHY_CMD:
2747 iwh_rx_phy_intr(sc, desc);
2748 break;
2749
2750 case REPLY_RX_MPDU_CMD:
2751 iwh_rx_mpdu_intr(sc, desc);
2752 break;
2753
2754 case REPLY_TX:
2755 iwh_tx_intr(sc, desc);
2756 break;
2757
2758 case REPLY_ALIVE:
2759 iwh_ucode_alive(sc, desc);
2760 break;
2761
2762 case CARD_STATE_NOTIFICATION:
2763 {
2764 uint32_t *status = (uint32_t *)(desc + 1);
2765
2766 IWH_DBG((IWH_DEBUG_RADIO, "iwh_rx_softintr(): "
2767 "state changed to %x\n",
2768 LE_32(*status)));
2769
2770 if (LE_32(*status) & 1) {
2771 /*
2772 				 * the radio kill switch has been pushed (OFF).
2773 				 * This is treated as a hw error; the
2774 				 * iwh_thread() tries to recover once the
2775 				 * switch is pushed again (ON)
2776 */
2777 cmn_err(CE_NOTE, "iwh_rx_softintr(): "
2778 "radio transmitter is off\n");
2779 sc->sc_ostate = sc->sc_ic.ic_state;
2780 ieee80211_new_state(&sc->sc_ic,
2781 IEEE80211_S_INIT, -1);
2782 atomic_or_32(&sc->sc_flags,
2783 (IWH_F_HW_ERR_RECOVER | IWH_F_RADIO_OFF));
2784 }
2785
2786 break;
2787 }
2788
2789 case SCAN_START_NOTIFICATION:
2790 {
2791 iwh_start_scan_t *scan =
2792 (iwh_start_scan_t *)(desc + 1);
2793
2794 IWH_DBG((IWH_DEBUG_SCAN, "iwh_rx_softintr(): "
2795 "scanning channel %d status %x\n",
2796 scan->chan, LE_32(scan->status)));
2797
2798 ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2799 break;
2800 }
2801
2802 case SCAN_COMPLETE_NOTIFICATION:
2803 {
2804 #ifdef DEBUG
2805 iwh_stop_scan_t *scan =
2806 (iwh_stop_scan_t *)(desc + 1);
2807
2808 IWH_DBG((IWH_DEBUG_SCAN, "iwh_rx_softintr(): "
2809 "completed channel %d (burst of %d) status %02x\n",
2810 scan->chan, scan->nchan, scan->status));
2811 #endif
2812
2813 sc->sc_scan_pending++;
2814 break;
2815 }
2816
2817 case STATISTICS_NOTIFICATION:
2818 {
2819 /*
2820 * handle statistics notification
2821 */
2822 break;
2823 }
2824
2825 case CALIBRATION_RES_NOTIFICATION:
2826 iwh_save_calib_result(sc, desc);
2827 break;
2828
2829 case CALIBRATION_COMPLETE_NOTIFICATION:
2830 mutex_enter(&sc->sc_glock);
2831 atomic_or_32(&sc->sc_flags, IWH_F_FW_INIT);
2832 cv_signal(&sc->sc_ucode_cv);
2833 mutex_exit(&sc->sc_glock);
2834 break;
2835
2836 case MISSED_BEACONS_NOTIFICATION:
2837 /* handle beacon miss by software mechanism */
2838 break;
2839 }
2840
2841 sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2842 }
2843
2844 /*
2845 	 * the driver has dealt with everything received in the rx queue;
2846 	 * tell the firmware how far it has read.
2847 */
2848 index = (0 == index) ? RX_QUEUE_SIZE - 1 : index - 1;
2849 IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
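	/*
	 * The write pointer handed back to the firmware is rounded down to a
	 * multiple of 8 (index & ~7); the RBDCB write pointer is assumed to
	 * require 8-entry alignment, as on related Intel wireless hardware.
	 * For example, a firmware index of 37 yields (37 - 1) & ~7 = 32.
	 */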
2850
2851 /*
2852 * re-enable interrupts
2853 */
2854 IWH_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2855
2856 return (DDI_INTR_CLAIMED);
2857 }
2858
2859 /*
2860  * interrupt handler
2861 */
2862 /* ARGSUSED */
2863 static uint_t
2864 iwh_intr(caddr_t arg, caddr_t unused)
2865 {
2866 iwh_sc_t *sc;
2867 uint32_t r, rfh;
2868
2869 if (NULL == arg) {
2870 return (DDI_INTR_UNCLAIMED);
2871 }
2872 sc = (iwh_sc_t *)arg;
2873
2874 r = IWH_READ(sc, CSR_INT);
2875 if (0 == r || 0xffffffff == r) {
2876 return (DDI_INTR_UNCLAIMED);
2877 }
2878
2879 IWH_DBG((IWH_DEBUG_INTR, "iwh_intr(): "
2880 "interrupt reg %x\n", r));
2881
2882 rfh = IWH_READ(sc, CSR_FH_INT_STATUS);
2883
2884 IWH_DBG((IWH_DEBUG_INTR, "iwh_intr(): "
2885 "FH interrupt reg %x\n", rfh));
2886
2887 /*
2888 * disable interrupts
2889 */
2890 IWH_WRITE(sc, CSR_INT_MASK, 0);
2891
2892 /*
2893 * ack interrupts
2894 */
2895 IWH_WRITE(sc, CSR_INT, r);
2896 IWH_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2897
2898 if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2899 IWH_DBG((IWH_DEBUG_FW, "iwh_intr(): "
2900 "fatal firmware error\n"));
2901 iwh_stop(sc);
2902 sc->sc_ostate = sc->sc_ic.ic_state;
2903
2904 /*
2905 * notify upper layer
2906 */
2907 if (!IWH_CHK_FAST_RECOVER(sc)) {
2908 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2909 }
2910
2911 atomic_or_32(&sc->sc_flags, IWH_F_HW_ERR_RECOVER);
2912 return (DDI_INTR_CLAIMED);
2913 }
2914
2915 if (r & BIT_INT_RF_KILL) {
2916 uint32_t tmp = IWH_READ(sc, CSR_GP_CNTRL);
2917 if (tmp & (1 << 27)) {
2918 cmn_err(CE_NOTE, "RF switch: radio on\n");
2919 }
2920 }
2921
2922 if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2923 (rfh & FH_INT_RX_MASK)) {
2924 (void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2925 return (DDI_INTR_CLAIMED);
2926 }
2927
2928 if (r & BIT_INT_FH_TX) {
2929 mutex_enter(&sc->sc_glock);
2930 atomic_or_32(&sc->sc_flags, IWH_F_PUT_SEG);
2931 cv_signal(&sc->sc_put_seg_cv);
2932 mutex_exit(&sc->sc_glock);
2933 }
2934
2935 #ifdef DEBUG
2936 if (r & BIT_INT_ALIVE) {
2937 IWH_DBG((IWH_DEBUG_FW, "iwh_intr(): "
2938 "firmware initialized.\n"));
2939 }
2940 #endif
2941
2942 /*
2943 * re-enable interrupts
2944 */
2945 IWH_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2946
2947 return (DDI_INTR_CLAIMED);
2948 }
2949
2950 static uint8_t
2951 iwh_rate_to_plcp(int rate)
2952 {
2953 uint8_t ret;
2954
2955 switch (rate) {
2956 /*
2957 * CCK rates
2958 */
2959 case 2:
2960 ret = 0xa;
2961 break;
2962
2963 case 4:
2964 ret = 0x14;
2965 break;
2966
2967 case 11:
2968 ret = 0x37;
2969 break;
2970
2971 case 22:
2972 ret = 0x6e;
2973 break;
2974
2975 /*
2976 * OFDM rates
2977 */
2978 case 12:
2979 ret = 0xd;
2980 break;
2981
2982 case 18:
2983 ret = 0xf;
2984 break;
2985
2986 case 24:
2987 ret = 0x5;
2988 break;
2989
2990 case 36:
2991 ret = 0x7;
2992 break;
2993
2994 case 48:
2995 ret = 0x9;
2996 break;
2997
2998 case 72:
2999 ret = 0xb;
3000 break;
3001
3002 case 96:
3003 ret = 0x1;
3004 break;
3005
3006 case 108:
3007 ret = 0x3;
3008 break;
3009
3010 default:
3011 ret = 0;
3012 break;
3013 }
3014
3015 return (ret);
3016 }
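/*
 * The rate argument is in net80211's 500kbps units (2 == 1Mbps CCK,
 * 108 == 54Mbps OFDM) and the return value is the PLCP signal code the
 * firmware expects.  As an illustration only, sending at 1Mbps CCK on
 * antenna B would combine it with the rate flags the way iwh_send() and
 * iwh_scan() do:
 *
 *	rate_n_flags = LE_32(iwh_rate_to_plcp(2) |
 *	    RATE_MCS_CCK_MSK | RATE_MCS_ANT_B_MSK);
 */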
3017
3018 /*
3019  * invoked by GLD to send frames
3020 */
3021 static mblk_t *
3022 iwh_m_tx(void *arg, mblk_t *mp)
3023 {
3024 iwh_sc_t *sc;
3025 ieee80211com_t *ic;
3026 mblk_t *next;
3027
3028 if (NULL == arg) {
3029 return (NULL);
3030 }
3031 sc = (iwh_sc_t *)arg;
3032 ic = &sc->sc_ic;
3033
3034 if (sc->sc_flags & IWH_F_SUSPEND) {
3035 freemsgchain(mp);
3036 return (NULL);
3037 }
3038
3039 if (ic->ic_state != IEEE80211_S_RUN) {
3040 freemsgchain(mp);
3041 return (NULL);
3042 }
3043
3044 if ((sc->sc_flags & IWH_F_HW_ERR_RECOVER) &&
3045 IWH_CHK_FAST_RECOVER(sc)) {
3046 IWH_DBG((IWH_DEBUG_FW, "iwh_m_tx(): "
3047 "hold queue\n"));
3048 return (mp);
3049 }
3050
3051 while (mp != NULL) {
3052 next = mp->b_next;
3053 mp->b_next = NULL;
3054 if (iwh_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
3055 mp->b_next = next;
3056 break;
3057 }
3058 mp = next;
3059 }
3060
3061 return (mp);
3062 }
3063
3064 /*
3065 * send frames
3066 */
3067 static int
3068 iwh_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
3069 {
3070 iwh_sc_t *sc;
3071 iwh_tx_ring_t *ring;
3072 iwh_tx_desc_t *desc;
3073 iwh_tx_data_t *data;
3074 iwh_tx_data_t *desc_data;
3075 iwh_cmd_t *cmd;
3076 iwh_tx_cmd_t *tx;
3077 ieee80211_node_t *in;
3078 struct ieee80211_frame *wh, *mp_wh;
3079 struct ieee80211_key *k = NULL;
3080 mblk_t *m, *m0;
3081 int hdrlen, len, len0, mblen, off, err = IWH_SUCCESS;
3082 uint16_t masks = 0;
3083 uint32_t rate, s_id = 0;
3084 int txq_id = NON_QOS_TXQ;
3085 struct ieee80211_qosframe *qwh = NULL;
3086 int tid = WME_TID_INVALID;
3087
3088 if (NULL == ic) {
3089 return (IWH_FAIL);
3090 }
3091 sc = (iwh_sc_t *)ic;
3092
3093 if (sc->sc_flags & IWH_F_SUSPEND) {
3094 if ((type & IEEE80211_FC0_TYPE_MASK) !=
3095 IEEE80211_FC0_TYPE_DATA) {
3096 freemsg(mp);
3097 }
3098 err = IWH_FAIL;
3099 goto exit;
3100 }
3101
3102 if ((NULL == mp) || (MBLKL(mp) <= 0)) {
3103 return (IWH_FAIL);
3104 }
3105
3106 mp_wh = (struct ieee80211_frame *)mp->b_rptr;
3107
3108 /*
3109 	 * Determine the destination: the AP, or a station in IBSS mode
3110 */
3111 in = ieee80211_find_txnode(ic, mp_wh->i_addr1);
3112 if (NULL == in) {
3113 cmn_err(CE_WARN, "iwh_send(): "
3114 "failed to find tx node\n");
3115 freemsg(mp);
3116 sc->sc_tx_err++;
3117 err = IWH_SUCCESS;
3118 goto exit;
3119 }
3120
3121 /*
3122 * Determine TX queue according to traffic ID in frame
3123 * if working in QoS mode.
3124 */
3125 if (in->in_flags & IEEE80211_NODE_QOS) {
3126
3127 if ((type & IEEE80211_FC0_TYPE_MASK) ==
3128 IEEE80211_FC0_TYPE_DATA) {
3129
3130 if (mp_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
3131 qwh = (struct ieee80211_qosframe *)mp_wh;
3132
3133 tid = qwh->i_qos[0] & IEEE80211_QOS_TID;
3134 txq_id = iwh_wme_tid_to_txq(tid);
3135
3136 if (txq_id < TXQ_FOR_AC_MIN ||
3137 (txq_id > TXQ_FOR_AC_MAX)) {
3138 freemsg(mp);
3139 sc->sc_tx_err++;
3140 err = IWH_SUCCESS;
3141 goto exit;
3142 }
3143
3144 } else {
3145 txq_id = NON_QOS_TXQ;
3146 }
3147
3148 } else if ((type & IEEE80211_FC0_TYPE_MASK) ==
3149 IEEE80211_FC0_TYPE_MGT) {
3150 txq_id = QOS_TXQ_FOR_MGT;
3151 } else {
3152 txq_id = NON_QOS_TXQ;
3153 }
3154
3155 } else {
3156 txq_id = NON_QOS_TXQ;
3157 }
3158
3159 mutex_enter(&sc->sc_tx_lock);
3160 ring = &sc->sc_txq[txq_id];
3161 data = &ring->data[ring->cur];
3162 cmd = data->cmd;
3163 bzero(cmd, sizeof (*cmd));
3164
3165 ring->cur = (ring->cur + 1) % ring->count;
3166
3167 /*
3168 	 * Need to reschedule TX if the TX buffer is full.
3169 */
3170 if (ring->queued > ring->count - IWH_MAX_WIN_SIZE) {
3171 IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3172 "no txbuf\n"));
3173
3174 sc->sc_need_reschedule = 1;
3175 mutex_exit(&sc->sc_tx_lock);
3176
3177 if ((type & IEEE80211_FC0_TYPE_MASK) !=
3178 IEEE80211_FC0_TYPE_DATA) {
3179 freemsg(mp);
3180 }
3181 sc->sc_tx_nobuf++;
3182 err = IWH_FAIL;
3183 goto exit;
3184 }
3185
3186 ring->queued++;
3187
3188 mutex_exit(&sc->sc_tx_lock);
3189
3190 hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);
3191
3192 m = allocb(msgdsize(mp) + 32, BPRI_MED);
3193 	if (NULL == m) { /* cannot alloc buf, drop this packet */
3194 cmn_err(CE_WARN, "iwh_send(): "
3195 "failed to allocate msgbuf\n");
3196 freemsg(mp);
3197
3198 mutex_enter(&sc->sc_tx_lock);
3199 ring->queued--;
3200 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3201 sc->sc_need_reschedule = 0;
3202 mutex_exit(&sc->sc_tx_lock);
3203 mac_tx_update(ic->ic_mach);
3204 mutex_enter(&sc->sc_tx_lock);
3205 }
3206 mutex_exit(&sc->sc_tx_lock);
3207
3208 err = IWH_SUCCESS;
3209 goto exit;
3210 }
3211
3212 for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
3213 mblen = MBLKL(m0);
3214 bcopy(m0->b_rptr, m->b_rptr + off, mblen);
3215 off += mblen;
3216 }
3217
3218 m->b_wptr += off;
3219
3220 wh = (struct ieee80211_frame *)m->b_rptr;
3221
3222 /*
3223 	 * The net80211 module encapsulates outbound data frames,
3224 	 * adding some 802.11 frame fields.
3225 */
3226 if ((type & IEEE80211_FC0_TYPE_MASK) ==
3227 IEEE80211_FC0_TYPE_DATA) {
3228 (void) ieee80211_encap(ic, m, in);
3229 }
3230
3231 freemsg(mp);
3232
3233 cmd->hdr.type = REPLY_TX;
3234 cmd->hdr.flags = 0;
3235 cmd->hdr.qid = ring->qid;
3236
3237 tx = (iwh_tx_cmd_t *)cmd->data;
3238 tx->tx_flags = 0;
3239
3240 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3241 tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
3242 } else {
3243 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
3244 }
3245
3246 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3247 k = ieee80211_crypto_encap(ic, m);
3248 if (NULL == k) {
3249 freemsg(m);
3250 sc->sc_tx_err++;
3251
3252 mutex_enter(&sc->sc_tx_lock);
3253 ring->queued--;
3254 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3255 sc->sc_need_reschedule = 0;
3256 mutex_exit(&sc->sc_tx_lock);
3257 mac_tx_update(ic->ic_mach);
3258 mutex_enter(&sc->sc_tx_lock);
3259 }
3260 mutex_exit(&sc->sc_tx_lock);
3261
3262 err = IWH_SUCCESS;
3263 goto exit;
3264 }
3265
3266 /*
3267 * packet header may have moved, reset our local pointer
3268 */
3269 wh = (struct ieee80211_frame *)m->b_rptr;
3270 }
3271
3272 len = msgdsize(m);
3273
3274 #ifdef DEBUG
3275 if (iwh_dbg_flags & IWH_DEBUG_TX) {
3276 ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
3277 }
3278 #endif
3279
3280 tx->rts_retry_limit = IWH_TX_RTS_RETRY_LIMIT;
3281 tx->data_retry_limit = IWH_TX_DATA_RETRY_LIMIT;
3282
3283 /*
3284 * specific TX parameters for management frames
3285 */
3286 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
3287 IEEE80211_FC0_TYPE_MGT) {
3288 /*
3289 * mgmt frames are sent at 1M
3290 */
3291 if ((in->in_rates.ir_rates[0] &
3292 IEEE80211_RATE_VAL) != 0) {
3293 rate = in->in_rates.ir_rates[0] & IEEE80211_RATE_VAL;
3294 } else {
3295 rate = 2;
3296 }
3297
3298 tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3299
3300 /*
3301 * tell h/w to set timestamp in probe responses
3302 */
3303 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3304 IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
3305 tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
3306
3307 tx->data_retry_limit = 3;
3308 if (tx->data_retry_limit < tx->rts_retry_limit) {
3309 tx->rts_retry_limit = tx->data_retry_limit;
3310 }
3311 }
3312
3313 if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3314 IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
3315 ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3316 IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) {
3317 tx->timeout.pm_frame_timeout = LE_16(3);
3318 } else {
3319 tx->timeout.pm_frame_timeout = LE_16(2);
3320 }
3321
3322 } else {
3323 /*
3324 		 * rate scaling is done here in software for now;
3325 		 * rate scaling in hardware may come later.
3326 		 *
3327 		 * the tx rate is carried in the tx cmd flags and is initially
3328 		 * set to the maximum: 54M for 11g, 11M for 11b and 96M
3329 		 * for 11n.
3330 */
3331 if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
3332 rate = ic->ic_fixed_rate;
3333 } else {
3334 if ((in->in_flags & IEEE80211_NODE_HT) &&
3335 (sc->sc_ht_conf.ht_support)) {
3336 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
3337 rate = amrr->ht_mcs_idx;
3338 } else {
3339 if ((in->in_rates.ir_rates[in->in_txrate] &
3340 IEEE80211_RATE_VAL) != 0) {
3341 rate = in->in_rates.
3342 ir_rates[in->in_txrate] &
3343 IEEE80211_RATE_VAL;
3344 }
3345 }
3346 }
3347
3348 if (tid != WME_TID_INVALID) {
3349 tx->tid_tspec = (uint8_t)tid;
3350 tx->tx_flags &= LE_32(~TX_CMD_FLG_SEQ_CTL_MSK);
3351 } else {
3352 tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3353 }
3354
3355 tx->timeout.pm_frame_timeout = 0;
3356 }
3357
3358 IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3359 "tx rate[%d of %d] = %x",
3360 in->in_txrate, in->in_rates.ir_nrates, rate));
3361
3362 len0 = roundup(4 + sizeof (iwh_tx_cmd_t) + hdrlen, 4);
3363 if (len0 != (4 + sizeof (iwh_tx_cmd_t) + hdrlen)) {
3364 tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK);
3365 }
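	/*
	 * Illustration of the check above: the first TX buffer carries
	 * 4 bytes of command header, the tx command and the 802.11 header,
	 * rounded up to a 4-byte boundary.  If the unpadded total were 62,
	 * len0 becomes 64 and TX_CMD_FLG_MH_PAD_MSK is set, which is
	 * presumed to tell the firmware that pad bytes follow the MAC
	 * header; the exact firmware behaviour is an assumption here.
	 */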
3366
3367 /*
3368 * retrieve destination node's id
3369 */
3370 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3371 tx->sta_id = IWH_BROADCAST_ID;
3372 } else {
3373 tx->sta_id = IWH_AP_ID;
3374 }
3375
3376 if ((in->in_flags & IEEE80211_NODE_HT) &&
3377 (sc->sc_ht_conf.ht_support) &&
3378 ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
3379 IEEE80211_FC0_TYPE_DATA)) {
3380 if (rate >= HT_2CHAIN_RATE_MIN_IDX) {
3381 rate |= LE_32(RATE_MCS_ANT_AB_MSK);
3382 } else {
3383 rate |= LE_32(RATE_MCS_ANT_B_MSK);
3384 }
3385
3386 rate |= LE_32((1 << RATE_MCS_HT_POS));
3387
3388 tx->rate.r.rate_n_flags = rate;
3389
3390 } else {
3391 if (2 == rate || 4 == rate || 11 == rate || 22 == rate) {
3392 masks |= RATE_MCS_CCK_MSK;
3393 }
3394
3395 masks |= RATE_MCS_ANT_B_MSK;
3396 tx->rate.r.rate_n_flags = LE_32(iwh_rate_to_plcp(rate) | masks);
3397 }
3398
3399 IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3400 "tx flag = %x",
3401 tx->tx_flags));
3402
3403 tx->stop_time.life_time = LE_32(0xffffffff);
3404
3405 tx->len = LE_16(len);
3406
3407 tx->dram_lsb_ptr =
3408 LE_32(data->paddr_cmd + 4 + offsetof(iwh_tx_cmd_t, scratch));
3409 tx->dram_msb_ptr = 0;
3410 tx->driver_txop = 0;
3411 tx->next_frame_len = 0;
3412
3413 bcopy(m->b_rptr, tx + 1, hdrlen);
3414 m->b_rptr += hdrlen;
3415 bcopy(m->b_rptr, data->dma_data.mem_va, (len - hdrlen));
3416
3417 IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3418 "sending data: qid=%d idx=%d len=%d",
3419 ring->qid, ring->cur, len));
3420
3421 /*
3422 * first segment includes the tx cmd plus the 802.11 header,
3423 	 * the second includes the remainder of the 802.11 frame.
3424 */
3425
3426 mutex_enter(&sc->sc_tx_lock);
3427 cmd->hdr.idx = ring->desc_cur;
3428 desc_data = &ring->data[ring->desc_cur];
3429 desc = desc_data->desc;
3430 bzero(desc, sizeof (*desc));
3431 desc->val0 = 2 << 24;
3432 desc->pa[0].tb1_addr = data->paddr_cmd;
3433 desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
3434 ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
3435 desc->pa[0].val2 =
3436 ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
3437 ((len - hdrlen) << 20);
3438 IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3439 "phy addr1 = 0x%x phy addr2 = 0x%x "
3440 "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
3441 data->paddr_cmd, data->dma_data.cookie.dmac_address,
3442 len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
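	/*
	 * Reading of the packing above, derived from the expressions rather
	 * than from hardware documentation: val1 holds the first buffer's
	 * length (len0) in bits 4..15 and the low 16 bits of the second
	 * buffer's DMA address in bits 16..31; val2 holds the high 16 bits
	 * of that address in bits 0..15 and the second buffer's length
	 * (len - hdrlen) starting at bit 20.
	 */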
3443
3444 /*
3445 * kick ring
3446 */
3447 s_id = tx->sta_id;
3448
3449 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3450 tfd_offset[ring->desc_cur].val =
3451 (8 + len) | (s_id << 12);
3452 if (ring->desc_cur < IWH_MAX_WIN_SIZE) {
3453 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3454 tfd_offset[IWH_QUEUE_SIZE + ring->desc_cur].val =
3455 (8 + len) | (s_id << 12);
3456 }
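	/*
	 * The byte count table entry packs the frame length plus 8 bytes of
	 * overhead into the low bits and the station id at bit 12 and above;
	 * e.g. a 100-byte frame gives a low-bits value of 108.  Entries with
	 * an index below IWH_MAX_WIN_SIZE are mirrored at
	 * IWH_QUEUE_SIZE + index, presumably so the scheduler can read a
	 * whole window across the ring wrap; that purpose is an assumption
	 * based on similar Intel drivers.
	 */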
3457
3458 IWH_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
3459 IWH_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
3460
3461 ring->desc_cur = (ring->desc_cur + 1) % ring->count;
3462 IWH_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->desc_cur);
3463
3464 mutex_exit(&sc->sc_tx_lock);
3465 freemsg(m);
3466
3467 /*
3468 * release node reference
3469 */
3470 ieee80211_free_node(in);
3471
3472 ic->ic_stats.is_tx_bytes += len;
3473 ic->ic_stats.is_tx_frags++;
3474
3475 mutex_enter(&sc->sc_mt_lock);
3476 if (0 == sc->sc_tx_timer) {
3477 sc->sc_tx_timer = 4;
3478 }
3479 mutex_exit(&sc->sc_mt_lock);
3480
3481 exit:
3482 return (err);
3483 }
3484
3485 /*
3486  * invoked by GLD to handle IOCTL requests
3487 */
3488 static void
3489 iwh_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
3490 {
3491 iwh_sc_t *sc;
3492 ieee80211com_t *ic;
3493 int err = EINVAL;
3494
3495 if (NULL == arg) {
3496 return;
3497 }
3498 sc = (iwh_sc_t *)arg;
3499 ic = &sc->sc_ic;
3500
3501 err = ieee80211_ioctl(ic, wq, mp);
3502 if (ENETRESET == err) {
3503 /*
3504 		 * This is special handling for hidden AP connections.
3505 		 * In any case, we should make sure there is only one
3506 		 * 'scan' in the driver per 'connect' CLI command. So
3507 		 * when connecting to a hidden AP, the scan is only
3508 		 * sent out over the air once we know the desired
3509 		 * ESSID of the AP we want to connect to.
3510 */
3511 if (ic->ic_des_esslen) {
3512 if (sc->sc_flags & IWH_F_RUNNING) {
3513 iwh_m_stop(sc);
3514 (void) iwh_m_start(sc);
3515 (void) ieee80211_new_state(ic,
3516 IEEE80211_S_SCAN, -1);
3517 }
3518 }
3519 }
3520 }
3521
3522 /*
3523  * Callback functions for getting/setting properties
3524 */
3525 static int
3526 iwh_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3527 uint_t wldp_length, void *wldp_buf)
3528 {
3529 iwh_sc_t *sc;
3530 int err = EINVAL;
3531
3532 if (NULL == arg) {
3533 return (EINVAL);
3534 }
3535 sc = (iwh_sc_t *)arg;
3536
3537 err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3538 wldp_length, wldp_buf);
3539
3540 return (err);
3541 }
3542
3543 static void
3544 iwh_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3545 mac_prop_info_handle_t mph)
3546 {
3547 iwh_sc_t *sc = (iwh_sc_t *)arg;
3548
3549 ieee80211_propinfo(&sc->sc_ic, pr_name, wldp_pr_num, mph);
3550 }
3551
3552 static int
3553 iwh_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3554 uint_t wldp_length, const void *wldp_buf)
3555 {
3556 iwh_sc_t *sc;
3557 ieee80211com_t *ic;
3558 int err = EINVAL;
3559
3560 if (NULL == arg) {
3561 return (EINVAL);
3562 }
3563 sc = (iwh_sc_t *)arg;
3564 ic = &sc->sc_ic;
3565
3566 err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3567 wldp_buf);
3568
3569 if (err == ENETRESET) {
3570 if (ic->ic_des_esslen) {
3571 if (sc->sc_flags & IWH_F_RUNNING) {
3572 iwh_m_stop(sc);
3573 (void) iwh_m_start(sc);
3574 (void) ieee80211_new_state(ic,
3575 IEEE80211_S_SCAN, -1);
3576 }
3577 }
3578 err = 0;
3579 }
3580 return (err);
3581 }
3582
3583 /*
3584  * invoked by GLD to supply NIC and driver statistics
3585 */
3586 static int
3587 iwh_m_stat(void *arg, uint_t stat, uint64_t *val)
3588 {
3589 iwh_sc_t *sc;
3590 ieee80211com_t *ic;
3591 ieee80211_node_t *in;
3592
3593 if (NULL == arg) {
3594 return (EINVAL);
3595 }
3596 sc = (iwh_sc_t *)arg;
3597 ic = &sc->sc_ic;
3598
3599 mutex_enter(&sc->sc_glock);
3600
3601 switch (stat) {
3602 case MAC_STAT_IFSPEED:
3603 in = ic->ic_bss;
3604 *val = ((IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) ?
3605 IEEE80211_RATE(in->in_txrate) :
3606 ic->ic_fixed_rate) / 2 * 1000000;
3607 break;
3608
3609 case MAC_STAT_NOXMTBUF:
3610 *val = sc->sc_tx_nobuf;
3611 break;
3612
3613 case MAC_STAT_NORCVBUF:
3614 *val = sc->sc_rx_nobuf;
3615 break;
3616
3617 case MAC_STAT_IERRORS:
3618 *val = sc->sc_rx_err;
3619 break;
3620
3621 case MAC_STAT_RBYTES:
3622 *val = ic->ic_stats.is_rx_bytes;
3623 break;
3624
3625 case MAC_STAT_IPACKETS:
3626 *val = ic->ic_stats.is_rx_frags;
3627 break;
3628
3629 case MAC_STAT_OBYTES:
3630 *val = ic->ic_stats.is_tx_bytes;
3631 break;
3632
3633 case MAC_STAT_OPACKETS:
3634 *val = ic->ic_stats.is_tx_frags;
3635 break;
3636
3637 case MAC_STAT_OERRORS:
3638 case WIFI_STAT_TX_FAILED:
3639 *val = sc->sc_tx_err;
3640 break;
3641
3642 case WIFI_STAT_TX_RETRANS:
3643 *val = sc->sc_tx_retries;
3644 break;
3645
3646 case WIFI_STAT_FCS_ERRORS:
3647 case WIFI_STAT_WEP_ERRORS:
3648 case WIFI_STAT_TX_FRAGS:
3649 case WIFI_STAT_MCAST_TX:
3650 case WIFI_STAT_RTS_SUCCESS:
3651 case WIFI_STAT_RTS_FAILURE:
3652 case WIFI_STAT_ACK_FAILURE:
3653 case WIFI_STAT_RX_FRAGS:
3654 case WIFI_STAT_MCAST_RX:
3655 case WIFI_STAT_RX_DUPS:
3656 mutex_exit(&sc->sc_glock);
3657 return (ieee80211_stat(ic, stat, val));
3658
3659 default:
3660 mutex_exit(&sc->sc_glock);
3661 return (ENOTSUP);
3662 }
3663
3664 mutex_exit(&sc->sc_glock);
3665
3666 return (IWH_SUCCESS);
3667 }
3668
3669 /*
3670  * invoked by GLD to start or open the NIC
3671 */
3672 static int
3673 iwh_m_start(void *arg)
3674 {
3675 iwh_sc_t *sc;
3676 ieee80211com_t *ic;
3677 int err = IWH_FAIL;
3678
3679 if (NULL == arg) {
3680 return (EINVAL);
3681 }
3682 sc = (iwh_sc_t *)arg;
3683 ic = &sc->sc_ic;
3684
3685 err = iwh_init(sc);
3686 if (err != IWH_SUCCESS) {
3687 /*
3688 		 * The hw init failed (e.g. the RF switch is OFF). Return
3689 		 * success so that 'plumb' succeeds; iwh_thread() will retry
3690 		 * the initialization in the background.
3691 */
3692 atomic_or_32(&sc->sc_flags, IWH_F_HW_ERR_RECOVER);
3693 return (IWH_SUCCESS);
3694 }
3695
3696 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3697
3698 atomic_or_32(&sc->sc_flags, IWH_F_RUNNING);
3699
3700 return (IWH_SUCCESS);
3701 }
3702
3703 /*
3704  * invoked by GLD to stop or bring down the NIC
3705 */
3706 static void
3707 iwh_m_stop(void *arg)
3708 {
3709 iwh_sc_t *sc;
3710 ieee80211com_t *ic;
3711
3712 if (NULL == arg) {
3713 return;
3714 }
3715 sc = (iwh_sc_t *)arg;
3716 ic = &sc->sc_ic;
3717
3718 iwh_stop(sc);
3719
3720 /*
3721 * release buffer for calibration
3722 */
3723 iwh_release_calib_buffer(sc);
3724
3725 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3726
3727 atomic_and_32(&sc->sc_flags, ~IWH_F_HW_ERR_RECOVER);
3728 atomic_and_32(&sc->sc_flags, ~IWH_F_RATE_AUTO_CTL);
3729
3730 atomic_and_32(&sc->sc_flags, ~IWH_F_RUNNING);
3731 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
3732 }
3733
3734 /*
3735 * invoked by GLD to configure NIC
3736 */
3737 static int
3738 iwh_m_unicst(void *arg, const uint8_t *macaddr)
3739 {
3740 iwh_sc_t *sc;
3741 ieee80211com_t *ic;
3742 int err = IWH_SUCCESS;
3743
3744 if (NULL == arg) {
3745 return (EINVAL);
3746 }
3747 sc = (iwh_sc_t *)arg;
3748 ic = &sc->sc_ic;
3749
3750 if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3751 IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3752 mutex_enter(&sc->sc_glock);
3753 err = iwh_config(sc);
3754 mutex_exit(&sc->sc_glock);
3755 if (err != IWH_SUCCESS) {
3756 cmn_err(CE_WARN, "iwh_m_unicst(): "
3757 "failed to configure device\n");
3758 goto fail;
3759 }
3760 }
3761
3762 fail:
3763 return (err);
3764 }
3765
3766 /* ARGSUSED */
3767 static int
3768 iwh_m_multicst(void *arg, boolean_t add, const uint8_t *m)
3769 {
3770 return (IWH_SUCCESS);
3771 }
3772
3773 /* ARGSUSED */
3774 static int
3775 iwh_m_promisc(void *arg, boolean_t on)
3776 {
3777 return (IWH_SUCCESS);
3778 }
3779
3780 /*
3781  * kernel thread to deal with exceptional situations
3782 */
3783 static void
3784 iwh_thread(iwh_sc_t *sc)
3785 {
3786 ieee80211com_t *ic = &sc->sc_ic;
3787 clock_t clk;
3788 int err, n = 0, timeout = 0;
3789 uint32_t tmp;
3790 #ifdef DEBUG
3791 int times = 0;
3792 #endif
3793
3794 while (sc->sc_mf_thread_switch) {
3795 tmp = IWH_READ(sc, CSR_GP_CNTRL);
3796 if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
3797 atomic_and_32(&sc->sc_flags, ~IWH_F_RADIO_OFF);
3798 } else {
3799 atomic_or_32(&sc->sc_flags, IWH_F_RADIO_OFF);
3800 }
3801
3802 /*
3803 * If in SUSPEND or the RF is OFF, do nothing.
3804 */
3805 if (sc->sc_flags & IWH_F_RADIO_OFF) {
3806 delay(drv_usectohz(100000));
3807 continue;
3808 }
3809
3810 /*
3811 		 * recover from fatal hw error
3812 */
3813 if (ic->ic_mach &&
3814 (sc->sc_flags & IWH_F_HW_ERR_RECOVER)) {
3815
3816 IWH_DBG((IWH_DEBUG_FW, "iwh_thread(): "
3817 "try to recover fatal hw error: %d\n", times++));
3818
3819 iwh_stop(sc);
3820
3821 if (IWH_CHK_FAST_RECOVER(sc)) {
3822 /*
3823 * save runtime configuration
3824 */
3825 bcopy(&sc->sc_config, &sc->sc_config_save,
3826 sizeof (sc->sc_config));
3827 } else {
3828 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3829 delay(drv_usectohz(2000000 + n*500000));
3830 }
3831
3832 err = iwh_init(sc);
3833 if (err != IWH_SUCCESS) {
3834 n++;
3835 if (n < 20) {
3836 continue;
3837 }
3838 }
3839
3840 n = 0;
3841 if (!err) {
3842 atomic_or_32(&sc->sc_flags, IWH_F_RUNNING);
3843 }
3844
3845
3846 if (!IWH_CHK_FAST_RECOVER(sc) ||
3847 iwh_fast_recover(sc) != IWH_SUCCESS) {
3848 atomic_and_32(&sc->sc_flags,
3849 ~IWH_F_HW_ERR_RECOVER);
3850
3851 delay(drv_usectohz(2000000));
3852 if (sc->sc_ostate != IEEE80211_S_INIT) {
3853 ieee80211_new_state(ic,
3854 IEEE80211_S_SCAN, 0);
3855 }
3856 }
3857 }
3858
3859 if (ic->ic_mach &&
3860 (sc->sc_flags & IWH_F_SCANNING) && sc->sc_scan_pending) {
3861 IWH_DBG((IWH_DEBUG_SCAN, "iwh_thread(): "
3862 "wait for probe response\n"));
3863
3864 sc->sc_scan_pending--;
3865 delay(drv_usectohz(200000));
3866 ieee80211_next_scan(ic);
3867 }
3868
3869 /*
3870 * rate ctl
3871 */
3872 if (ic->ic_mach &&
3873 (sc->sc_flags & IWH_F_RATE_AUTO_CTL)) {
3874 clk = ddi_get_lbolt();
3875 if (clk > sc->sc_clk + drv_usectohz(1000000)) {
3876 iwh_amrr_timeout(sc);
3877 }
3878 }
3879
3880 if ((ic->ic_state == IEEE80211_S_RUN) &&
3881 (ic->ic_beaconmiss++ > 100)) { /* 10 seconds */
3882 cmn_err(CE_WARN, "iwh: beacon missed for 10 seconds\n");
3883 (void) ieee80211_new_state(ic,
3884 IEEE80211_S_INIT, -1);
3885 }
3886
3887 delay(drv_usectohz(100000));
3888
3889 mutex_enter(&sc->sc_mt_lock);
3890 if (sc->sc_tx_timer) {
3891 timeout++;
3892 if (10 == timeout) {
3893 sc->sc_tx_timer--;
3894 if (0 == sc->sc_tx_timer) {
3895 atomic_or_32(&sc->sc_flags,
3896 IWH_F_HW_ERR_RECOVER);
3897 sc->sc_ostate = IEEE80211_S_RUN;
3898 IWH_DBG((IWH_DEBUG_FW, "iwh_thread(): "
3899 "try to recover from "
3900 "send fail\n"));
3901 }
3902 timeout = 0;
3903 }
3904 }
3905 mutex_exit(&sc->sc_mt_lock);
3906 }
3907
3908 mutex_enter(&sc->sc_mt_lock);
3909 sc->sc_mf_thread = NULL;
3910 cv_signal(&sc->sc_mt_cv);
3911 mutex_exit(&sc->sc_mt_lock);
3912 }
3913
3914 /*
3915 * Send a command to the ucode.
3916 */
3917 static int
3918 iwh_cmd(iwh_sc_t *sc, int code, const void *buf, int size, int async)
3919 {
3920 iwh_tx_ring_t *ring = &sc->sc_txq[IWH_CMD_QUEUE_NUM];
3921 iwh_tx_desc_t *desc;
3922 iwh_cmd_t *cmd;
3923
3924 ASSERT(size <= sizeof (cmd->data));
3925 ASSERT(mutex_owned(&sc->sc_glock));
3926
3927 IWH_DBG((IWH_DEBUG_CMD, "iwh_cmd() "
3928 "code[%d]", code));
3929 desc = ring->data[ring->cur].desc;
3930 cmd = ring->data[ring->cur].cmd;
3931
3932 cmd->hdr.type = (uint8_t)code;
3933 cmd->hdr.flags = 0;
3934 cmd->hdr.qid = ring->qid;
3935 cmd->hdr.idx = ring->cur;
3936 bcopy(buf, cmd->data, size);
3937 (void) memset(desc, 0, sizeof (*desc));
3938
3939 desc->val0 = 1 << 24;
3940 desc->pa[0].tb1_addr =
3941 (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
3942 desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
3943
3944 if (async) {
3945 sc->sc_cmd_accum++;
3946 }
3947
3948 /*
3949 * kick cmd ring XXX
3950 */
3951 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3952 tfd_offset[ring->cur].val = 8;
3953 if (ring->cur < IWH_MAX_WIN_SIZE) {
3954 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3955 tfd_offset[IWH_QUEUE_SIZE + ring->cur].val = 8;
3956 }
3957 ring->cur = (ring->cur + 1) % ring->count;
3958 IWH_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3959
3960 if (async) {
3961 return (IWH_SUCCESS);
3962 } else {
3963 clock_t clk;
3964
3965 clk = ddi_get_lbolt() + drv_usectohz(2000000);
3966 while (sc->sc_cmd_flag != SC_CMD_FLG_DONE) {
3967 if (cv_timedwait(&sc->sc_cmd_cv,
3968 &sc->sc_glock, clk) < 0) {
3969 break;
3970 }
3971 }
3972
3973 if (SC_CMD_FLG_DONE == sc->sc_cmd_flag) {
3974 sc->sc_cmd_flag = SC_CMD_FLG_NONE;
3975 return (IWH_SUCCESS);
3976 } else {
3977 sc->sc_cmd_flag = SC_CMD_FLG_NONE;
3978 return (IWH_FAIL);
3979 }
3980 }
3981 }
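/*
 * Illustrative call pattern for iwh_cmd() (a sketch, not a specific caller):
 * the routine asserts that sc_glock is held, and the last argument selects
 * asynchronous (1) or synchronous (0) completion.
 *
 *	mutex_enter(&sc->sc_glock);
 *	err = iwh_cmd(sc, REPLY_BT_CONFIG, &bt, sizeof (bt), 0);
 *	mutex_exit(&sc->sc_glock);
 *
 * Here "bt" stands for an iwh_bt_cmd_t prepared by the caller, as in
 * iwh_config() below.
 */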
3982
3983 /*
3984  * ask uCode to set the LED of the NIC
3985 */
3986 static void
3987 iwh_set_led(iwh_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3988 {
3989 iwh_led_cmd_t led;
3990
3991 led.interval = LE_32(100000); /* unit: 100ms */
3992 led.id = id;
3993 led.off = off;
3994 led.on = on;
3995
3996 (void) iwh_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3997 }
3998
3999 /*
4000  * necessary NIC settings before authentication
4001 */
4002 static int
4003 iwh_hw_set_before_auth(iwh_sc_t *sc)
4004 {
4005 ieee80211com_t *ic = &sc->sc_ic;
4006 ieee80211_node_t *in = ic->ic_bss;
4007 int err = IWH_FAIL;
4008
4009 /*
4010 	 * update the adapter's configuration according
4011 	 * to the info of the target AP
4012 */
4013 IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
4014 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan));
4015
4016 if (ic->ic_curmode != IEEE80211_MODE_11NG) {
4017
4018 sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0;
4019 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0;
4020 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0;
4021
4022 if (IEEE80211_MODE_11B == ic->ic_curmode) {
4023 sc->sc_config.cck_basic_rates = 0x03;
4024 sc->sc_config.ofdm_basic_rates = 0;
4025 } else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
4026 (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
4027 sc->sc_config.cck_basic_rates = 0;
4028 sc->sc_config.ofdm_basic_rates = 0x15;
4029 } else { /* assume 802.11b/g */
4030 sc->sc_config.cck_basic_rates = 0x0f;
4031 sc->sc_config.ofdm_basic_rates = 0xff;
4032 }
4033 }
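	/*
	 * Illustration of the basic-rate bitmaps above (the bit-to-rate
	 * mapping is inferred from the values used, not from a header):
	 * cck_basic_rates 0x0f selects all four CCK rates (1/2/5.5/11M)
	 * and 0x03 only 1M/2M for pure 11b, while ofdm_basic_rates 0x15
	 * sets bits 0, 2 and 4, i.e. the mandatory OFDM rates 6/12/24M
	 * used on 5GHz channels, and 0xff enables all eight OFDM rates.
	 */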
4034
4035 sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
4036 RXON_FLG_SHORT_SLOT_MSK);
4037
4038 if (ic->ic_flags & IEEE80211_F_SHSLOT) {
4039 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
4040 } else {
4041 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
4042 }
4043
4044 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
4045 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
4046 } else {
4047 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
4048 }
4049
4050 IWH_DBG((IWH_DEBUG_80211, "iwh_hw_set_before_auth(): "
4051 "config chan %d flags %x "
4052 "filter_flags %x cck %x ofdm %x"
4053 " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
4054 LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags),
4055 LE_32(sc->sc_config.filter_flags),
4056 sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
4057 sc->sc_config.bssid[0], sc->sc_config.bssid[1],
4058 sc->sc_config.bssid[2], sc->sc_config.bssid[3],
4059 sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
4060
4061 err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
4062 sizeof (iwh_rxon_cmd_t), 1);
4063 if (err != IWH_SUCCESS) {
4064 cmn_err(CE_WARN, "iwh_hw_set_before_auth(): "
4065 "failed to config chan%d\n", sc->sc_config.chan);
4066 return (err);
4067 }
4068
4069 if ((sc->sc_dev_id != 0x423c) &&
4070 (sc->sc_dev_id != 0x423d)) {
4071 err = iwh_tx_power_table(sc, 1);
4072 if (err != IWH_SUCCESS) {
4073 return (err);
4074 }
4075 }
4076
4077 /*
4078 * add default AP node
4079 */
4080 err = iwh_add_ap_sta(sc);
4081 if (err != IWH_SUCCESS) {
4082 return (err);
4083 }
4084
4085 if ((sc->sc_dev_id != 0x423c) &&
4086 (sc->sc_dev_id != 0x423d)) {
4087 /*
4088 * set up retry rate table for AP node
4089 */
4090 err = iwh_ap_lq(sc);
4091 if (err != IWH_SUCCESS) {
4092 return (err);
4093 }
4094 }
4095
4096 return (err);
4097 }
4098
4099 /*
4100  * Send a scan request (assemble the scan cmd) to the firmware.
4101 */
4102 static int
4103 iwh_scan(iwh_sc_t *sc)
4104 {
4105 ieee80211com_t *ic = &sc->sc_ic;
4106 iwh_tx_ring_t *ring = &sc->sc_txq[IWH_CMD_QUEUE_NUM];
4107 iwh_tx_desc_t *desc;
4108 iwh_tx_data_t *data;
4109 iwh_cmd_t *cmd;
4110 iwh_scan_hdr_t *hdr;
4111 iwh_scan_chan_t chan;
4112 struct ieee80211_frame *wh;
4113 ieee80211_node_t *in = ic->ic_bss;
4114 uint8_t essid[IEEE80211_NWID_LEN+1];
4115 struct ieee80211_rateset *rs;
4116 enum ieee80211_phymode mode;
4117 uint8_t *frm;
4118 int i, pktlen, nrates;
4119
4120 data = &ring->data[ring->cur];
4121 desc = data->desc;
4122 cmd = (iwh_cmd_t *)data->dma_data.mem_va;
4123
4124 cmd->hdr.type = REPLY_SCAN_CMD;
4125 cmd->hdr.flags = 0;
4126 cmd->hdr.qid = ring->qid;
4127 cmd->hdr.idx = ring->cur | 0x40;
4128
4129 hdr = (iwh_scan_hdr_t *)cmd->data;
4130 (void) memset(hdr, 0, sizeof (iwh_scan_hdr_t));
4131 hdr->nchan = 1;
4132 hdr->quiet_time = LE_16(50);
4133 hdr->quiet_plcp_th = LE_16(1);
4134
4135 hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK);
4136 hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
4137 (0x7 << RXON_RX_CHAIN_VALID_POS) |
4138 (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
4139 (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
4140
4141 hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
4142 hdr->tx_cmd.sta_id = IWH_BROADCAST_ID;
4143 hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff);
4144 hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwh_rate_to_plcp(2));
4145 hdr->tx_cmd.rate.r.rate_n_flags |=
4146 LE_32(RATE_MCS_ANT_B_MSK |RATE_MCS_CCK_MSK);
4147 hdr->direct_scan[0].len = ic->ic_des_esslen;
4148 hdr->direct_scan[0].id = IEEE80211_ELEMID_SSID;
4149
4150 hdr->filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4151 RXON_FILTER_BCON_AWARE_MSK);
4152
4153 if (ic->ic_des_esslen) {
4154 bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
4155 essid[ic->ic_des_esslen] = '\0';
4156 IWH_DBG((IWH_DEBUG_SCAN, "iwh_scan(): "
4157 "directed scan %s\n", essid));
4158
4159 bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
4160 ic->ic_des_esslen);
4161 } else {
4162 bzero(hdr->direct_scan[0].ssid,
4163 sizeof (hdr->direct_scan[0].ssid));
4164 }
4165
4166 /*
4167 * a probe request frame is required after the REPLY_SCAN_CMD
4168 */
4169 wh = (struct ieee80211_frame *)(hdr + 1);
4170 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4171 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4172 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4173 (void) memset(wh->i_addr1, 0xff, 6);
4174 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
4175 (void) memset(wh->i_addr3, 0xff, 6);
4176 *(uint16_t *)&wh->i_dur[0] = 0;
4177 *(uint16_t *)&wh->i_seq[0] = 0;
4178
4179 frm = (uint8_t *)(wh + 1);
4180
4181 /*
4182 * essid IE
4183 */
4184 if (in->in_esslen) {
4185 bcopy(in->in_essid, essid, in->in_esslen);
4186 essid[in->in_esslen] = '\0';
4187 IWH_DBG((IWH_DEBUG_SCAN, "iwh_scan(): "
4188 "probe with ESSID %s\n",
4189 essid));
4190 }
4191 *frm++ = IEEE80211_ELEMID_SSID;
4192 *frm++ = in->in_esslen;
4193 bcopy(in->in_essid, frm, in->in_esslen);
4194 frm += in->in_esslen;
4195
4196 mode = ieee80211_chan2mode(ic, ic->ic_curchan);
4197 rs = &ic->ic_sup_rates[mode];
4198
4199 /*
4200 * supported rates IE
4201 */
4202 *frm++ = IEEE80211_ELEMID_RATES;
4203 nrates = rs->ir_nrates;
4204 if (nrates > IEEE80211_RATE_SIZE) {
4205 nrates = IEEE80211_RATE_SIZE;
4206 }
4207
4208 *frm++ = (uint8_t)nrates;
4209 bcopy(rs->ir_rates, frm, nrates);
4210 frm += nrates;
4211
4212 /*
4213 * supported xrates IE
4214 */
4215 if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
4216 nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
4217 *frm++ = IEEE80211_ELEMID_XRATES;
4218 *frm++ = (uint8_t)nrates;
4219 bcopy(rs->ir_rates + IEEE80211_RATE_SIZE, frm, nrates);
4220 frm += nrates;
4221 }
4222
4223 /*
4224 	 * optional IE (usually for WPA)
4225 */
4226 if (ic->ic_opt_ie != NULL) {
4227 bcopy(ic->ic_opt_ie, frm, ic->ic_opt_ie_len);
4228 frm += ic->ic_opt_ie_len;
4229 }
4230
4231 /* setup length of probe request */
4232 hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
4233 hdr->len = LE_16(hdr->nchan * sizeof (iwh_scan_chan_t) +
4234 LE_16(hdr->tx_cmd.len) + sizeof (iwh_scan_hdr_t));
4235
4236 /*
4237 	 * the attributes of the scan channels follow the probe
4238 	 * request frame.
4239 */
4240 for (i = 1; i <= hdr->nchan; i++) {
4241 if (ic->ic_des_esslen) {
4242 chan.type = LE_32(3);
4243 } else {
4244 chan.type = LE_32(1);
4245 }
4246
4247 chan.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
4248 chan.tpc.tx_gain = 0x28;
4249 chan.tpc.dsp_atten = 110;
4250 chan.active_dwell = LE_16(50);
4251 chan.passive_dwell = LE_16(120);
4252
4253 bcopy(&chan, frm, sizeof (iwh_scan_chan_t));
4254 frm += sizeof (iwh_scan_chan_t);
4255 }
4256
4257 pktlen = _PTRDIFF(frm, cmd);
4258
4259 (void) memset(desc, 0, sizeof (*desc));
4260 desc->val0 = 1 << 24;
4261 desc->pa[0].tb1_addr =
4262 (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
4263 desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
4264
4265 /*
4266 	 * filling the byte count table may not be necessary for commands,
4267 	 * but we fill it here anyway.
4268 */
4269 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
4270 .tfd_offset[ring->cur].val = 8;
4271 if (ring->cur < IWH_MAX_WIN_SIZE) {
4272 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
4273 tfd_offset[IWH_QUEUE_SIZE + ring->cur].val = 8;
4274 }
4275
4276 /*
4277 * kick cmd ring
4278 */
4279 ring->cur = (ring->cur + 1) % ring->count;
4280 IWH_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4281
4282 return (IWH_SUCCESS);
4283 }
4284
4285 /*
4286 * configure NIC by using ucode commands after loading ucode.
4287 */
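/*
 * Overview of the sequence below, as implemented by this function:
 * a POWER_TABLE_CMD (power management left disabled for now), a
 * REPLY_BT_CONFIG for Bluetooth coexistence, the initial RXON with the
 * CCK/OFDM/HT basic rate masks and RX chain selection, a REPLY_REMOVE_STA
 * to flush the station table, the TX power table (skipped for device IDs
 * 0x423c/0x423d, presumably the 5150 parts), the broadcast station entry
 * and, again except for those two device IDs, a default link quality
 * (retry rate) table for the broadcast station.
 */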
4288 static int
4289 iwh_config(iwh_sc_t *sc)
4290 {
4291 ieee80211com_t *ic = &sc->sc_ic;
4292 iwh_powertable_cmd_t powertable;
4293 iwh_bt_cmd_t bt;
4294 iwh_add_sta_t node;
4295 iwh_rem_sta_t rm_sta;
4296 const uint8_t bcast[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
4297 iwh_link_quality_cmd_t link_quality;
4298 int i, err = IWH_FAIL;
4299 uint16_t masks = 0;
4300
4301 /*
4302 * set power mode. Disable power management at present, do it later
4303 */
4304 (void) memset(&powertable, 0, sizeof (powertable));
4305 powertable.flags = LE_16(0x8);
4306 err = iwh_cmd(sc, POWER_TABLE_CMD, &powertable,
4307 sizeof (powertable), 0);
4308 if (err != IWH_SUCCESS) {
4309 cmn_err(CE_WARN, "iwh_config(): "
4310 "failed to set power mode\n");
4311 return (err);
4312 }
4313
4314 /*
4315 * configure bt coexistence
4316 */
4317 (void) memset(&bt, 0, sizeof (bt));
4318 bt.flags = 3;
4319 bt.lead_time = 0xaa;
4320 bt.max_kill = 1;
4321 err = iwh_cmd(sc, REPLY_BT_CONFIG, &bt,
4322 sizeof (bt), 0);
4323 if (err != IWH_SUCCESS) {
4324 cmn_err(CE_WARN, "iwh_config(): "
4325 		    "failed to configure bt coexistence\n");
4326 return (err);
4327 }
4328
4329 /*
4330 * configure rxon
4331 */
4332 (void) memset(&sc->sc_config, 0, sizeof (iwh_rxon_cmd_t));
4333 IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
4334 IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
4335 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
4336 sc->sc_config.flags = LE_32(RXON_FLG_BAND_24G_MSK);
4337 sc->sc_config.flags &= LE_32(~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4338 RXON_FLG_CHANNEL_MODE_PURE_40_MSK));
4339
4340 switch (ic->ic_opmode) {
4341 case IEEE80211_M_STA:
4342 sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
4343 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4344 RXON_FILTER_DIS_DECRYPT_MSK |
4345 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
4346 break;
4347
4348 case IEEE80211_M_IBSS:
4349 case IEEE80211_M_AHDEMO:
4350 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
4351
4352 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
4353 sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4354 RXON_FILTER_DIS_DECRYPT_MSK |
4355 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
4356 break;
4357
4358 case IEEE80211_M_HOSTAP:
4359 sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
4360 break;
4361
4362 case IEEE80211_M_MONITOR:
4363 sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
4364 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4365 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
4366 break;
4367 }
4368
4369 /*
4370 * Support all CCK rates.
4371 */
4372 sc->sc_config.cck_basic_rates = 0x0f;
4373
4374 /*
4375 * Support all OFDM rates.
4376 */
4377 sc->sc_config.ofdm_basic_rates = 0xff;
4378
4379 /*
4380 * Determine HT supported rates.
4381 */
4382 switch (sc->sc_ht_conf.rx_stream_count) {
4383 case 3:
4384 sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0xff;
4385 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
4386 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
4387 break;
4388 case 2:
4389 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
4390 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
4391 break;
4392 case 1:
4393 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
4394 break;
4395 default:
4396 cmn_err(CE_WARN, "iwh_config(): "
4397 "RX stream count %d is not in suitable range\n",
4398 sc->sc_ht_conf.rx_stream_count);
4399 return (IWH_FAIL);
4400 }
4401
4402 /*
4403 * set RX chains/antennas.
4404 */
4405 iwh_config_rxon_chain(sc);
4406
4407 err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
4408 sizeof (iwh_rxon_cmd_t), 0);
4409 if (err != IWH_SUCCESS) {
4410 cmn_err(CE_WARN, "iwh_config(): "
4411 "failed to set configure command\n");
4412 return (err);
4413 }
4414
4415 /*
4416 * remove all nodes in NIC
4417 */
4418 (void) memset(&rm_sta, 0, sizeof (rm_sta));
4419 rm_sta.num_sta = 1;
4420 bcopy(bcast, rm_sta.addr, 6);
4421
4422 err = iwh_cmd(sc, REPLY_REMOVE_STA, &rm_sta, sizeof (iwh_rem_sta_t), 0);
4423 if (err != IWH_SUCCESS) {
4424 cmn_err(CE_WARN, "iwh_config(): "
4425 "failed to remove broadcast node in hardware.\n");
4426 return (err);
4427 }
4428
4429 if ((sc->sc_dev_id != 0x423c) &&
4430 (sc->sc_dev_id != 0x423d)) {
4431 /*
4432 * configure TX power table
4433 */
4434 err = iwh_tx_power_table(sc, 0);
4435 if (err != IWH_SUCCESS) {
4436 return (err);
4437 }
4438 }
4439
4440 /*
4441 * add broadcast node so that we can send broadcast frame
4442 */
4443 (void) memset(&node, 0, sizeof (node));
4444 (void) memset(node.sta.addr, 0xff, 6);
4445 node.mode = 0;
4446 node.sta.sta_id = IWH_BROADCAST_ID;
4447 node.station_flags = 0;
4448
4449 err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
4450 if (err != IWH_SUCCESS) {
4451 cmn_err(CE_WARN, "iwh_config(): "
4452 "failed to add broadcast node\n");
4453 return (err);
4454 }
4455
4456 if ((sc->sc_dev_id != 0x423c) &&
4457 (sc->sc_dev_id != 0x423d)) {
4458 /*
4459 * TX_LINK_QUALITY cmd
4460 */
4461 (void) memset(&link_quality, 0, sizeof (link_quality));
4462 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
4463 masks |= RATE_MCS_CCK_MSK;
4464 masks |= RATE_MCS_ANT_B_MSK;
4465 masks &= ~RATE_MCS_ANT_A_MSK;
4466 link_quality.rate_n_flags[i] =
4467 LE_32(iwh_rate_to_plcp(2) | masks);
4468 }
4469
4470 link_quality.general_params.single_stream_ant_msk = 2;
4471 link_quality.general_params.dual_stream_ant_msk = 3;
4472 link_quality.agg_params.agg_dis_start_th = 3;
4473 link_quality.agg_params.agg_time_limit = LE_16(4000);
4474 link_quality.sta_id = IWH_BROADCAST_ID;
4475 err = iwh_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
4476 sizeof (link_quality), 0);
4477 if (err != IWH_SUCCESS) {
4478 cmn_err(CE_WARN, "iwh_config(): "
4479 "failed to config link quality table\n");
4480 return (err);
4481 }
4482 }
4483
4484 return (err);
4485 }
4486
4487 /*
4488 * quiesce(9E) entry point.
4489 * This function is called when the system is single-threaded at high
4490 * PIL with preemption disabled. Therefore, this function must not be
4491 * blocked.
4492 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
4493 * DDI_FAILURE indicates an error condition and should almost never happen.
4494 */
4495 static int
4496 iwh_quiesce(dev_info_t *dip)
4497 {
4498 iwh_sc_t *sc;
4499
4500 sc = ddi_get_soft_state(iwh_soft_state_p, ddi_get_instance(dip));
4501 if (sc == NULL) {
4502 return (DDI_FAILURE);
4503 }
4504
4505 #ifdef DEBUG
4506 /*
4507 	 * bypass any debug messages while quiescing
4508 */
4509 iwh_dbg_flags = 0;
4510 #endif
4511
4512 /*
4513 * No more blocking is allowed while we are in the
4514 * quiesce(9E) entry point.
4515 */
4516 atomic_or_32(&sc->sc_flags, IWH_F_QUIESCED);
4517
4518 /*
4519 * Disable and mask all interrupts.
4520 */
4521 iwh_stop(sc);
4522
4523 return (DDI_SUCCESS);
4524 }
4525
4526 static void
4527 iwh_stop_master(iwh_sc_t *sc)
4528 {
4529 uint32_t tmp;
4530 int n;
4531
4532 tmp = IWH_READ(sc, CSR_RESET);
4533 IWH_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
4534
4535 tmp = IWH_READ(sc, CSR_GP_CNTRL);
4536 if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
4537 CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) {
4538 return;
4539 }
4540
4541 for (n = 0; n < 2000; n++) {
4542 if (IWH_READ(sc, CSR_RESET) &
4543 CSR_RESET_REG_FLAG_MASTER_DISABLED) {
4544 break;
4545 }
4546 DELAY(1000);
4547 }
4548
4549 #ifdef DEBUG
4550 if (2000 == n) {
4551 IWH_DBG((IWH_DEBUG_HW, "iwh_stop_master(): "
4552 "timeout waiting for master stop\n"));
4553 }
4554 #endif
4555 }
4556
4557 static int
4558 iwh_power_up(iwh_sc_t *sc)
4559 {
4560 uint32_t tmp;
4561
4562 iwh_mac_access_enter(sc);
4563 tmp = iwh_reg_read(sc, ALM_APMG_PS_CTL);
4564 tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
4565 tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
4566 iwh_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4567 iwh_mac_access_exit(sc);
4568
4569 DELAY(5000);
4570 return (IWH_SUCCESS);
4571 }
4572
4573 /*
4574 * hardware initialization
4575 */
4576 static int
4577 iwh_preinit(iwh_sc_t *sc)
4578 {
4579 int n;
4580 uint8_t vlink;
4581 uint16_t radio_cfg;
4582 uint32_t tmp;
4583
4584 /*
4585 * clear any pending interrupts
4586 */
4587 IWH_WRITE(sc, CSR_INT, 0xffffffff);
4588
4589 tmp = IWH_READ(sc, CSR_GIO_CHICKEN_BITS);
4590 IWH_WRITE(sc, CSR_GIO_CHICKEN_BITS,
4591 tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
4592
4593 tmp = IWH_READ(sc, CSR_ANA_PLL_CFG);
4594 IWH_WRITE(sc, CSR_ANA_PLL_CFG, tmp | IWH_CSR_ANA_PLL_CFG);
4595
4596 tmp = IWH_READ(sc, CSR_GP_CNTRL);
4597 IWH_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4598
4599 /*
4600 * wait for clock ready
4601 */
4602 for (n = 0; n < 1000; n++) {
4603 if (IWH_READ(sc, CSR_GP_CNTRL) &
4604 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
4605 break;
4606 }
4607 DELAY(10);
4608 }
4609
4610 if (1000 == n) {
4611 return (ETIMEDOUT);
4612 }
4613
4614 iwh_mac_access_enter(sc);
4615
4616 iwh_reg_write(sc, ALM_APMG_CLK_EN, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4617
4618 DELAY(20);
4619 tmp = iwh_reg_read(sc, ALM_APMG_PCIDEV_STT);
4620 iwh_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
4621 APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
4622 iwh_mac_access_exit(sc);
4623
4624 radio_cfg = IWH_READ_EEP_SHORT(sc, EEP_SP_RADIO_CONFIGURATION);
4625 if (SP_RADIO_TYPE_MSK(radio_cfg) < SP_RADIO_TYPE_MAX) {
4626 tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4627 IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4628 tmp | SP_RADIO_TYPE_MSK(radio_cfg) |
4629 SP_RADIO_STEP_MSK(radio_cfg) |
4630 SP_RADIO_DASH_MSK(radio_cfg));
4631 } else {
4632 cmn_err(CE_WARN, "iwh_preinit(): "
4633 "radio configuration information in eeprom is wrong\n");
4634 return (IWH_FAIL);
4635 }
4636
4637
4638 IWH_WRITE(sc, CSR_INT_COALESCING, 512 / 32);
4639
4640 (void) iwh_power_up(sc);
4641
4642 if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
4643 tmp = ddi_get32(sc->sc_cfg_handle,
4644 (uint32_t *)(sc->sc_cfg_base + 0xe8));
4645 ddi_put32(sc->sc_cfg_handle,
4646 (uint32_t *)(sc->sc_cfg_base + 0xe8),
4647 tmp & ~(1 << 11));
4648 }
4649
4650 vlink = ddi_get8(sc->sc_cfg_handle,
4651 (uint8_t *)(sc->sc_cfg_base + 0xf0));
4652 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
4653 vlink & ~2);
4654
4655 tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4656 tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
4657 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
4658 IWH_WRITE(sc, CSR_SW_VER, tmp);
4659
4660 /*
4661 	 * make sure power is supplied to each part of the hardware
4662 */
4663 iwh_mac_access_enter(sc);
4664 tmp = iwh_reg_read(sc, ALM_APMG_PS_CTL);
4665 tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4666 iwh_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4667 DELAY(5);
4668
4669 tmp = iwh_reg_read(sc, ALM_APMG_PS_CTL);
4670 tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4671 iwh_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4672 iwh_mac_access_exit(sc);
4673
4674 return (IWH_SUCCESS);
4675 }
4676
4677 /*
4678  * set up semaphore flag to own EEPROM
4679 */
4680 static int
4681 iwh_eep_sem_down(iwh_sc_t *sc)
4682 {
4683 int count1, count2;
4684 uint32_t tmp;
4685
4686 for (count1 = 0; count1 < 1000; count1++) {
4687 tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4688 IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4689 tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4690
4691 for (count2 = 0; count2 < 2; count2++) {
4692 if (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
4693 CSR_HW_IF_CONFIG_REG_EEP_SEM) {
4694 return (IWH_SUCCESS);
4695 }
4696 DELAY(10000);
4697 }
4698 }
4699
4700 return (IWH_FAIL);
4701 }
4702
4703 /*
4704  * reset semaphore flag to release EEPROM
4705 */
4706 static void
4707 iwh_eep_sem_up(iwh_sc_t *sc)
4708 {
4709 uint32_t tmp;
4710
4711 tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4712 IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4713 tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4714 }
4715
4716 /*
4717  * This function reads all information from the eeprom
4718 */
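/*
 * Read protocol used below: the byte offset of each 16-bit word
 * (addr << 1) is written to CSR_EEPROM_REG, bit 1 of the register is
 * cleared (apparently to start the access), and bit 0 is polled until
 * the read completes; the data is delivered in the upper 16 bits of
 * the same register.
 */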
4719 static int
4720 iwh_eep_load(iwh_sc_t *sc)
4721 {
4722 int i, rr;
4723 uint32_t rv, tmp, eep_gp;
4724 uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
4725 uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
4726
4727 /*
4728 * read eeprom gp register in CSR
4729 */
4730 eep_gp = IWH_READ(sc, CSR_EEPROM_GP);
4731 if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
4732 CSR_EEPROM_GP_BAD_SIGNATURE) {
4733 IWH_DBG((IWH_DEBUG_EEPROM, "iwh_eep_load(): "
4734 		    "could not find eeprom\n"));
4735 return (IWH_FAIL);
4736 }
4737
4738 rr = iwh_eep_sem_down(sc);
4739 if (rr != 0) {
4740 IWH_DBG((IWH_DEBUG_EEPROM, "iwh_eep_load(): "
4741 "driver failed to own EEPROM\n"));
4742 return (IWH_FAIL);
4743 }
4744
4745 for (addr = 0; addr < eep_sz; addr += 2) {
4746 IWH_WRITE(sc, CSR_EEPROM_REG, addr<<1);
4747 tmp = IWH_READ(sc, CSR_EEPROM_REG);
4748 IWH_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
4749
4750 for (i = 0; i < 10; i++) {
4751 rv = IWH_READ(sc, CSR_EEPROM_REG);
4752 if (rv & 1) {
4753 break;
4754 }
4755 DELAY(10);
4756 }
4757
4758 if (!(rv & 1)) {
4759 IWH_DBG((IWH_DEBUG_EEPROM, "iwh_eep_load(): "
4760 			    "timeout when reading eeprom\n"));
4761 iwh_eep_sem_up(sc);
4762 return (IWH_FAIL);
4763 }
4764
4765 eep_p[addr/2] = LE_16(rv >> 16);
4766 }
4767
4768 iwh_eep_sem_up(sc);
4769 return (IWH_SUCCESS);
4770 }
4771
4772 /*
4773 * initialize mac address in ieee80211com_t struct
4774 */
4775 static void
4776 iwh_get_mac_from_eep(iwh_sc_t *sc)
4777 {
4778 ieee80211com_t *ic = &sc->sc_ic;
4779
4780 IEEE80211_ADDR_COPY(ic->ic_macaddr, &sc->sc_eep_map[EEP_MAC_ADDRESS]);
4781
4782 IWH_DBG((IWH_DEBUG_EEPROM, "iwh_get_mac_from_eep(): "
4783 "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4784 ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4785 ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4786 }
4787
4788 /*
4789 * main initialization function
4790 */
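/*
 * Firmware boot is a two-stage process, as the code below shows: the
 * "init" ucode segment is loaded and started first (presumably producing
 * the calibration results cached by iwh_save_calib_result()), the chipset
 * is then stopped and re-initialized, and finally the runtime ucode
 * segment is loaded and started before iwh_config() programs RXON,
 * TX power and the other ucode commands.
 */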
4791 static int
4792 iwh_init(iwh_sc_t *sc)
4793 {
4794 int err = IWH_FAIL;
4795 clock_t clk;
4796
4797 /*
4798 * release buffer for calibration
4799 */
4800 iwh_release_calib_buffer(sc);
4801
4802 mutex_enter(&sc->sc_glock);
4803 atomic_and_32(&sc->sc_flags, ~IWH_F_FW_INIT);
4804
4805 err = iwh_init_common(sc);
4806 if (err != IWH_SUCCESS) {
4807 mutex_exit(&sc->sc_glock);
4808 return (IWH_FAIL);
4809 }
4810
4811 /*
4812 * backup ucode data part for future use.
4813 */
4814 bcopy(sc->sc_dma_fw_data.mem_va,
4815 sc->sc_dma_fw_data_bak.mem_va,
4816 sc->sc_dma_fw_data.alength);
4817
4818 /* load firmware init segment into NIC */
4819 err = iwh_load_init_firmware(sc);
4820 if (err != IWH_SUCCESS) {
4821 cmn_err(CE_WARN, "iwh_init(): "
4822 "failed to setup init firmware\n");
4823 mutex_exit(&sc->sc_glock);
4824 return (IWH_FAIL);
4825 }
4826
4827 /*
4828 	 * now press "execute" to start running
4829 */
4830 IWH_WRITE(sc, CSR_RESET, 0);
4831
4832 clk = ddi_get_lbolt() + drv_usectohz(1000000);
4833 while (!(sc->sc_flags & IWH_F_FW_INIT)) {
4834 if (cv_timedwait(&sc->sc_ucode_cv,
4835 &sc->sc_glock, clk) < 0) {
4836 break;
4837 }
4838 }
4839
4840 if (!(sc->sc_flags & IWH_F_FW_INIT)) {
4841 cmn_err(CE_WARN, "iwh_init(): "
4842 "failed to process init alive.\n");
4843 mutex_exit(&sc->sc_glock);
4844 return (IWH_FAIL);
4845 }
4846
4847 mutex_exit(&sc->sc_glock);
4848
4849 /*
4850 	 * stop the chipset before initializing it again
4851 */
4852 iwh_stop(sc);
4853
4854 mutex_enter(&sc->sc_glock);
4855 atomic_and_32(&sc->sc_flags, ~IWH_F_FW_INIT);
4856
4857 err = iwh_init_common(sc);
4858 if (err != IWH_SUCCESS) {
4859 mutex_exit(&sc->sc_glock);
4860 return (IWH_FAIL);
4861 }
4862
4863 /*
4864 * load firmware run segment into NIC
4865 */
4866 err = iwh_load_run_firmware(sc);
4867 if (err != IWH_SUCCESS) {
4868 cmn_err(CE_WARN, "iwh_init(): "
4869 "failed to setup run firmware\n");
4870 mutex_exit(&sc->sc_glock);
4871 return (IWH_FAIL);
4872 }
4873
4874 /*
4875 	 * now press "execute" to start running
4876 */
4877 IWH_WRITE(sc, CSR_RESET, 0);
4878
4879 clk = ddi_get_lbolt() + drv_usectohz(1000000);
4880 while (!(sc->sc_flags & IWH_F_FW_INIT)) {
4881 if (cv_timedwait(&sc->sc_ucode_cv,
4882 &sc->sc_glock, clk) < 0) {
4883 break;
4884 }
4885 }
4886
4887 if (!(sc->sc_flags & IWH_F_FW_INIT)) {
4888 cmn_err(CE_WARN, "iwh_init(): "
4889 "failed to process runtime alive.\n");
4890 mutex_exit(&sc->sc_glock);
4891 return (IWH_FAIL);
4892 }
4893
4894 mutex_exit(&sc->sc_glock);
4895
4896 DELAY(1000);
4897
4898 mutex_enter(&sc->sc_glock);
4899 atomic_and_32(&sc->sc_flags, ~IWH_F_FW_INIT);
4900
4901 /*
4902 	 * at this point, the firmware is loaded OK, so configure the hardware
4903 * with the ucode API, including rxon, txpower, etc.
4904 */
4905 err = iwh_config(sc);
4906 if (err) {
4907 cmn_err(CE_WARN, "iwh_init(): "
4908 "failed to configure device\n");
4909 mutex_exit(&sc->sc_glock);
4910 return (IWH_FAIL);
4911 }
4912
4913 /*
4914 * at this point, hardware may receive beacons :)
4915 */
4916 mutex_exit(&sc->sc_glock);
4917 return (IWH_SUCCESS);
4918 }
4919
4920 /*
4921 * stop or disable NIC
4922 */
4923 static void
4924 iwh_stop(iwh_sc_t *sc)
4925 {
4926 uint32_t tmp;
4927 int i;
4928
4929 /*
4930 	 * bypass if it's quiesced
4931 */
4932 if (!(sc->sc_flags & IWH_F_QUIESCED)) {
4933 mutex_enter(&sc->sc_glock);
4934 }
4935
4936 IWH_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4937 /*
4938 * disable interrupts
4939 */
4940 IWH_WRITE(sc, CSR_INT_MASK, 0);
4941 IWH_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
4942 IWH_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
4943
4944 /*
4945 * reset all Tx rings
4946 */
4947 for (i = 0; i < IWH_NUM_QUEUES; i++) {
4948 iwh_reset_tx_ring(sc, &sc->sc_txq[i]);
4949 }
4950
4951 /*
4952 * reset Rx ring
4953 */
4954 iwh_reset_rx_ring(sc);
4955
4956 iwh_mac_access_enter(sc);
4957 iwh_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4958 iwh_mac_access_exit(sc);
4959
4960 DELAY(5);
4961
4962 iwh_stop_master(sc);
4963
4964 mutex_enter(&sc->sc_mt_lock);
4965 sc->sc_tx_timer = 0;
4966 mutex_exit(&sc->sc_mt_lock);
4967
4968 tmp = IWH_READ(sc, CSR_RESET);
4969 IWH_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
4970
4971 /*
4972 	 * bypass if it's quiesced
4973 */
4974 if (!(sc->sc_flags & IWH_F_QUIESCED)) {
4975 mutex_exit(&sc->sc_glock);
4976 }
4977 }
4978
4979 /*
4980 * Naive implementation of the Adaptive Multi Rate Retry algorithm:
4981 * "IEEE 802.11 Rate Adaptation: A Practical Approach"
4982 * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
4983 * INRIA Sophia - Projet Planete
4984 * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
4985 */
4986 #define is_success(amrr) \
4987 ((amrr)->retrycnt < (amrr)->txcnt / 10)
4988 #define is_failure(amrr) \
4989 ((amrr)->retrycnt > (amrr)->txcnt / 3)
4990 #define is_enough(amrr) \
4991 ((amrr)->txcnt > 200)
4992 #define not_very_few(amrr) \
4993 ((amrr)->txcnt > 40)
4994 #define is_min_rate(in) \
4995 (0 == (in)->in_txrate)
4996 #define is_max_rate(in) \
4997 ((in)->in_rates.ir_nrates - 1 == (in)->in_txrate)
4998 #define increase_rate(in) \
4999 ((in)->in_txrate++)
5000 #define decrease_rate(in) \
5001 ((in)->in_txrate--)
5002 #define reset_cnt(amrr) \
5003 { (amrr)->txcnt = (amrr)->retrycnt = 0; }
5004
5005 #define IWH_AMRR_MIN_SUCCESS_THRESHOLD 1
5006 #define IWH_AMRR_MAX_SUCCESS_THRESHOLD 15
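/*
 * In terms of the macros above: a window of traffic counts as a success
 * when fewer than one tenth of the transmitted frames needed a retry,
 * and as a failure when more than a third did.  Rate increases are only
 * considered after more than 200 frames (is_enough), rate decreases
 * already after more than 40 (not_very_few).  The success threshold
 * starts at 1 and is pushed toward 15 whenever a rate increase is
 * followed by a failure, so an unstable link probes upward less often.
 */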
5007
5008 static void
5009 iwh_amrr_init(iwh_amrr_t *amrr)
5010 {
5011 amrr->success = 0;
5012 amrr->recovery = 0;
5013 amrr->txcnt = amrr->retrycnt = 0;
5014 amrr->success_threshold = IWH_AMRR_MIN_SUCCESS_THRESHOLD;
5015 amrr->ht_mcs_idx = 0; /* 6Mbps */
5016 }
5017
5018 static void
5019 iwh_amrr_timeout(iwh_sc_t *sc)
5020 {
5021 ieee80211com_t *ic = &sc->sc_ic;
5022
5023 IWH_DBG((IWH_DEBUG_RATECTL, "iwh_amrr_timeout(): "
5024 "enter\n"));
5025
5026 if (IEEE80211_M_STA == ic->ic_opmode) {
5027 iwh_amrr_ratectl(NULL, ic->ic_bss);
5028 } else {
5029 ieee80211_iterate_nodes(&ic->ic_sta, iwh_amrr_ratectl, NULL);
5030 }
5031
5032 sc->sc_clk = ddi_get_lbolt();
5033 }
5034
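/*
 * For HT nodes the four helpers below walk the node's negotiated HT rate
 * set, restricted to the MCS indices this device can transmit
 * (sc_ht_conf.tx_support_mcs): they test whether the current ht_mcs_idx
 * is already the highest or lowest usable MCS and step it to the next
 * usable MCS when AMRR asks for a rate change.  Legacy nodes simply use
 * the in_txrate based macros above.
 */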
5035 static int
5036 iwh_is_max_rate(ieee80211_node_t *in)
5037 {
5038 int i;
5039 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5040 uint8_t r = (uint8_t)amrr->ht_mcs_idx;
5041 ieee80211com_t *ic = in->in_ic;
5042 iwh_sc_t *sc = (iwh_sc_t *)ic;
5043
5044 if (in->in_flags & IEEE80211_NODE_HT) {
5045 for (i = in->in_htrates.rs_nrates - 1; i >= 0; i--) {
5046 r = in->in_htrates.rs_rates[i] &
5047 IEEE80211_RATE_VAL;
5048 if (sc->sc_ht_conf.tx_support_mcs[r/8] &
5049 (1 << (r%8))) {
5050 break;
5051 }
5052 }
5053
5054 return (r == (uint8_t)amrr->ht_mcs_idx);
5055 } else {
5056 return (is_max_rate(in));
5057 }
5058 }
5059
5060 static int
5061 iwh_is_min_rate(ieee80211_node_t *in)
5062 {
5063 int i;
5064 uint8_t r = 0;
5065 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5066 ieee80211com_t *ic = in->in_ic;
5067 iwh_sc_t *sc = (iwh_sc_t *)ic;
5068
5069 if (in->in_flags & IEEE80211_NODE_HT) {
5070 for (i = 0; i < in->in_htrates.rs_nrates; i++) {
5071 r = in->in_htrates.rs_rates[i] &
5072 IEEE80211_RATE_VAL;
5073 if (sc->sc_ht_conf.tx_support_mcs[r/8] &
5074 (1 << (r%8))) {
5075 break;
5076 }
5077 }
5078
5079 return (r == (uint8_t)amrr->ht_mcs_idx);
5080 } else {
5081 return (is_min_rate(in));
5082 }
5083 }
5084
5085 static void
5086 iwh_increase_rate(ieee80211_node_t *in)
5087 {
5088 int i;
5089 uint8_t r;
5090 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5091 ieee80211com_t *ic = in->in_ic;
5092 iwh_sc_t *sc = (iwh_sc_t *)ic;
5093
5094 if (in->in_flags & IEEE80211_NODE_HT) {
5095 again:
5096 amrr->ht_mcs_idx++;
5097
5098 for (i = 0; i < in->in_htrates.rs_nrates; i++) {
5099 r = in->in_htrates.rs_rates[i] &
5100 IEEE80211_RATE_VAL;
5101 if ((r == (uint8_t)amrr->ht_mcs_idx) &&
5102 (sc->sc_ht_conf.tx_support_mcs[r/8] &
5103 (1 << (r%8)))) {
5104 break;
5105 }
5106 }
5107
5108 if (i >= in->in_htrates.rs_nrates) {
5109 goto again;
5110 }
5111 } else {
5112 increase_rate(in);
5113 }
5114 }
5115
5116 static void
5117 iwh_decrease_rate(ieee80211_node_t *in)
5118 {
5119 int i;
5120 uint8_t r;
5121 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5122 ieee80211com_t *ic = in->in_ic;
5123 iwh_sc_t *sc = (iwh_sc_t *)ic;
5124
5125 if (in->in_flags & IEEE80211_NODE_HT) {
5126 again:
5127 amrr->ht_mcs_idx--;
5128
5129 for (i = 0; i < in->in_htrates.rs_nrates; i++) {
5130 r = in->in_htrates.rs_rates[i] &
5131 IEEE80211_RATE_VAL;
5132 if ((r == (uint8_t)amrr->ht_mcs_idx) &&
5133 (sc->sc_ht_conf.tx_support_mcs[r/8] &
5134 (1 << (r%8)))) {
5135 break;
5136 }
5137 }
5138
5139 if (i >= in->in_htrates.rs_nrates) {
5140 goto again;
5141 }
5142 } else {
5143 decrease_rate(in);
5144 }
5145 }
5146
5147 /* ARGSUSED */
5148 static void
5149 iwh_amrr_ratectl(void *arg, ieee80211_node_t *in)
5150 {
5151 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5152 int need_change = 0;
5153
5154 if (is_success(amrr) && is_enough(amrr)) {
5155 amrr->success++;
5156 if (amrr->success >= amrr->success_threshold &&
5157 !iwh_is_max_rate(in)) {
5158 amrr->recovery = 1;
5159 amrr->success = 0;
5160 iwh_increase_rate(in);
5161 IWH_DBG((IWH_DEBUG_RATECTL, "iwh_amrr_ratectl(): "
5162 "AMRR increasing rate %d "
5163 "(txcnt=%d retrycnt=%d), mcs_idx=%d\n",
5164 in->in_txrate, amrr->txcnt,
5165 amrr->retrycnt, amrr->ht_mcs_idx));
5166 need_change = 1;
5167 } else {
5168 amrr->recovery = 0;
5169 }
5170 } else if (not_very_few(amrr) && is_failure(amrr)) {
5171 amrr->success = 0;
5172 if (!iwh_is_min_rate(in)) {
5173 if (amrr->recovery) {
5174 amrr->success_threshold++;
5175 if (amrr->success_threshold >
5176 IWH_AMRR_MAX_SUCCESS_THRESHOLD) {
5177 amrr->success_threshold =
5178 IWH_AMRR_MAX_SUCCESS_THRESHOLD;
5179 }
5180 } else {
5181 amrr->success_threshold =
5182 IWH_AMRR_MIN_SUCCESS_THRESHOLD;
5183 }
5184 iwh_decrease_rate(in);
5185 IWH_DBG((IWH_DEBUG_RATECTL, "iwh_amrr_ratectl(): "
5186 "AMRR decreasing rate %d "
5187 "(txcnt=%d retrycnt=%d), mcs_idx=%d\n",
5188 in->in_txrate, amrr->txcnt,
5189 amrr->retrycnt, amrr->ht_mcs_idx));
5190 need_change = 1;
5191 }
5192 amrr->recovery = 0; /* paper is incorrect */
5193 }
5194
5195 if (is_enough(amrr) || need_change) {
5196 reset_cnt(amrr);
5197 }
5198 }
5199
5200 /*
5201 * translate indirect address in eeprom to direct address
5202  * in eeprom and return the address of the entry whose indirect address
5203 * is indi_addr
5204 */
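/*
 * An indirect address carries a type field that selects one of the link
 * words stored in the EEPROM (general, host, regulatory, calibration,
 * process adjust or others).  The selected link word is presumably a
 * 16-bit word index, so it is shifted left by one to form a byte offset
 * and added to the masked offset part of the indirect address.
 */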
5205 static uint8_t *
5206 iwh_eep_addr_trans(iwh_sc_t *sc, uint32_t indi_addr)
5207 {
5208 uint32_t di_addr;
5209 uint16_t temp;
5210
5211 if (!(indi_addr & INDIRECT_ADDRESS)) {
5212 di_addr = indi_addr;
5213 return (&sc->sc_eep_map[di_addr]);
5214 }
5215
5216 switch (indi_addr & INDIRECT_TYPE_MSK) {
5217 case INDIRECT_GENERAL:
5218 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_GENERAL);
5219 break;
5220
5221 case INDIRECT_HOST:
5222 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_HOST);
5223 break;
5224
5225 case INDIRECT_REGULATORY:
5226 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_REGULATORY);
5227 break;
5228
5229 case INDIRECT_CALIBRATION:
5230 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_CALIBRATION);
5231 break;
5232
5233 case INDIRECT_PROCESS_ADJST:
5234 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_PROCESS_ADJST);
5235 break;
5236
5237 case INDIRECT_OTHERS:
5238 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_OTHERS);
5239 break;
5240
5241 default:
5242 temp = 0;
5243 cmn_err(CE_WARN, "iwh_eep_addr_trans(): "
5244 "incorrect indirect eeprom address.\n");
5245 break;
5246 }
5247
5248 di_addr = (indi_addr & ADDRESS_MSK) + (temp << 1);
5249
5250 return (&sc->sc_eep_map[di_addr]);
5251 }
5252
5253 /*
5254  * load a section of ucode into the NIC
5255 */
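/*
 * The segment is moved by the firmware-load service DMA channel
 * (IWH_FH_SRVC_CHNL): the channel is paused, the SRAM destination
 * address, the DRAM source address and the length of the image are
 * programmed, the transfer buffer is marked valid, and the channel is
 * re-enabled with "interrupt host at end of TFD" so that an interrupt
 * signals completion of the copy.
 */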
5256 static int
5257 iwh_put_seg_fw(iwh_sc_t *sc, uint32_t addr_s, uint32_t addr_d, uint32_t len)
5258 {
5259
5260 iwh_mac_access_enter(sc);
5261
5262 IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(IWH_FH_SRVC_CHNL),
5263 IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
5264
5265 IWH_WRITE(sc, IWH_FH_SRVC_CHNL_SRAM_ADDR_REG(IWH_FH_SRVC_CHNL), addr_d);
5266
5267 IWH_WRITE(sc, IWH_FH_TFDIB_CTRL0_REG(IWH_FH_SRVC_CHNL),
5268 (addr_s & FH_MEM_TFDIB_DRAM_ADDR_LSB_MASK));
5269
5270 IWH_WRITE(sc, IWH_FH_TFDIB_CTRL1_REG(IWH_FH_SRVC_CHNL), len);
5271
5272 IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_BUF_STS_REG(IWH_FH_SRVC_CHNL),
5273 (1 << IWH_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
5274 (1 << IWH_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
5275 IWH_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
5276
5277 IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(IWH_FH_SRVC_CHNL),
5278 IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
5279 IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
5280 IWH_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
5281
5282 iwh_mac_access_exit(sc);
5283
5284 return (IWH_SUCCESS);
5285 }
5286
5287 /*
5288  * necessary settings during the alive notification
5289 */
5290 static int
5291 iwh_alive_common(iwh_sc_t *sc)
5292 {
5293 uint32_t base;
5294 uint32_t i;
5295 iwh_wimax_coex_cmd_t w_cmd;
5296 iwh_calibration_crystal_cmd_t c_cmd;
5297 uint32_t rv = IWH_FAIL;
5298
5299 /*
5300 * initialize SCD related registers to make TX work.
5301 */
5302 iwh_mac_access_enter(sc);
5303
5304 /*
5305 * read sram address of data base.
5306 */
5307 sc->sc_scd_base = iwh_reg_read(sc, IWH_SCD_SRAM_BASE_ADDR);
5308
5309 for (base = sc->sc_scd_base + IWH_SCD_CONTEXT_DATA_OFFSET;
5310 base < sc->sc_scd_base + IWH_SCD_TX_STTS_BITMAP_OFFSET;
5311 base += 4) {
5312 iwh_mem_write(sc, base, 0);
5313 }
5314
5315 for (; base < sc->sc_scd_base + IWH_SCD_TRANSLATE_TBL_OFFSET;
5316 base += 4) {
5317 iwh_mem_write(sc, base, 0);
5318 }
5319
5320 for (i = 0; i < sizeof (uint16_t) * IWH_NUM_QUEUES; i += 4) {
5321 iwh_mem_write(sc, base + i, 0);
5322 }
5323
5324 iwh_reg_write(sc, IWH_SCD_DRAM_BASE_ADDR,
5325 sc->sc_dma_sh.cookie.dmac_address >> 10);
5326
5327 iwh_reg_write(sc, IWH_SCD_QUEUECHAIN_SEL,
5328 IWH_SCD_QUEUECHAIN_SEL_ALL(IWH_NUM_QUEUES));
5329
5330 iwh_reg_write(sc, IWH_SCD_AGGR_SEL, 0);
5331
5332 for (i = 0; i < IWH_NUM_QUEUES; i++) {
5333 iwh_reg_write(sc, IWH_SCD_QUEUE_RDPTR(i), 0);
5334 IWH_WRITE(sc, HBUS_TARG_WRPTR, 0 | (i << 8));
5335 iwh_mem_write(sc, sc->sc_scd_base +
5336 IWH_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
5337 iwh_mem_write(sc, sc->sc_scd_base +
5338 IWH_SCD_CONTEXT_QUEUE_OFFSET(i) +
5339 sizeof (uint32_t),
5340 ((SCD_WIN_SIZE << IWH_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
5341 IWH_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
5342 ((SCD_FRAME_LIMIT <<
5343 IWH_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
5344 IWH_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
5345 }
5346
5347 iwh_reg_write(sc, IWH_SCD_INTERRUPT_MASK, (1 << IWH_NUM_QUEUES) - 1);
5348
5349 iwh_reg_write(sc, (IWH_SCD_BASE + 0x10),
5350 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
5351
5352 IWH_WRITE(sc, HBUS_TARG_WRPTR, (IWH_CMD_QUEUE_NUM << 8));
5353 iwh_reg_write(sc, IWH_SCD_QUEUE_RDPTR(IWH_CMD_QUEUE_NUM), 0);
5354
5355 /*
5356 	 * queues 0-7 map to FIFOs 0-7 and
5357 	 * all queues work in FIFO mode (non-scheduler_ack)
5358 */
5359 for (i = 0; i < 4; i++) {
5360 iwh_reg_write(sc, IWH_SCD_QUEUE_STATUS_BITS(i),
5361 (1 << IWH_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
5362 ((3-i) << IWH_SCD_QUEUE_STTS_REG_POS_TXF) |
5363 (1 << IWH_SCD_QUEUE_STTS_REG_POS_WSL) |
5364 IWH_SCD_QUEUE_STTS_REG_MSK);
5365 }
5366
5367 iwh_reg_write(sc, IWH_SCD_QUEUE_STATUS_BITS(IWH_CMD_QUEUE_NUM),
5368 (1 << IWH_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
5369 (IWH_CMD_FIFO_NUM << IWH_SCD_QUEUE_STTS_REG_POS_TXF) |
5370 (1 << IWH_SCD_QUEUE_STTS_REG_POS_WSL) |
5371 IWH_SCD_QUEUE_STTS_REG_MSK);
5372
5373 for (i = 5; i < 7; i++) {
5374 iwh_reg_write(sc, IWH_SCD_QUEUE_STATUS_BITS(i),
5375 (1 << IWH_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
5376 (i << IWH_SCD_QUEUE_STTS_REG_POS_TXF) |
5377 (1 << IWH_SCD_QUEUE_STTS_REG_POS_WSL) |
5378 IWH_SCD_QUEUE_STTS_REG_MSK);
5379 }
5380
5381 iwh_mac_access_exit(sc);
5382
5383 (void) memset(&w_cmd, 0, sizeof (w_cmd));
5384
5385 rv = iwh_cmd(sc, COEX_PRIORITY_TABLE_CMD, &w_cmd, sizeof (w_cmd), 1);
5386 if (rv != IWH_SUCCESS) {
5387 cmn_err(CE_WARN, "iwh_alive_common(): "
5388 "failed to send wimax coexist command.\n");
5389 return (rv);
5390 }
5391
5392 if ((sc->sc_dev_id != 0x423c) &&
5393 (sc->sc_dev_id != 0x423d)) {
5394 (void) memset(&c_cmd, 0, sizeof (c_cmd));
5395
5396 c_cmd.opCode = PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
5397 c_cmd.data.cap_pin1 = LE_16(sc->sc_eep_calib->xtal_calib[0]);
5398 c_cmd.data.cap_pin2 = LE_16(sc->sc_eep_calib->xtal_calib[1]);
5399
5400 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
5401 &c_cmd, sizeof (c_cmd), 1);
5402 if (rv != IWH_SUCCESS) {
5403 cmn_err(CE_WARN, "iwh_alive_common(): "
5404 			    "failed to send crystal "
5405 			    "frequency calibration command.\n");
5406 return (rv);
5407 }
5408
5409 /*
5410 		 * make sure crystal frequency calibration is ready
5411 		 * before the next operations.
5412 */
5413 DELAY(1000);
5414 }
5415
5416 return (IWH_SUCCESS);
5417 }
5418
5419 /*
5420 * save results of calibration from ucode
5421 */
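/*
 * Each calibration notification is cached in sc_calib_results, keyed by
 * its op_code, presumably so that the results can be handed back to the
 * runtime ucode later; the buffers are allocated on first use and freed
 * by iwh_release_calib_buffer().
 */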
5422 static void
5423 iwh_save_calib_result(iwh_sc_t *sc, iwh_rx_desc_t *desc)
5424 {
5425 struct iwh_calib_results *res_p = &sc->sc_calib_results;
5426 struct iwh_calib_hdr *calib_hdr = (struct iwh_calib_hdr *)(desc + 1);
5427 int len = LE_32(desc->len);
5428
5429 /*
5430 * ensure the size of buffer is not too big
5431 */
5432 len = (len & FH_RSCSR_FRAME_SIZE_MASK) - 4;
5433
5434 switch (calib_hdr->op_code) {
5435 case PHY_CALIBRATE_LO_CMD:
5436 if (NULL == res_p->lo_res) {
5437 res_p->lo_res = kmem_alloc(len, KM_NOSLEEP);
5438 }
5439
5440 if (NULL == res_p->lo_res) {
5441 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5442 "failed to allocate memory.\n");
5443 return;
5444 }
5445
5446 res_p->lo_res_len = len;
5447 bcopy(calib_hdr, res_p->lo_res, len);
5448 break;
5449
5450 case PHY_CALIBRATE_TX_IQ_CMD:
5451 if (NULL == res_p->tx_iq_res) {
5452 res_p->tx_iq_res = kmem_alloc(len, KM_NOSLEEP);
5453 }
5454
5455 if (NULL == res_p->tx_iq_res) {
5456 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5457 "failed to allocate memory.\n");
5458 return;
5459 }
5460
5461 res_p->tx_iq_res_len = len;
5462 bcopy(calib_hdr, res_p->tx_iq_res, len);
5463 break;
5464
5465 case PHY_CALIBRATE_TX_IQ_PERD_CMD:
5466 if (NULL == res_p->tx_iq_perd_res) {
5467 res_p->tx_iq_perd_res = kmem_alloc(len, KM_NOSLEEP);
5468 }
5469
5470 if (NULL == res_p->tx_iq_perd_res) {
5471 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5472 "failed to allocate memory.\n");
5473 return;
5474 }
5475
5476 res_p->tx_iq_perd_res_len = len;
5477 bcopy(calib_hdr, res_p->tx_iq_perd_res, len);
5478 break;
5479
5480 case PHY_CALIBRATE_DC_CMD:
5481 if (NULL == res_p->dc_res) {
5482 res_p->dc_res = kmem_alloc(len, KM_NOSLEEP);
5483 }
5484
5485 if (NULL == res_p->dc_res) {
5486 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5487 "failed to allocate memory.\n");
5488 return;
5489 }
5490
5491 res_p->dc_res_len = len;
5492 bcopy(calib_hdr, res_p->dc_res, len);
5493 break;
5494
5495 case PHY_CALIBRATE_BASE_BAND_CMD:
5496 if (NULL == res_p->base_band_res) {
5497 res_p->base_band_res = kmem_alloc(len, KM_NOSLEEP);
5498 }
5499
5500 if (NULL == res_p->base_band_res) {
5501 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5502 "failed to allocate memory.\n");
5503 return;
5504 }
5505
5506 res_p->base_band_res_len = len;
5507 bcopy(calib_hdr, res_p->base_band_res, len);
5508 break;
5509
5510 default:
5511 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5512 "incorrect calibration type(%d).\n", calib_hdr->op_code);
5513 break;
5514 }
5515
5516 }
5517
5518 /*
5519  * configure TX power table
5520 */
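/*
 * The command below uses one fixed setting for the 2.4G band rather than
 * values derived from per-channel calibration: radio TX gain 0x16 and DSP
 * pre-distortion attenuation 0x6E for every HT/OFDM entry (MCS 0-23) and
 * for both CCK entries.
 */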
5521 static int
5522 iwh_tx_power_table(iwh_sc_t *sc, int async)
5523 {
5524 iwh_tx_power_table_cmd_t txpower;
5525 int i, err = IWH_FAIL;
5526
5527 (void) memset(&txpower, 0, sizeof (txpower));
5528
5529 txpower.band = 1; /* for 2.4G */
5530 txpower.channel = (uint8_t)LE_16(sc->sc_config.chan);
5531 txpower.pa_measurements = 1;
5532 txpower.max_mcs = 23;
5533
5534 for (i = 0; i < 24; i++) {
5535 txpower.db.ht_ofdm_power[i].s.radio_tx_gain[0] = 0x16;
5536 txpower.db.ht_ofdm_power[i].s.radio_tx_gain[1] = 0x16;
5537 txpower.db.ht_ofdm_power[i].s.radio_tx_gain[2] = 0x16;
5538 txpower.db.ht_ofdm_power[i].s.dsp_predis_atten[0] = 0x6E;
5539 txpower.db.ht_ofdm_power[i].s.dsp_predis_atten[1] = 0x6E;
5540 txpower.db.ht_ofdm_power[i].s.dsp_predis_atten[2] = 0x6E;
5541 }
5542
5543 for (i = 0; i < 2; i++) {
5544 txpower.db.cck_power[i].s.radio_tx_gain[0] = 0x16;
5545 txpower.db.cck_power[i].s.radio_tx_gain[1] = 0x16;
5546 txpower.db.cck_power[i].s.radio_tx_gain[2] = 0x16;
5547 txpower.db.cck_power[i].s.dsp_predis_atten[0] = 0x6E;
5548 txpower.db.cck_power[i].s.dsp_predis_atten[1] = 0x6E;
5549 txpower.db.cck_power[i].s.dsp_predis_atten[2] = 0x6E;
5550 }
5551
5552 err = iwh_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
5553 sizeof (txpower), async);
5554 if (err != IWH_SUCCESS) {
5555 cmn_err(CE_WARN, "iwh_tx_power_table(): "
5556 "failed to set tx power table.\n");
5557 return (err);
5558 }
5559
5560 return (err);
5561 }
5562
5563 static void
5564 iwh_release_calib_buffer(iwh_sc_t *sc)
5565 {
5566 if (sc->sc_calib_results.lo_res != NULL) {
5567 kmem_free(sc->sc_calib_results.lo_res,
5568 sc->sc_calib_results.lo_res_len);
5569 sc->sc_calib_results.lo_res = NULL;
5570 }
5571
5572 if (sc->sc_calib_results.tx_iq_res != NULL) {
5573 kmem_free(sc->sc_calib_results.tx_iq_res,
5574 sc->sc_calib_results.tx_iq_res_len);
5575 sc->sc_calib_results.tx_iq_res = NULL;
5576 }
5577
5578 if (sc->sc_calib_results.tx_iq_perd_res != NULL) {
5579 kmem_free(sc->sc_calib_results.tx_iq_perd_res,
5580 sc->sc_calib_results.tx_iq_perd_res_len);
5581 sc->sc_calib_results.tx_iq_perd_res = NULL;
5582 }
5583
5584 if (sc->sc_calib_results.dc_res != NULL) {
5585 kmem_free(sc->sc_calib_results.dc_res,
5586 sc->sc_calib_results.dc_res_len);
5587 sc->sc_calib_results.dc_res = NULL;
5588 }
5589
5590 if (sc->sc_calib_results.base_band_res != NULL) {
5591 kmem_free(sc->sc_calib_results.base_band_res,
5592 sc->sc_calib_results.base_band_res_len);
5593 sc->sc_calib_results.base_band_res = NULL;
5594 }
5595 }
5596
5597 /*
5598  * common section of initialization
5599 */
5600 static int
5601 iwh_init_common(iwh_sc_t *sc)
5602 {
5603 int32_t qid;
5604 uint32_t tmp;
5605
5606 if (iwh_reset_hw(sc) != IWH_SUCCESS) {
5607 cmn_err(CE_WARN, "iwh_init_common(): "
5608 "failed to reset hardware\n");
5609 return (IWH_FAIL);
5610 }
5611
5612 (void) iwh_preinit(sc);
5613
5614 tmp = IWH_READ(sc, CSR_GP_CNTRL);
5615 if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
5616 cmn_err(CE_NOTE, "iwh_init_common(): "
5617 "radio transmitter is off\n");
5618 return (IWH_FAIL);
5619 }
5620
5621 /*
5622 * init Rx ring
5623 */
5624 iwh_mac_access_enter(sc);
5625 IWH_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
5626
5627 IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
5628 IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
5629 sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);
5630
5631 IWH_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
5632 ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
5633 offsetof(struct iwh_shared, val0)) >> 4));
5634
5635 IWH_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
5636 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
5637 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
5638 IWH_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K |
5639 (RX_QUEUE_SIZE_LOG <<
5640 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
5641 iwh_mac_access_exit(sc);
5642 IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
5643 (RX_QUEUE_SIZE - 1) & ~0x7);
5644
5645 /*
5646 * init Tx rings
5647 */
5648 iwh_mac_access_enter(sc);
5649 iwh_reg_write(sc, IWH_SCD_TXFACT, 0);
5650
5651 /*
5652 * keep warm page
5653 */
5654 IWH_WRITE(sc, IWH_FH_KW_MEM_ADDR_REG,
5655 sc->sc_dma_kw.cookie.dmac_address >> 4);
5656
5657 for (qid = 0; qid < IWH_NUM_QUEUES; qid++) {
5658 IWH_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
5659 sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
5660 IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
5661 IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
5662 IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
5663 }
5664
5665 iwh_mac_access_exit(sc);
5666
5667 /*
5668 * clear "radio off" and "disable command" bits
5669 */
5670 IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5671 IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
5672 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
5673
5674 /*
5675 * clear any pending interrupts
5676 */
5677 IWH_WRITE(sc, CSR_INT, 0xffffffff);
5678
5679 /*
5680 * enable interrupts
5681 */
5682 IWH_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
5683
5684 IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5685 IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5686
5687 return (IWH_SUCCESS);
5688 }
5689
5690 static int
5691 iwh_fast_recover(iwh_sc_t *sc)
5692 {
5693 ieee80211com_t *ic = &sc->sc_ic;
5694 int err = IWH_FAIL;
5695
5696 mutex_enter(&sc->sc_glock);
5697
5698 /*
5699 * restore runtime configuration
5700 */
5701 bcopy(&sc->sc_config_save, &sc->sc_config,
5702 sizeof (sc->sc_config));
5703
5704 sc->sc_config.assoc_id = 0;
5705 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
5706
5707 if ((err = iwh_hw_set_before_auth(sc)) != IWH_SUCCESS) {
5708 cmn_err(CE_WARN, "iwh_fast_recover(): "
5709 "could not setup authentication\n");
5710 mutex_exit(&sc->sc_glock);
5711 return (err);
5712 }
5713
5714 bcopy(&sc->sc_config_save, &sc->sc_config,
5715 sizeof (sc->sc_config));
5716
5717 /*
5718 * update adapter's configuration
5719 */
5720 err = iwh_run_state_config(sc);
5721 if (err != IWH_SUCCESS) {
5722 cmn_err(CE_WARN, "iwh_fast_recover(): "
5723 "failed to setup association\n");
5724 mutex_exit(&sc->sc_glock);
5725 return (err);
5726 }
5727
5728 /*
5729 * set LED on
5730 */
5731 iwh_set_led(sc, 2, 0, 1);
5732
5733 mutex_exit(&sc->sc_glock);
5734
5735 atomic_and_32(&sc->sc_flags, ~IWH_F_HW_ERR_RECOVER);
5736
5737 /*
5738 * start queue
5739 */
5740 IWH_DBG((IWH_DEBUG_FW, "iwh_fast_recover(): "
5741 "resume xmit\n"));
5742 mac_tx_update(ic->ic_mach);
5743
5744 return (IWH_SUCCESS);
5745 }
5746
5747 static int
5748 iwh_run_state_config(iwh_sc_t *sc)
5749 {
5750 struct ieee80211com *ic = &sc->sc_ic;
5751 ieee80211_node_t *in = ic->ic_bss;
5752 uint32_t ht_protec = (uint32_t)(-1);
5753 int err = IWH_FAIL;
5754
5755 /*
5756 * update adapter's configuration
5757 */
5758 sc->sc_config.assoc_id = in->in_associd & 0x3fff;
5759
5760 /*
5761 * short preamble/slot time are
5762 * negotiated when associating
5763 */
5764 sc->sc_config.flags &=
5765 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
5766 RXON_FLG_SHORT_SLOT_MSK);
5767
5768 if (ic->ic_flags & IEEE80211_F_SHSLOT) {
5769 sc->sc_config.flags |=
5770 LE_32(RXON_FLG_SHORT_SLOT_MSK);
5771 }
5772
5773 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
5774 sc->sc_config.flags |=
5775 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
5776 }
5777
5778 if (in->in_flags & IEEE80211_NODE_HT) {
5779 ht_protec = in->in_htopmode;
5780 if (ht_protec > 3) {
5781 cmn_err(CE_WARN, "iwh_run_state_config(): "
5782 "HT protection mode is not correct.\n");
5783 return (IWH_FAIL);
5784 } else if (NO_HT_PROT == ht_protec) {
5785 ht_protec = sc->sc_ht_conf.ht_protection;
5786 }
5787
5788 sc->sc_config.flags |=
5789 LE_32(ht_protec << RXON_FLG_HT_OPERATING_MODE_POS);
5790 }
5791
5792 /*
5793 * set RX chains/antennas.
5794 */
5795 iwh_config_rxon_chain(sc);
5796
5797 sc->sc_config.filter_flags |=
5798 LE_32(RXON_FILTER_ASSOC_MSK);
5799
5800 if (ic->ic_opmode != IEEE80211_M_STA) {
5801 sc->sc_config.filter_flags |=
5802 LE_32(RXON_FILTER_BCON_AWARE_MSK);
5803 }
5804
5805 IWH_DBG((IWH_DEBUG_80211, "iwh_run_state_config(): "
5806 "config chan %d flags %x"
5807 " filter_flags %x\n",
5808 sc->sc_config.chan, sc->sc_config.flags,
5809 sc->sc_config.filter_flags));
5810
5811 err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
5812 sizeof (iwh_rxon_cmd_t), 1);
5813 if (err != IWH_SUCCESS) {
5814 cmn_err(CE_WARN, "iwh_run_state_config(): "
5815 "could not update configuration\n");
5816 return (err);
5817 }
5818
5819 if ((sc->sc_dev_id != 0x423c) &&
5820 (sc->sc_dev_id != 0x423d)) {
5821 /*
5822 * send tx power table command
5823 */
5824 err = iwh_tx_power_table(sc, 1);
5825 if (err != IWH_SUCCESS) {
5826 return (err);
5827 }
5828 }
5829
5830 /*
5831 	 * No need to update the retry rate table for the AP node
5832 */
5833 err = iwh_qosparam_to_hw(sc, 1);
5834 if (err != IWH_SUCCESS) {
5835 return (err);
5836 }
5837
5838 return (err);
5839 }
5840
5841 /*
5842 * This function is only for compatibility with Net80211 module.
5843 * iwh_qosparam_to_hw() is the actual function updating EDCA
5844 * parameters to hardware.
5845 */
5846 /* ARGSUSED */
5847 static int
5848 iwh_wme_update(ieee80211com_t *ic)
5849 {
5850 return (0);
5851 }
5852
5853 static int
5854 iwh_wme_to_qos_ac(int wme_ac)
5855 {
5856 int qos_ac = QOS_AC_INVALID;
5857
5858 if (wme_ac < WME_AC_BE || wme_ac > WME_AC_VO) {
5859 cmn_err(CE_WARN, "iwh_wme_to_qos_ac(): "
5860 "WME AC index is not in suitable range.\n");
5861 return (qos_ac);
5862 }
5863
5864 switch (wme_ac) {
5865 case WME_AC_BE:
5866 qos_ac = QOS_AC_BK;
5867 break;
5868 case WME_AC_BK:
5869 qos_ac = QOS_AC_BE;
5870 break;
5871 case WME_AC_VI:
5872 qos_ac = QOS_AC_VI;
5873 break;
5874 case WME_AC_VO:
5875 qos_ac = QOS_AC_VO;
5876 break;
5877 }
5878
5879 return (qos_ac);
5880 }
5881
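/*
 * Convert a contention-window exponent, as carried in the WME/EDCA
 * parameters, into the actual contention-window value: cw = 2^cw_e - 1.
 * For example, cw_e = 4 yields a contention window of 15 slots.
 */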
5882 static uint16_t
5883 iwh_cw_e_to_cw(uint8_t cw_e)
5884 {
5885 uint16_t cw = 1;
5886
5887 while (cw_e > 0) {
5888 cw <<= 1;
5889 cw_e--;
5890 }
5891
5892 cw -= 1;
5893 return (cw);
5894 }
5895
5896 static int
5897 iwh_wmeparam_check(struct wmeParams *wmeparam)
5898 {
5899 int i;
5900
5901 for (i = 0; i < WME_NUM_AC; i++) {
5902
5903 if ((wmeparam[i].wmep_logcwmax > QOS_CW_RANGE_MAX) ||
5904 (wmeparam[i].wmep_logcwmin >= wmeparam[i].wmep_logcwmax)) {
5905 cmn_err(CE_WARN, "iwh_wmeparam_check(): "
5906 "Contention window is not in suitable range.\n");
5907 return (IWH_FAIL);
5908 }
5909
5910 if ((wmeparam[i].wmep_aifsn < QOS_AIFSN_MIN) ||
5911 (wmeparam[i].wmep_aifsn > QOS_AIFSN_MAX)) {
5912 cmn_err(CE_WARN, "iwh_wmeparam_check(): "
5913 			    "Arbitration interframe space number "
5914 			    "is not in suitable range.\n");
5915 return (IWH_FAIL);
5916 }
5917 }
5918
5919 return (IWH_SUCCESS);
5920 }
5921
5922 /*
5923  * This function updates EDCA parameters in the hardware.
5924  * FIFO0-background, FIFO1-best effort, FIFO2-video, FIFO3-voice.
5925 */
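/*
 * The cw_min/cw_max exponents from the WME parameters are expanded with
 * iwh_cw_e_to_cw(), the WME access-category index is remapped with
 * iwh_wme_to_qos_ac(), and the TXOP limit is converted from the 32
 * microsecond units carried in the WME IE to microseconds for the command.
 */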
5926 static int
5927 iwh_qosparam_to_hw(iwh_sc_t *sc, int async)
5928 {
5929 ieee80211com_t *ic = &sc->sc_ic;
5930 ieee80211_node_t *in = ic->ic_bss;
5931 struct wmeParams *wmeparam;
5932 iwh_qos_param_cmd_t qosparam_cmd;
5933 int i, j;
5934 int err = IWH_FAIL;
5935
5936 if ((in->in_flags & IEEE80211_NODE_QOS) &&
5937 (IEEE80211_M_STA == ic->ic_opmode)) {
5938 wmeparam = ic->ic_wme.wme_chanParams.cap_wmeParams;
5939 } else {
5940 return (IWH_SUCCESS);
5941 }
5942
5943 (void) memset(&qosparam_cmd, 0, sizeof (qosparam_cmd));
5944
5945 err = iwh_wmeparam_check(wmeparam);
5946 if (err != IWH_SUCCESS) {
5947 return (err);
5948 }
5949
5950 if (in->in_flags & IEEE80211_NODE_QOS) {
5951 qosparam_cmd.flags |= QOS_PARAM_FLG_UPDATE_EDCA;
5952 }
5953
5954 if (in->in_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)) {
5955 qosparam_cmd.flags |= QOS_PARAM_FLG_TGN;
5956 }
5957
5958 for (i = 0; i < WME_NUM_AC; i++) {
5959
5960 j = iwh_wme_to_qos_ac(i);
5961 if (j < QOS_AC_BK || j > QOS_AC_VO) {
5962 return (IWH_FAIL);
5963 }
5964
5965 qosparam_cmd.ac[j].cw_min =
5966 iwh_cw_e_to_cw(wmeparam[i].wmep_logcwmin);
5967 qosparam_cmd.ac[j].cw_max =
5968 iwh_cw_e_to_cw(wmeparam[i].wmep_logcwmax);
5969 qosparam_cmd.ac[j].aifsn =
5970 wmeparam[i].wmep_aifsn;
5971 qosparam_cmd.ac[j].txop =
5972 (uint16_t)(wmeparam[i].wmep_txopLimit * 32);
5973 }
5974
5975 err = iwh_cmd(sc, REPLY_QOS_PARAM, &qosparam_cmd,
5976 sizeof (qosparam_cmd), async);
5977 if (err != IWH_SUCCESS) {
5978 cmn_err(CE_WARN, "iwh_qosparam_to_hw(): "
5979 "failed to update QoS parameters into hardware.\n");
5980 return (err);
5981 }
5982
5983 #ifdef DEBUG
5984 IWH_DBG((IWH_DEBUG_QOS, "iwh_qosparam_to_hw(): "
5985 "EDCA parameters are as follows:\n"));
5986
5987 IWH_DBG((IWH_DEBUG_QOS, "BK parameters are: "
5988 "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
5989 qosparam_cmd.ac[0].cw_min, qosparam_cmd.ac[0].cw_max,
5990 qosparam_cmd.ac[0].aifsn, qosparam_cmd.ac[0].txop));
5991
5992 IWH_DBG((IWH_DEBUG_QOS, "BE parameters are: "
5993 "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
5994 qosparam_cmd.ac[1].cw_min, qosparam_cmd.ac[1].cw_max,
5995 qosparam_cmd.ac[1].aifsn, qosparam_cmd.ac[1].txop));
5996
5997 IWH_DBG((IWH_DEBUG_QOS, "VI parameters are: "
5998 "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
5999 qosparam_cmd.ac[2].cw_min, qosparam_cmd.ac[2].cw_max,
6000 qosparam_cmd.ac[2].aifsn, qosparam_cmd.ac[2].txop));
6001
6002 IWH_DBG((IWH_DEBUG_QOS, "VO parameters are: "
6003 "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
6004 qosparam_cmd.ac[3].cw_min, qosparam_cmd.ac[3].cw_max,
6005 qosparam_cmd.ac[3].aifsn, qosparam_cmd.ac[3].txop));
6006 #endif
6007 return (err);
6008 }
6009
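/*
 * Map an 802.11e traffic identifier (TID 0-7) to a QoS access category
 * using the usual user-priority mapping: TIDs 1 and 2 go to background,
 * 0 and 3 to best effort, 4 and 5 to video, and 6 and 7 to voice.  The
 * access category is then translated into one of the driver's TX queues.
 */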
6010 static inline int
6011 iwh_wme_tid_qos_ac(int tid)
6012 {
6013 switch (tid) {
6014 case 1:
6015 case 2:
6016 return (QOS_AC_BK);
6017 case 0:
6018 case 3:
6019 return (QOS_AC_BE);
6020 case 4:
6021 case 5:
6022 return (QOS_AC_VI);
6023 case 6:
6024 case 7:
6025 return (QOS_AC_VO);
6026 }
6027
6028 return (QOS_AC_BE);
6029 }
6030
6031 static inline int
6032 iwh_qos_ac_to_txq(int qos_ac)
6033 {
6034 switch (qos_ac) {
6035 case QOS_AC_BK:
6036 return (QOS_AC_BK_TO_TXQ);
6037 case QOS_AC_BE:
6038 return (QOS_AC_BE_TO_TXQ);
6039 case QOS_AC_VI:
6040 return (QOS_AC_VI_TO_TXQ);
6041 case QOS_AC_VO:
6042 return (QOS_AC_VO_TO_TXQ);
6043 }
6044
6045 return (QOS_AC_BE_TO_TXQ);
6046 }
6047
6048 static int
6049 iwh_wme_tid_to_txq(int tid)
6050 {
6051 int queue_n = TXQ_FOR_AC_INVALID;
6052 int qos_ac;
6053
6054 if (tid < WME_TID_MIN ||
6055 tid > WME_TID_MAX) {
6056 		cmn_err(CE_WARN, "iwh_wme_tid_to_txq(): "
6057 "TID is not in suitable range.\n");
6058 return (queue_n);
6059 }
6060
6061 qos_ac = iwh_wme_tid_qos_ac(tid);
6062 queue_n = iwh_qos_ac_to_txq(qos_ac);
6063
6064 return (queue_n);
6065 }
6066
6067 /*
6068  * This function is used for initializing HT-relevant configurations.
6069 */
6070 static void
6071 iwh_init_ht_conf(iwh_sc_t *sc)
6072 {
6073 (void) memset(&sc->sc_ht_conf, 0, sizeof (iwh_ht_conf_t));
6074
6075 if ((0x4235 == sc->sc_dev_id) ||
6076 (0x4236 == sc->sc_dev_id) ||
6077 (0x423a == sc->sc_dev_id)) {
6078 sc->sc_ht_conf.ht_support = 1;
6079
6080 sc->sc_ht_conf.valid_chains = 3;
6081 sc->sc_ht_conf.tx_stream_count = 2;
6082 sc->sc_ht_conf.rx_stream_count = 2;
6083
6084 sc->sc_ht_conf.tx_support_mcs[0] = 0xff;
6085 sc->sc_ht_conf.tx_support_mcs[1] = 0xff;
6086 sc->sc_ht_conf.rx_support_mcs[0] = 0xff;
6087 sc->sc_ht_conf.rx_support_mcs[1] = 0xff;
6088 } else {
6089 sc->sc_ht_conf.ht_support = 1;
6090
6091 sc->sc_ht_conf.valid_chains = 2;
6092 sc->sc_ht_conf.tx_stream_count = 1;
6093 sc->sc_ht_conf.rx_stream_count = 2;
6094
6095 sc->sc_ht_conf.tx_support_mcs[0] = 0xff;
6096 sc->sc_ht_conf.rx_support_mcs[0] = 0xff;
6097 sc->sc_ht_conf.rx_support_mcs[1] = 0xff;
6098 }
6099
6100 if (sc->sc_ht_conf.ht_support) {
6101 sc->sc_ht_conf.cap |= HT_CAP_GRN_FLD;
6102 sc->sc_ht_conf.cap |= HT_CAP_SGI_20;
6103 sc->sc_ht_conf.cap |= HT_CAP_MAX_AMSDU;
6104 /* should disable MIMO */
6105 sc->sc_ht_conf.cap |= HT_CAP_MIMO_PS;
6106
6107 sc->sc_ht_conf.ampdu_p.factor = HT_RX_AMPDU_FACTOR;
6108 sc->sc_ht_conf.ampdu_p.density = HT_MPDU_DENSITY;
6109
6110 sc->sc_ht_conf.ht_protection = HT_PROT_CHAN_NON_HT;
6111 }
6112 }
6113
6114 /*
6115  * This function overwrites the default ieee80211_rateset_11n structure.
6116 */
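/*
 * Bit j of rx_support_mcs[i] marks MCS index i * 8 + j as receivable;
 * every MCS index found this way is written into
 * ieee80211_rateset_11n.rs_rates, so the net80211 HT rate set advertises
 * exactly the MCS indices this device supports.
 */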
6117 static void
6118 iwh_overwrite_11n_rateset(iwh_sc_t *sc)
6119 {
6120 uint8_t *ht_rs = sc->sc_ht_conf.rx_support_mcs;
6121 int mcs_idx, mcs_count = 0;
6122 int i, j;
6123
6124 for (i = 0; i < HT_RATESET_NUM; i++) {
6125 for (j = 0; j < 8; j++) {
6126 if (ht_rs[i] & (1 << j)) {
6127 mcs_idx = i * 8 + j;
6128 if (mcs_idx >= IEEE80211_HTRATE_MAXSIZE) {
6129 break;
6130 }
6131
6132 ieee80211_rateset_11n.rs_rates[mcs_idx] =
6133 (uint8_t)mcs_idx;
6134 mcs_count++;
6135 }
6136 }
6137 }
6138
6139 ieee80211_rateset_11n.rs_nrates = (uint8_t)mcs_count;
6140
6141 #ifdef DEBUG
6142 IWH_DBG((IWH_DEBUG_HTRATE, "iwh_overwrite_11n_rateset(): "
6143 	    "HT rates supported by this station are as follows:\n"));
6144
6145 for (i = 0; i < ieee80211_rateset_11n.rs_nrates; i++) {
6146 IWH_DBG((IWH_DEBUG_HTRATE, "Rate %d is %d\n",
6147 i, ieee80211_rateset_11n.rs_rates[i]));
6148 }
6149 #endif
6150 }
6151
6152 /*
6153 * This function overwrites default configurations of
6154  * the ieee80211com structure in the Net80211 module.
6155 */
6156 static void
6157 iwh_overwrite_ic_default(iwh_sc_t *sc)
6158 {
6159 ieee80211com_t *ic = &sc->sc_ic;
6160
6161 sc->sc_newstate = ic->ic_newstate;
6162 ic->ic_newstate = iwh_newstate;
6163 ic->ic_node_alloc = iwh_node_alloc;
6164 ic->ic_node_free = iwh_node_free;
6165
6166 if (sc->sc_ht_conf.ht_support) {
6167 sc->sc_recv_action = ic->ic_recv_action;
6168 ic->ic_recv_action = iwh_recv_action;
6169 sc->sc_send_action = ic->ic_send_action;
6170 ic->ic_send_action = iwh_send_action;
6171
6172 ic->ic_ampdu_rxmax = sc->sc_ht_conf.ampdu_p.factor;
6173 ic->ic_ampdu_density = sc->sc_ht_conf.ampdu_p.density;
6174 ic->ic_ampdu_limit = ic->ic_ampdu_rxmax;
6175 }
6176 }
6177
6178 /*
6179  * This function sets the "RX chain selection" field
6180  * in the RXON command when the driver is plumbed.
6181 */
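/*
 * The rx_chain field packs several bitfields: which RX chains are valid,
 * which chains are forced for legacy and for MIMO reception, how many
 * chains to use in total and for MIMO, and a driver-force flag.  Devices
 * with three valid chains enable chains A, B and C; the others enable A
 * and B only, and the chain counts are only filled in once an HT
 * association exists.
 */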
6182 static void
6183 iwh_config_rxon_chain(iwh_sc_t *sc)
6184 {
6185 ieee80211com_t *ic = &sc->sc_ic;
6186 ieee80211_node_t *in = ic->ic_bss;
6187
6188 if (3 == sc->sc_ht_conf.valid_chains) {
6189 sc->sc_config.rx_chain = LE_16((RXON_RX_CHAIN_A_MSK |
6190 RXON_RX_CHAIN_B_MSK | RXON_RX_CHAIN_C_MSK) <<
6191 RXON_RX_CHAIN_VALID_POS);
6192
6193 sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6194 RXON_RX_CHAIN_B_MSK | RXON_RX_CHAIN_C_MSK) <<
6195 RXON_RX_CHAIN_FORCE_SEL_POS);
6196
6197 sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6198 RXON_RX_CHAIN_B_MSK | RXON_RX_CHAIN_C_MSK) <<
6199 RXON_RX_CHAIN_FORCE_MIMO_SEL_POS);
6200 } else {
6201 sc->sc_config.rx_chain = LE_16((RXON_RX_CHAIN_A_MSK |
6202 RXON_RX_CHAIN_B_MSK) << RXON_RX_CHAIN_VALID_POS);
6203
6204 sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6205 RXON_RX_CHAIN_B_MSK) << RXON_RX_CHAIN_FORCE_SEL_POS);
6206
6207 sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6208 RXON_RX_CHAIN_B_MSK) <<
6209 RXON_RX_CHAIN_FORCE_MIMO_SEL_POS);
6210 }
6211
6212 sc->sc_config.rx_chain |= LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK);
6213
6214 if ((in != NULL) &&
6215 (in->in_flags & IEEE80211_NODE_HT) &&
6216 sc->sc_ht_conf.ht_support) {
6217 if (3 == sc->sc_ht_conf.valid_chains) {
6218 sc->sc_config.rx_chain |= LE_16(3 <<
6219 RXON_RX_CHAIN_CNT_POS);
6220 sc->sc_config.rx_chain |= LE_16(3 <<
6221 RXON_RX_CHAIN_MIMO_CNT_POS);
6222 } else {
6223 sc->sc_config.rx_chain |= LE_16(2 <<
6224 RXON_RX_CHAIN_CNT_POS);
6225 sc->sc_config.rx_chain |= LE_16(2 <<
6226 RXON_RX_CHAIN_MIMO_CNT_POS);
6227 }
6228
6229 sc->sc_config.rx_chain |= LE_16(1 <<
6230 RXON_RX_CHAIN_MIMO_FORCE_POS);
6231 }
6232
6233 IWH_DBG((IWH_DEBUG_RXON, "iwh_config_rxon_chain(): "
6234 "rxon->rx_chain = %x\n", sc->sc_config.rx_chain));
6235 }
6236
6237 /*
6238 * This function adds AP station into hardware.
6239 */
6240 static int
6241 iwh_add_ap_sta(iwh_sc_t *sc)
6242 {
6243 ieee80211com_t *ic = &sc->sc_ic;
6244 ieee80211_node_t *in = ic->ic_bss;
6245 iwh_add_sta_t node;
6246 uint32_t ampdu_factor, ampdu_density;
6247 int err = IWH_FAIL;
6248
6249 /*
6250 * Add AP node into hardware.
6251 */
6252 (void) memset(&node, 0, sizeof (node));
6253 IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
6254 node.mode = STA_MODE_ADD_MSK;
6255 node.sta.sta_id = IWH_AP_ID;
6256
6257 if (sc->sc_ht_conf.ht_support &&
6258 (in->in_htcap_ie != NULL) &&
6259 (in->in_htcap != 0) &&
6260 (in->in_htparam != 0)) {
6261
6262 if (((in->in_htcap & HT_CAP_MIMO_PS) >> 2)
6263 == HT_CAP_MIMO_PS_DYNAMIC) {
6264 node.station_flags |= LE_32(STA_FLG_RTS_MIMO_PROT);
6265 }
6266
6267 ampdu_factor = in->in_htparam & HT_RX_AMPDU_FACTOR_MSK;
6268 node.station_flags |=
6269 LE_32(ampdu_factor << STA_FLG_MAX_AMPDU_POS);
6270
6271 ampdu_density = (in->in_htparam & HT_MPDU_DENSITY_MSK) >>
6272 HT_MPDU_DENSITY_POS;
6273 node.station_flags |=
6274 LE_32(ampdu_density << STA_FLG_AMPDU_DENSITY_POS);
6275
6276 if (in->in_htcap & LE_16(HT_CAP_SUP_WIDTH)) {
6277 node.station_flags |=
6278 LE_32(STA_FLG_FAT_EN);
6279 }
6280 }
6281
6282 err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
6283 if (err != IWH_SUCCESS) {
6284 		cmn_err(CE_WARN, "iwh_add_ap_sta(): "
6285 "failed to add AP node\n");
6286 return (err);
6287 }
6288
6289 return (err);
6290 }
6291
6292 /*
6293 * Each station in the Shirley Peak's internal station table has
6294 * its own table of 16 TX rates and modulation modes for retrying
6295 * TX when an ACK is not received. This function replaces the entire
6296  * table for one station. The station must already be in Shirley Peak's
6297  * station table.
6298 */
6299 static int
6300 iwh_ap_lq(iwh_sc_t *sc)
6301 {
6302 ieee80211com_t *ic = &sc->sc_ic;
6303 ieee80211_node_t *in = ic->ic_bss;
6304 iwh_link_quality_cmd_t link_quality;
6305 const struct ieee80211_rateset *rs_sup = NULL;
6306 uint32_t masks = 0, rate;
6307 int i, err = IWH_FAIL;
6308
6309 /*
6310 * TX_LINK_QUALITY cmd
6311 */
6312 (void) memset(&link_quality, 0, sizeof (link_quality));
6313 if (in->in_chan == IEEE80211_CHAN_ANYC) /* skip null node */
6314 return (err);
6315 rs_sup = ieee80211_get_suprates(ic, in->in_chan);
6316
6317 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6318 if (i < rs_sup->ir_nrates) {
6319 rate = rs_sup->ir_rates[rs_sup->ir_nrates - i] &
6320 IEEE80211_RATE_VAL;
6321 } else {
6322 rate = 2;
6323 }
6324
		if (2 == rate || 4 == rate ||
		    11 == rate || 22 == rate) {
			masks |= LE_32(RATE_MCS_CCK_MSK);
		}

		masks |= LE_32(RATE_MCS_ANT_B_MSK);

		link_quality.rate_n_flags[i] =
		    LE_32(iwh_rate_to_plcp(rate) | masks);
	}

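	/*
	 * Single-stream frames are limited to antenna B; dual-stream frames
	 * may use the full antenna mask.  The aggregation parameters
	 * (disable-start threshold and time limit) are passed through to
	 * the firmware as-is.
	 */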
	link_quality.general_params.single_stream_ant_msk = LINK_QUAL_ANT_B_MSK;
	link_quality.general_params.dual_stream_ant_msk = LINK_QUAL_ANT_MSK;
	link_quality.agg_params.agg_dis_start_th = 3;
	link_quality.agg_params.agg_time_limit = LE_16(4000);
	link_quality.sta_id = IWH_AP_ID;
	err = iwh_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
	    sizeof (link_quality), 1);
	if (err != IWH_SUCCESS) {
		cmn_err(CE_WARN, "iwh_ap_lq(): "
		    "failed to config link quality table\n");
		return (err);
	}

#ifdef DEBUG
	IWH_DBG((IWH_DEBUG_HWRATE, "iwh_ap_lq(): "
	    "Rates in HW are as follows:\n"));

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		IWH_DBG((IWH_DEBUG_HWRATE,
		    "Rate %d in HW is %x\n", i, link_quality.rate_n_flags[i]));
	}
#endif

	return (err);
}

/*
 * When a block ACK agreement has been set up between the station and the AP,
 * the net80211 module calls this function to pass the parameters of that BA
 * agreement down to the hardware.
 * When the AP wants to tear down a BA agreement that it originated, the
 * net80211 module calls this function to clean up the corresponding state
 * in the hardware.
 */
static void
iwh_recv_action(struct ieee80211_node *in,
    const uint8_t *frm, const uint8_t *efrm)
{
	struct ieee80211com *ic;
	iwh_sc_t *sc;
	const struct ieee80211_action *ia;
	uint16_t baparamset, baseqctl;
	uint32_t tid, ssn;
	iwh_add_sta_t node;
	int err = IWH_FAIL;

	if ((NULL == in) || (NULL == frm)) {
		return;
	}

	ic = in->in_ic;
	if (NULL == ic) {
		return;
	}

	sc = (iwh_sc_t *)ic;

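	/*
	 * Hand the frame to the underlying net80211 action handler first;
	 * the code below only mirrors the resulting block ACK state into
	 * the firmware's station table.
	 */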
	sc->sc_recv_action(in, frm, efrm);

	ia = (const struct ieee80211_action *)frm;
	if (ia->ia_category != IEEE80211_ACTION_CAT_BA) {
		return;
	}

	switch (ia->ia_action) {
	case IEEE80211_ACTION_BA_ADDBA_REQUEST:
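		/*
		 * An ADDBA request carries the BA parameter set at offset 3
		 * of the action frame body and the starting sequence control
		 * at offset 7.  Extract the TID and starting sequence number
		 * and install an immediate block ACK for that TID on the AP
		 * station entry.
		 */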
		baparamset = *(uint16_t *)(frm + 3);
		baseqctl = *(uint16_t *)(frm + 7);

		tid = MS(baparamset, IEEE80211_BAPS_TID);
		ssn = MS(baseqctl, IEEE80211_BASEQ_START);

		(void) memset(&node, 0, sizeof (node));
		IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
		node.mode = STA_MODE_MODIFY_MSK;
		node.sta.sta_id = IWH_AP_ID;

		node.station_flags_msk = 0;
		node.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
		node.add_immediate_ba_tid = (uint8_t)tid;
		node.add_immediate_ba_ssn = LE_16(ssn);

		mutex_enter(&sc->sc_glock);
		err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
		if (err != IWH_SUCCESS) {
			cmn_err(CE_WARN, "iwh_recv_action(): "
			    "failed to setup RX block ACK\n");
			mutex_exit(&sc->sc_glock);
			return;
		}
		mutex_exit(&sc->sc_glock);

		IWH_DBG((IWH_DEBUG_BA, "iwh_recv_action(): "
		    "RX block ACK "
		    "was setup on TID %d and SSN is %d.\n", tid, ssn));

		return;

	case IEEE80211_ACTION_BA_DELBA:
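		/*
		 * A DELBA frame carries its parameter set at offset 2 of the
		 * action frame body.  The hardware state is only torn down
		 * when the initiator bit is set, i.e. the peer that
		 * originated the agreement is deleting it; otherwise there
		 * is no RX block ACK state to remove here.
		 */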
		baparamset = *(uint16_t *)(frm + 2);

		if ((baparamset & IEEE80211_DELBAPS_INIT) == 0) {
			return;
		}

		tid = MS(baparamset, IEEE80211_DELBAPS_TID);

		(void) memset(&node, 0, sizeof (node));
		IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
		node.mode = STA_MODE_MODIFY_MSK;
		node.sta.sta_id = IWH_AP_ID;

		node.station_flags_msk = 0;
		node.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
		node.add_immediate_ba_tid = (uint8_t)tid;

		mutex_enter(&sc->sc_glock);
		err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
		if (err != IWH_SUCCESS) {
			cmn_err(CE_WARN, "iwh_recv_action(): "
			    "failed to delete RX block ACK\n");
			mutex_exit(&sc->sc_glock);
			return;
		}
		mutex_exit(&sc->sc_glock);

		IWH_DBG((IWH_DEBUG_BA, "iwh_recv_action(): "
		    "RX block ACK "
		    "was deleted on TID %d.\n", tid));

		return;
	}
}

/*
 * When the local station wants to delete a BA agreement that was originated
 * by the AP, the net80211 module calls this function to clean up the
 * corresponding state in the hardware.
 */
static int
iwh_send_action(struct ieee80211_node *in,
    int category, int action, uint16_t args[4])
{
	struct ieee80211com *ic;
	iwh_sc_t *sc;
	uint32_t tid;
	iwh_add_sta_t node;
	int ret = EIO;
	int err = IWH_FAIL;

	if (NULL == in) {
		return (ret);
	}

	ic = in->in_ic;
	if (NULL == ic) {
		return (ret);
	}

	sc = (iwh_sc_t *)ic;

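	/*
	 * Let net80211 build and transmit the action frame through the
	 * underlying handler first; only DELBA requires the hardware
	 * block ACK state to be updated below.
	 */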
	ret = sc->sc_send_action(in, category, action, args);

	if (category != IEEE80211_ACTION_CAT_BA) {
		return (ret);
	}

	switch (action) {
	case IEEE80211_ACTION_BA_DELBA:
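		/*
		 * As used here, args[0] carries the TID and args[1] the
		 * initiator flag of the DELBA request.  If the local station
		 * originated the agreement, there is no RX block ACK state
		 * in the hardware to remove, so nothing more is done.
		 */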
		if (IEEE80211_DELBAPS_INIT == args[1]) {
			return (ret);
		}

		tid = args[0];

		(void) memset(&node, 0, sizeof (node));
		IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
		node.mode = STA_MODE_MODIFY_MSK;
		node.sta.sta_id = IWH_AP_ID;

		node.station_flags_msk = 0;
		node.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
		node.add_immediate_ba_tid = (uint8_t)tid;

		mutex_enter(&sc->sc_glock);
		err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
		if (err != IWH_SUCCESS) {
			cmn_err(CE_WARN, "iwh_send_action(): "
			    "failed to delete RX block ACK\n");
			mutex_exit(&sc->sc_glock);
			return (EIO);
		}
		mutex_exit(&sc->sc_glock);

		IWH_DBG((IWH_DEBUG_BA, "iwh_send_action(): "
		    "RX block ACK "
		    "was deleted on TID %d.\n", tid));

		break;
	}

	return (ret);
}

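/*
 * Hardware ready/prepare handshake.  First request "NIC ready" and poll for
 * the hardware to acknowledge it.  If it does not, assert the "prepare" bit,
 * wait for the prepare-done bit to clear, and then retry the ready request.
 * Returns IWH_SUCCESS once the hardware reports ready, or ETIMEDOUT.
 */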
static int
iwh_reset_hw(iwh_sc_t *sc)
{
	uint32_t tmp;
	int n;

	tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
	IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
	    tmp | CSR_HW_IF_CONFIG_REG_BITS_NIC_READY);

	/*
	 * wait for HW ready
	 */
	for (n = 0; n < 5; n++) {
		if (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
		    CSR_HW_IF_CONFIG_REG_BITS_NIC_READY) {
			break;
		}
		DELAY(10);
	}

	if (n != 5) {
		return (IWH_SUCCESS);
	}

	tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
	IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
	    tmp | CSR_HW_IF_CONFIG_REG_BITS_PREPARE);

	for (n = 0; n < 15000; n++) {
		if (0 == (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
		    CSR_HW_IF_CONFIG_REG_BITS_NIC_PREPARE_DONE)) {
			break;
		}
		DELAY(10);
	}

	if (15000 == n) {
		return (ETIMEDOUT);
	}

	tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
	IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
	    tmp | CSR_HW_IF_CONFIG_REG_BITS_NIC_READY);

	/*
	 * wait for HW ready
	 */
	for (n = 0; n < 5; n++) {
		if (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
		    CSR_HW_IF_CONFIG_REG_BITS_NIC_READY) {
			break;
		}
		DELAY(10);
	}

	if (n != 5) {
		return (IWH_SUCCESS);
	} else {
		return (ETIMEDOUT);
	}
}