1 /*
2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 * Use is subject to license terms.
4 */
5
6 /*
7 * Copyright (c) 2009, Intel Corporation
8 * All rights reserved.
9 */
10
11 /*
12 * Copyright (c) 2006
13 * Copyright (c) 2007
14 * Damien Bergamini <damien.bergamini@free.fr>
15 *
16 * Permission to use, copy, modify, and distribute this software for any
17 * purpose with or without fee is hereby granted, provided that the above
18 * copyright notice and this permission notice appear in all copies.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27 */
28
29 /*
30 * Intel(R) WiFi Link 5100/5300 Driver
31 */
32
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac_provider.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/net80211_ht.h>
56 #include <sys/varargs.h>
57 #include <sys/policy.h>
58 #include <sys/pci.h>
59
60 #include "iwh_calibration.h"
61 #include "iwh_hw.h"
62 #include "iwh_eeprom.h"
63 #include "iwh_var.h"
64 #include <inet/wifi_ioctl.h>
65
66 #ifdef DEBUG
67 #define IWH_DEBUG_80211 (1 << 0)
68 #define IWH_DEBUG_CMD (1 << 1)
69 #define IWH_DEBUG_DMA (1 << 2)
70 #define IWH_DEBUG_EEPROM (1 << 3)
71 #define IWH_DEBUG_FW (1 << 4)
72 #define IWH_DEBUG_HW (1 << 5)
73 #define IWH_DEBUG_INTR (1 << 6)
74 #define IWH_DEBUG_MRR (1 << 7)
75 #define IWH_DEBUG_PIO (1 << 8)
76 #define IWH_DEBUG_RX (1 << 9)
77 #define IWH_DEBUG_SCAN (1 << 10)
78 #define IWH_DEBUG_TX (1 << 11)
79 #define IWH_DEBUG_RATECTL (1 << 12)
80 #define IWH_DEBUG_RADIO (1 << 13)
81 #define IWH_DEBUG_RESUME (1 << 14)
82 #define IWH_DEBUG_CALIBRATION (1 << 15)
83 #define IWH_DEBUG_BA (1 << 16)
84 #define IWH_DEBUG_RXON (1 << 17)
85 #define IWH_DEBUG_HWRATE (1 << 18)
86 #define IWH_DEBUG_HTRATE (1 << 19)
87 #define IWH_DEBUG_QOS (1 << 20)
88 /*
89 * To see the debug messages of a given section,
90 * set this flag to one or more of the values above.
91 */
92 uint32_t iwh_dbg_flags = 0;
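/*
 * Illustrative example (not in the original source): to trace the TX
 * and interrupt paths in a DEBUG build, one could set
 *	iwh_dbg_flags = IWH_DEBUG_TX | IWH_DEBUG_INTR;
 * here, or patch the variable in a running kernel with a kernel
 * debugger.  Messages are then emitted through IWH_DBG(), e.g.
 *	IWH_DBG((IWH_DEBUG_TX, "iwh: frame queued\n"));
 */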
93 #define IWH_DBG(x) \
94 iwh_dbg x
95 #else
96 #define IWH_DBG(x)
97 #endif
98
99 #define MS(v, f) (((v) & f) >> f##_S)
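/*
 * Usage sketch for MS() with a hypothetical field FOO (neither FOO nor
 * FOO_S is defined by this driver):
 *	#define FOO	0x0000ff00	-- field mask, bits 8..15
 *	#define FOO_S	8		-- shift of the field's LSB
 * MS(reg, FOO) masks the register value with FOO and shifts it right
 * by FOO_S, returning the raw field value.
 */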
100
101 static void *iwh_soft_state_p = NULL;
102
103 /*
104 * ucode is compiled into the driver image
105 */
106 static uint8_t iwh_fw_5000_bin[] = {
107 #include "fw-iw/fw_5000/iwh_5000.ucode"
108 };
109
110 static uint8_t iwh_fw_5150_bin[] = {
111 #include "fw-iw/fw_5150/iwh_5150.ucode"
112 };
113
114 /*
115 * DMA attributes for a shared page
116 */
117 static ddi_dma_attr_t sh_dma_attr = {
118 DMA_ATTR_V0, /* version of this structure */
119 0, /* lowest usable address */
120 0xffffffffU, /* highest usable address */
121 0xffffffffU, /* maximum DMAable byte count */
122 0x1000, /* alignment in bytes */
123 0x1000, /* burst sizes (any?) */
124 1, /* minimum transfer */
125 0xffffffffU, /* maximum transfer */
126 0xffffffffU, /* maximum segment length */
127 1, /* maximum number of segments */
128 1, /* granularity */
129 0, /* flags (reserved) */
130 };
131
132 /*
133 * DMA attributes for a keep warm DRAM descriptor
134 */
135 static ddi_dma_attr_t kw_dma_attr = {
136 DMA_ATTR_V0, /* version of this structure */
137 0, /* lowest usable address */
138 0xffffffffU, /* highest usable address */
139 0xffffffffU, /* maximum DMAable byte count */
140 0x1000, /* alignment in bytes */
141 0x1000, /* burst sizes (any?) */
142 1, /* minimum transfer */
143 0xffffffffU, /* maximum transfer */
144 0xffffffffU, /* maximum segment length */
145 1, /* maximum number of segments */
146 1, /* granularity */
147 0, /* flags (reserved) */
148 };
149
150 /*
151 * DMA attributes for a ring descriptor
152 */
153 static ddi_dma_attr_t ring_desc_dma_attr = {
154 DMA_ATTR_V0, /* version of this structure */
155 0, /* lowest usable address */
156 0xffffffffU, /* highest usable address */
157 0xffffffffU, /* maximum DMAable byte count */
158 0x100, /* alignment in bytes */
159 0x100, /* burst sizes (any?) */
160 1, /* minimum transfer */
161 0xffffffffU, /* maximum transfer */
162 0xffffffffU, /* maximum segment length */
163 1, /* maximum number of segments */
164 1, /* granularity */
165 0, /* flags (reserved) */
166 };
167
168 /*
169 * DMA attributes for a cmd
170 */
171 static ddi_dma_attr_t cmd_dma_attr = {
172 DMA_ATTR_V0, /* version of this structure */
173 0, /* lowest usable address */
174 0xffffffffU, /* highest usable address */
175 0xffffffffU, /* maximum DMAable byte count */
176 4, /* alignment in bytes */
177 0x100, /* burst sizes (any?) */
178 1, /* minimum transfer */
179 0xffffffffU, /* maximum transfer */
180 0xffffffffU, /* maximum segment length */
181 1, /* maximum number of segments */
182 1, /* granularity */
183 0, /* flags (reserved) */
184 };
185
186 /*
187 * DMA attributes for a rx buffer
188 */
189 static ddi_dma_attr_t rx_buffer_dma_attr = {
190 DMA_ATTR_V0, /* version of this structure */
191 0, /* lowest usable address */
192 0xffffffffU, /* highest usable address */
193 0xffffffffU, /* maximum DMAable byte count */
194 0x100, /* alignment in bytes */
195 0x100, /* burst sizes (any?) */
196 1, /* minimum transfer */
197 0xffffffffU, /* maximum transfer */
198 0xffffffffU, /* maximum segment length */
199 1, /* maximum number of segments */
200 1, /* granularity */
201 0, /* flags (reserved) */
202 };
203
204 /*
205 * DMA attributes for a tx buffer.
206 * The hardware supports up to 4 segments per descriptor.
207 * For now all the wifi drivers put the whole frame in a single
208 * descriptor, so we define the maximum number of segments as 1,
209 * the same as rx_buffer. We may leverage the hardware's
210 * multi-segment ability in the future, which is why rx and tx
211 * buffer_dma_attr are not defined as the same structure.
212 */
213 static ddi_dma_attr_t tx_buffer_dma_attr = {
214 DMA_ATTR_V0, /* version of this structure */
215 0, /* lowest usable address */
216 0xffffffffU, /* highest usable address */
217 0xffffffffU, /* maximum DMAable byte count */
218 4, /* alignment in bytes */
219 0x100, /* burst sizes (any?) */
220 1, /* minimum transfer */
221 0xffffffffU, /* maximum transfer */
222 0xffffffffU, /* maximum segment length */
223 1, /* maximum number of segments */
224 1, /* granularity */
225 0, /* flags (reserved) */
226 };
227
228 /*
229 * DMA attributes for text and data part in the firmware
230 */
231 static ddi_dma_attr_t fw_dma_attr = {
232 DMA_ATTR_V0, /* version of this structure */
233 0, /* lowest usable address */
234 0xffffffffU, /* highest usable address */
235 0x7fffffff, /* maximum DMAable byte count */
236 0x10, /* alignment in bytes */
237 0x100, /* burst sizes (any?) */
238 1, /* minimum transfer */
239 0xffffffffU, /* maximum transfer */
240 0xffffffffU, /* maximum segment length */
241 1, /* maximum number of segments */
242 1, /* granularity */
243 0, /* flags (reserved) */
244 };
245
246 /*
247 * regs access attributes
248 */
249 static ddi_device_acc_attr_t iwh_reg_accattr = {
250 DDI_DEVICE_ATTR_V0,
251 DDI_STRUCTURE_LE_ACC,
252 DDI_STRICTORDER_ACC,
253 DDI_DEFAULT_ACC
254 };
255
256 /*
257 * DMA access attributes for descriptor
258 */
259 static ddi_device_acc_attr_t iwh_dma_descattr = {
260 DDI_DEVICE_ATTR_V0,
261 DDI_STRUCTURE_LE_ACC,
262 DDI_STRICTORDER_ACC,
263 DDI_DEFAULT_ACC
264 };
265
266 /*
267 * DMA access attributes
268 */
269 static ddi_device_acc_attr_t iwh_dma_accattr = {
270 DDI_DEVICE_ATTR_V0,
271 DDI_NEVERSWAP_ACC,
272 DDI_STRICTORDER_ACC,
273 DDI_DEFAULT_ACC
274 };
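/*
 * Note on the two attribute sets above: descriptors and other
 * device-interpreted structures use DDI_STRUCTURE_LE_ACC, so the DDI
 * framework byte-swaps accesses on big-endian hosts to match the
 * device's little-endian layout, while plain frame/command buffers use
 * DDI_NEVERSWAP_ACC and are treated as raw byte streams.
 */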
275
276 static int iwh_ring_init(iwh_sc_t *);
277 static void iwh_ring_free(iwh_sc_t *);
278 static int iwh_alloc_shared(iwh_sc_t *);
279 static void iwh_free_shared(iwh_sc_t *);
280 static int iwh_alloc_kw(iwh_sc_t *);
281 static void iwh_free_kw(iwh_sc_t *);
282 static int iwh_alloc_fw_dma(iwh_sc_t *);
283 static void iwh_free_fw_dma(iwh_sc_t *);
284 static int iwh_alloc_rx_ring(iwh_sc_t *);
285 static void iwh_reset_rx_ring(iwh_sc_t *);
286 static void iwh_free_rx_ring(iwh_sc_t *);
287 static int iwh_alloc_tx_ring(iwh_sc_t *, iwh_tx_ring_t *,
288 int, int);
289 static void iwh_reset_tx_ring(iwh_sc_t *, iwh_tx_ring_t *);
290 static void iwh_free_tx_ring(iwh_tx_ring_t *);
291 static ieee80211_node_t *iwh_node_alloc(ieee80211com_t *);
292 static void iwh_node_free(ieee80211_node_t *);
293 static int iwh_newstate(ieee80211com_t *, enum ieee80211_state, int);
294 static void iwh_mac_access_enter(iwh_sc_t *);
295 static void iwh_mac_access_exit(iwh_sc_t *);
296 static uint32_t iwh_reg_read(iwh_sc_t *, uint32_t);
297 static void iwh_reg_write(iwh_sc_t *, uint32_t, uint32_t);
298 static int iwh_load_init_firmware(iwh_sc_t *);
299 static int iwh_load_run_firmware(iwh_sc_t *);
300 static void iwh_tx_intr(iwh_sc_t *, iwh_rx_desc_t *);
301 static void iwh_cmd_intr(iwh_sc_t *, iwh_rx_desc_t *);
302 static uint_t iwh_intr(caddr_t, caddr_t);
303 static int iwh_eep_load(iwh_sc_t *);
304 static void iwh_get_mac_from_eep(iwh_sc_t *);
305 static int iwh_eep_sem_down(iwh_sc_t *);
306 static void iwh_eep_sem_up(iwh_sc_t *);
307 static uint_t iwh_rx_softintr(caddr_t, caddr_t);
308 static uint8_t iwh_rate_to_plcp(int);
309 static int iwh_cmd(iwh_sc_t *, int, const void *, int, int);
310 static void iwh_set_led(iwh_sc_t *, uint8_t, uint8_t, uint8_t);
311 static int iwh_hw_set_before_auth(iwh_sc_t *);
312 static int iwh_scan(iwh_sc_t *);
313 static int iwh_config(iwh_sc_t *);
314 static void iwh_stop_master(iwh_sc_t *);
315 static int iwh_power_up(iwh_sc_t *);
316 static int iwh_preinit(iwh_sc_t *);
317 static int iwh_init(iwh_sc_t *);
318 static void iwh_stop(iwh_sc_t *);
319 static int iwh_quiesce(dev_info_t *t);
320 static void iwh_amrr_init(iwh_amrr_t *);
321 static void iwh_amrr_timeout(iwh_sc_t *);
322 static void iwh_amrr_ratectl(void *, ieee80211_node_t *);
323 static void iwh_ucode_alive(iwh_sc_t *, iwh_rx_desc_t *);
324 static void iwh_rx_phy_intr(iwh_sc_t *, iwh_rx_desc_t *);
325 static void iwh_rx_mpdu_intr(iwh_sc_t *, iwh_rx_desc_t *);
326 static void iwh_release_calib_buffer(iwh_sc_t *);
327 static int iwh_init_common(iwh_sc_t *);
328 static uint8_t *iwh_eep_addr_trans(iwh_sc_t *, uint32_t);
329 static int iwh_put_seg_fw(iwh_sc_t *, uint32_t, uint32_t, uint32_t);
330 static int iwh_alive_common(iwh_sc_t *);
331 static void iwh_save_calib_result(iwh_sc_t *, iwh_rx_desc_t *);
332 static int iwh_tx_power_table(iwh_sc_t *, int);
333 static int iwh_attach(dev_info_t *, ddi_attach_cmd_t);
334 static int iwh_detach(dev_info_t *, ddi_detach_cmd_t);
335 static void iwh_destroy_locks(iwh_sc_t *);
336 static int iwh_send(ieee80211com_t *, mblk_t *, uint8_t);
337 static void iwh_thread(iwh_sc_t *);
338 static int iwh_run_state_config(iwh_sc_t *);
339 static int iwh_fast_recover(iwh_sc_t *);
340 static int iwh_wme_update(ieee80211com_t *);
341 static int iwh_qosparam_to_hw(iwh_sc_t *, int);
342 static int iwh_wme_to_qos_ac(int);
343 static uint16_t iwh_cw_e_to_cw(uint8_t);
344 static int iwh_wmeparam_check(struct wmeParams *);
345 static inline int iwh_wme_tid_qos_ac(int);
346 static inline int iwh_qos_ac_to_txq(int);
347 static int iwh_wme_tid_to_txq(int);
348 static void iwh_init_ht_conf(iwh_sc_t *);
349 static void iwh_overwrite_11n_rateset(iwh_sc_t *);
350 static void iwh_overwrite_ic_default(iwh_sc_t *);
351 static void iwh_config_rxon_chain(iwh_sc_t *);
352 static int iwh_add_ap_sta(iwh_sc_t *);
353 static int iwh_ap_lq(iwh_sc_t *);
354 static void iwh_recv_action(struct ieee80211_node *,
355 const uint8_t *, const uint8_t *);
356 static int iwh_send_action(struct ieee80211_node *,
357 int, int, uint16_t[4]);
358 static int iwh_is_max_rate(ieee80211_node_t *);
359 static int iwh_is_min_rate(ieee80211_node_t *);
360 static void iwh_increase_rate(ieee80211_node_t *);
361 static void iwh_decrease_rate(ieee80211_node_t *);
362 static int iwh_alloc_dma_mem(iwh_sc_t *, size_t,
363 ddi_dma_attr_t *, ddi_device_acc_attr_t *,
364 uint_t, iwh_dma_t *);
365 static void iwh_free_dma_mem(iwh_dma_t *);
366 static int iwh_reset_hw(iwh_sc_t *);
367
368 /*
369 * GLD specific operations
370 */
371 static int iwh_m_stat(void *, uint_t, uint64_t *);
372 static int iwh_m_start(void *);
373 static void iwh_m_stop(void *);
374 static int iwh_m_unicst(void *, const uint8_t *);
375 static int iwh_m_multicst(void *, boolean_t, const uint8_t *);
376 static int iwh_m_promisc(void *, boolean_t);
377 static mblk_t *iwh_m_tx(void *, mblk_t *);
378 static void iwh_m_ioctl(void *, queue_t *, mblk_t *);
379 static int iwh_m_setprop(void *arg, const char *pr_name,
380 mac_prop_id_t wldp_pr_num, uint_t wldp_length, const void *wldp_buf);
381 static int iwh_m_getprop(void *arg, const char *pr_name,
382 mac_prop_id_t wldp_pr_num, uint_t wldp_length,
383 void *wldp_buf);
384 static void iwh_m_propinfo(void *arg, const char *pr_name,
385 mac_prop_id_t wldp_pr_num, mac_prop_info_handle_t mph);
386
387 /*
388 * Supported rates for 802.11b/g modes (in 500Kbps unit).
389 */
390 static const struct ieee80211_rateset iwh_rateset_11b =
391 { 4, { 2, 4, 11, 22 } };
392
393 static const struct ieee80211_rateset iwh_rateset_11g =
394 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
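/*
 * Worked example of the 500Kbps unit: 2 -> 1Mbps, 4 -> 2Mbps,
 * 11 -> 5.5Mbps, 22 -> 11Mbps, 108 -> 54Mbps.
 */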
395
396 /*
397 * Default 11n rates supported by this station.
398 */
399 extern struct ieee80211_htrateset ieee80211_rateset_11n;
400
401 /*
402 * For mfthread only
403 */
404 extern pri_t minclsyspri;
405
406 #define DRV_NAME_SP "iwh"
407
408 /*
409 * Module Loading Data & Entry Points
410 */
411 DDI_DEFINE_STREAM_OPS(iwh_devops, nulldev, nulldev, iwh_attach,
412 iwh_detach, nodev, NULL, D_MP, NULL, iwh_quiesce);
413
414 static struct modldrv iwh_modldrv = {
415 &mod_driverops,
416 "Intel(R) ShirleyPeak/EchoPeak driver(N)",
417 &iwh_devops
418 };
419
420 static struct modlinkage iwh_modlinkage = {
421 MODREV_1,
422 &iwh_modldrv,
423 NULL
424 };
425
426 int
427 _init(void)
428 {
429 int status;
430
431 status = ddi_soft_state_init(&iwh_soft_state_p,
432 sizeof (iwh_sc_t), 1);
433 if (status != DDI_SUCCESS) {
434 return (status);
435 }
436
437 mac_init_ops(&iwh_devops, DRV_NAME_SP);
438 status = mod_install(&iwh_modlinkage);
439 if (status != DDI_SUCCESS) {
440 mac_fini_ops(&iwh_devops);
441 ddi_soft_state_fini(&iwh_soft_state_p);
442 }
443
444 return (status);
445 }
446
447 int
448 _fini(void)
449 {
450 int status;
451
452 status = mod_remove(&iwh_modlinkage);
453 if (DDI_SUCCESS == status) {
454 mac_fini_ops(&iwh_devops);
455 ddi_soft_state_fini(&iwh_soft_state_p);
456 }
457
458 return (status);
459 }
460
461 int
462 _info(struct modinfo *mip)
463 {
464 return (mod_info(&iwh_modlinkage, mip));
465 }
466
467 /*
468 * Mac Call Back entries
469 */
470 mac_callbacks_t iwh_m_callbacks = {
471 MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
472 iwh_m_stat,
473 iwh_m_start,
474 iwh_m_stop,
475 iwh_m_promisc,
476 iwh_m_multicst,
477 iwh_m_unicst,
478 iwh_m_tx,
479 NULL,
480 iwh_m_ioctl,
481 NULL,
482 NULL,
483 NULL,
484 iwh_m_setprop,
485 iwh_m_getprop,
486 iwh_m_propinfo
487 };
488
489 #ifdef DEBUG
490 void
491 iwh_dbg(uint32_t flags, const char *fmt, ...)
492 {
493 va_list ap;
494
495 if (flags & iwh_dbg_flags) {
496 va_start(ap, fmt);
497 vcmn_err(CE_NOTE, fmt, ap);
498 va_end(ap);
499 }
500 }
501 #endif /* DEBUG */
502
503 /*
504 * device operations
505 */
506 int
507 iwh_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
508 {
509 iwh_sc_t *sc;
510 ieee80211com_t *ic;
511 int instance, i;
512 char strbuf[32];
513 wifi_data_t wd = { 0 };
514 mac_register_t *macp;
515 int intr_type;
516 int intr_count;
517 int intr_actual;
518 int err = DDI_FAILURE;
519
520 switch (cmd) {
521 case DDI_ATTACH:
522 break;
523
524 case DDI_RESUME:
525 instance = ddi_get_instance(dip);
526 sc = ddi_get_soft_state(iwh_soft_state_p,
527 instance);
528 ASSERT(sc != NULL);
529
530 if (sc->sc_flags & IWH_F_RUNNING) {
531 (void) iwh_init(sc);
532 }
533
534 atomic_and_32(&sc->sc_flags, ~IWH_F_SUSPEND);
535
536 IWH_DBG((IWH_DEBUG_RESUME, "iwh_attach(): "
537 "resume\n"));
538 return (DDI_SUCCESS);
539
540 default:
541 goto attach_fail1;
542 }
543
544 instance = ddi_get_instance(dip);
545 err = ddi_soft_state_zalloc(iwh_soft_state_p, instance);
546 if (err != DDI_SUCCESS) {
547 cmn_err(CE_WARN, "iwh_attach(): "
548 "failed to allocate soft state\n");
549 goto attach_fail1;
550 }
551
552 sc = ddi_get_soft_state(iwh_soft_state_p, instance);
553 ASSERT(sc != NULL);
554
555 sc->sc_dip = dip;
556
557 /*
558 * map config space
559 */
560 err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
561 &iwh_reg_accattr, &sc->sc_cfg_handle);
562 if (err != DDI_SUCCESS) {
563 cmn_err(CE_WARN, "iwh_attach(): "
564 "failed to map config spaces regs\n");
565 goto attach_fail2;
566 }
567
568 sc->sc_dev_id = ddi_get16(sc->sc_cfg_handle,
569 (uint16_t *)(sc->sc_cfg_base + PCI_CONF_DEVID));
570 if ((sc->sc_dev_id != 0x4232) &&
571 (sc->sc_dev_id != 0x4235) &&
572 (sc->sc_dev_id != 0x4236) &&
573 (sc->sc_dev_id != 0x4237) &&
574 (sc->sc_dev_id != 0x423a) &&
575 (sc->sc_dev_id != 0x423b) &&
576 (sc->sc_dev_id != 0x423c) &&
577 (sc->sc_dev_id != 0x423d)) {
578 cmn_err(CE_WARN, "iwh_attach(): "
579 "Do not support this device\n");
580 goto attach_fail3;
581 }
582
583 iwh_init_ht_conf(sc);
584 iwh_overwrite_11n_rateset(sc);
585
586 sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
587 (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
588
589 /*
590 * keep from disturbing C3 state of CPU
591 */
592 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base +
593 PCI_CFG_RETRY_TIMEOUT), 0);
594
595 /*
596 * determine the size of buffer for frame and command to ucode
597 */
598 sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
599 (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
600 if (!sc->sc_clsz) {
601 sc->sc_clsz = 16;
602 }
603 sc->sc_clsz = (sc->sc_clsz << 2);
604
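/*
 * The buffer must hold a maximum-sized 802.11 frame (header + MTU +
 * CRC + WEP overhead), rounded up to the cache line size; the extra
 * 0x2000 appears to be headroom required by the ucode for receive
 * buffers (assumption -- the original code does not say why).
 */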
605 sc->sc_dmabuf_sz = roundup(0x2000 + sizeof (struct ieee80211_frame) +
606 IEEE80211_MTU + IEEE80211_CRC_LEN +
607 (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
608 IEEE80211_WEP_CRCLEN), sc->sc_clsz);
609
610 /*
611 * Map operating registers
612 */
613 err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
614 0, 0, &iwh_reg_accattr, &sc->sc_handle);
615 if (err != DDI_SUCCESS) {
616 cmn_err(CE_WARN, "iwh_attach(): "
617 "failed to map device regs\n");
618 goto attach_fail3;
619 }
620
621 /*
622 * this is used to differentiate the hardware type
623 */
624 sc->sc_hw_rev = IWH_READ(sc, CSR_HW_REV);
625
626 err = ddi_intr_get_supported_types(dip, &intr_type);
627 if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
628 cmn_err(CE_WARN, "iwh_attach(): "
629 "fixed type interrupt is not supported\n");
630 goto attach_fail4;
631 }
632
633 err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
634 if ((err != DDI_SUCCESS) || (intr_count != 1)) {
635 cmn_err(CE_WARN, "iwh_attach(): "
636 "no fixed interrupts\n");
637 goto attach_fail4;
638 }
639
640 sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
641
642 err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
643 intr_count, &intr_actual, 0);
644 if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
645 cmn_err(CE_WARN, "iwh_attach(): "
646 "ddi_intr_alloc() failed 0x%x\n", err);
647 goto attach_fail5;
648 }
649
650 err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
651 if (err != DDI_SUCCESS) {
652 cmn_err(CE_WARN, "iwh_attach(): "
653 "ddi_intr_get_pri() failed 0x%x\n", err);
654 goto attach_fail6;
655 }
656
657 mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
658 DDI_INTR_PRI(sc->sc_intr_pri));
659 mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
660 DDI_INTR_PRI(sc->sc_intr_pri));
661 mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
662 DDI_INTR_PRI(sc->sc_intr_pri));
663
664 cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
665 cv_init(&sc->sc_put_seg_cv, NULL, CV_DRIVER, NULL);
666 cv_init(&sc->sc_ucode_cv, NULL, CV_DRIVER, NULL);
667
668 /*
669 * initialize the mfthread
670 */
671 cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
672 sc->sc_mf_thread = NULL;
673 sc->sc_mf_thread_switch = 0;
674
675 /*
676 * Allocate shared buffer for communication between driver and ucode.
677 */
678 err = iwh_alloc_shared(sc);
679 if (err != DDI_SUCCESS) {
680 cmn_err(CE_WARN, "iwh_attach(): "
681 "failed to allocate shared page\n");
682 goto attach_fail7;
683 }
684
685 (void) memset(sc->sc_shared, 0, sizeof (iwh_shared_t));
686
687 /*
688 * Allocate keep warm page.
689 */
690 err = iwh_alloc_kw(sc);
691 if (err != DDI_SUCCESS) {
692 cmn_err(CE_WARN, "iwh_attach(): "
693 "failed to allocate keep warm page\n");
694 goto attach_fail8;
695 }
696
697 err = iwh_reset_hw(sc);
698 if (err != IWH_SUCCESS) {
699 cmn_err(CE_WARN, "iwh_attach(): "
700 "failed to reset hardware\n");
701 goto attach_fail9;
702 }
703
704 /*
705 * Do some necessary hardware initializations.
706 */
707 err = iwh_preinit(sc);
708 if (err != IWH_SUCCESS) {
709 cmn_err(CE_WARN, "iwh_attach(): "
710 "failed to initialize hardware\n");
711 goto attach_fail9;
712 }
713
714 /*
715 * get hardware configurations from eeprom
716 */
717 err = iwh_eep_load(sc);
718 if (err != IWH_SUCCESS) {
719 cmn_err(CE_WARN, "iwh_attach(): "
720 "failed to load eeprom\n");
721 goto attach_fail9;
722 }
723
724 if (IWH_READ_EEP_SHORT(sc, EEP_VERSION) < 0x011a) {
725 IWH_DBG((IWH_DEBUG_EEPROM, "iwh_attach(): "
726 "unsupported eeprom detected\n"));
727 goto attach_fail9;
728 }
729
730 /*
731 * get MAC address of this chipset
732 */
733 iwh_get_mac_from_eep(sc);
734
735 /*
736 * calibration information from EEPROM
737 */
738 sc->sc_eep_calib = (struct iwh_eep_calibration *)
739 iwh_eep_addr_trans(sc, EEP_CALIBRATION);
740
741 /*
742 * initialize TX and RX ring buffers
743 */
744 err = iwh_ring_init(sc);
745 if (err != DDI_SUCCESS) {
746 cmn_err(CE_WARN, "iwh_attach(): "
747 "failed to allocate and initialize ring\n");
748 goto attach_fail9;
749 }
750
751 if ((0x423c == sc->sc_dev_id) || (0x423d == sc->sc_dev_id)) {
752 sc->sc_hdr = (iwh_firmware_hdr_t *)iwh_fw_5150_bin;
753 } else {
754 sc->sc_hdr = (iwh_firmware_hdr_t *)iwh_fw_5000_bin;
755 }
756
757 /*
758 * copy ucode to dma buffer
759 */
760 err = iwh_alloc_fw_dma(sc);
761 if (err != DDI_SUCCESS) {
762 cmn_err(CE_WARN, "iwh_attach(): "
763 "failed to allocate firmware dma\n");
764 goto attach_fail10;
765 }
766
767 /*
768 * Initialize the wifi part, which will be used by
769 * 802.11 module
770 */
771 ic = &sc->sc_ic;
772 ic->ic_phytype = IEEE80211_T_HT;
773 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
774 ic->ic_state = IEEE80211_S_INIT;
775 ic->ic_maxrssi = 100; /* experimental number */
776 ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
777 IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
778
779 /*
780 * Support WPA/WPA2
781 */
782 ic->ic_caps |= IEEE80211_C_WPA;
783
784 /*
785 * Support QoS/WME
786 */
787 ic->ic_caps |= IEEE80211_C_WME;
788 ic->ic_wme.wme_update = iwh_wme_update;
789
790 /*
791 * Support 802.11n/HT
792 */
793 if (sc->sc_ht_conf.ht_support) {
794 ic->ic_htcaps = IEEE80211_HTC_HT |
795 IEEE80211_HTC_AMSDU;
796 ic->ic_htcaps |= IEEE80211_HTCAP_MAXAMSDU_7935;
797 }
798
799 /*
800 * set supported .11b and .11g rates
801 */
802 ic->ic_sup_rates[IEEE80211_MODE_11B] = iwh_rateset_11b;
803 ic->ic_sup_rates[IEEE80211_MODE_11G] = iwh_rateset_11g;
804
805 /*
806 * set supported .11b and .11g channels (1 through 11)
807 */
808 for (i = 1; i <= 11; i++) {
809 ic->ic_sup_channels[i].ich_freq =
810 ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
811 ic->ic_sup_channels[i].ich_flags =
812 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
813 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
814 IEEE80211_CHAN_PASSIVE;
815
816 if (sc->sc_ht_conf.cap & HT_CAP_SUP_WIDTH) {
817 ic->ic_sup_channels[i].ich_flags |=
818 IEEE80211_CHAN_HT40;
819 } else {
820 ic->ic_sup_channels[i].ich_flags |=
821 IEEE80211_CHAN_HT20;
822 }
823 }
824
825 ic->ic_ibss_chan = &ic->ic_sup_channels[0];
826 ic->ic_xmit = iwh_send;
827
828 /*
829 * attach to 802.11 module
830 */
831 ieee80211_attach(ic);
832
833 /*
834 * each instance has its own WPA door
835 */
836 (void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
837 ddi_driver_name(dip),
838 ddi_get_instance(dip));
839
840 /*
841 * Overwrite 80211 default configurations.
842 */
843 iwh_overwrite_ic_default(sc);
844
845 /*
846 * initialize 802.11 module
847 */
848 ieee80211_media_init(ic);
849
850 /*
851 * initialize default tx key
852 */
853 ic->ic_def_txkey = 0;
854
855 err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
856 iwh_rx_softintr, (caddr_t)sc);
857 if (err != DDI_SUCCESS) {
858 cmn_err(CE_WARN, "iwh_attach(): "
859 "add soft interrupt failed\n");
860 goto attach_fail12;
861 }
862
863 err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwh_intr,
864 (caddr_t)sc, NULL);
865 if (err != DDI_SUCCESS) {
866 cmn_err(CE_WARN, "iwh_attach(): "
867 "ddi_intr_add_handle() failed\n");
868 goto attach_fail13;
869 }
870
871 err = ddi_intr_enable(sc->sc_intr_htable[0]);
872 if (err != DDI_SUCCESS) {
873 cmn_err(CE_WARN, "iwh_attach(): "
874 "ddi_intr_enable() failed\n");
875 goto attach_fail14;
876 }
877
878 /*
879 * Initialize pointer to device specific functions
880 */
881 wd.wd_secalloc = WIFI_SEC_NONE;
882 wd.wd_opmode = ic->ic_opmode;
883 IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
884
885 /*
886 * create relation to GLD
887 */
888 macp = mac_alloc(MAC_VERSION);
889 if (NULL == macp) {
890 cmn_err(CE_WARN, "iwh_attach(): "
891 "failed to do mac_alloc()\n");
892 goto attach_fail15;
893 }
894
895 macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
896 macp->m_driver = sc;
897 macp->m_dip = dip;
898 macp->m_src_addr = ic->ic_macaddr;
899 macp->m_callbacks = &iwh_m_callbacks;
900 macp->m_min_sdu = 0;
901 macp->m_max_sdu = IEEE80211_MTU;
902 macp->m_pdata = &wd;
903 macp->m_pdata_size = sizeof (wd);
904
905 /*
906 * Register the macp to mac
907 */
908 err = mac_register(macp, &ic->ic_mach);
909 mac_free(macp);
910 if (err != DDI_SUCCESS) {
911 cmn_err(CE_WARN, "iwh_attach(): "
912 "failed to do mac_register()\n");
913 goto attach_fail15;
914 }
915
916 /*
917 * Create minor node of type DDI_NT_NET_WIFI
918 */
919 (void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_SP"%d", instance);
920 err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
921 instance + 1, DDI_NT_NET_WIFI, 0);
922 if (err != DDI_SUCCESS) {
923 cmn_err(CE_WARN, "iwh_attach(): "
924 "failed to do ddi_create_minor_node()\n");
925 }
926
927 /*
928 * Notify link is down now
929 */
930 mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
931
932 /*
933 * create the mf thread to handle link status updates,
934 * fatal error recovery, etc.
935 */
936 sc->sc_mf_thread_switch = 1;
937 if (NULL == sc->sc_mf_thread) {
938 sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
939 iwh_thread, sc, 0, &p0, TS_RUN, minclsyspri);
940 }
941
942 atomic_or_32(&sc->sc_flags, IWH_F_ATTACHED);
943
944 return (DDI_SUCCESS);
945
946 attach_fail15:
947 (void) ddi_intr_disable(sc->sc_intr_htable[0]);
948
949 attach_fail14:
950 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
951
952 attach_fail13:
953 (void) ddi_intr_remove_softint(sc->sc_soft_hdl);
954 sc->sc_soft_hdl = NULL;
955
956 attach_fail12:
957 ieee80211_detach(ic);
958
959 attach_fail11:
960 iwh_free_fw_dma(sc);
961
962 attach_fail10:
963 iwh_ring_free(sc);
964
965 attach_fail9:
966 iwh_free_kw(sc);
967
968 attach_fail8:
969 iwh_free_shared(sc);
970
971 attach_fail7:
972 iwh_destroy_locks(sc);
973
974 attach_fail6:
975 (void) ddi_intr_free(sc->sc_intr_htable[0]);
976
977 attach_fail5:
978 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
979
980 attach_fail4:
981 ddi_regs_map_free(&sc->sc_handle);
982
983 attach_fail3:
984 ddi_regs_map_free(&sc->sc_cfg_handle);
985
986 attach_fail2:
987 ddi_soft_state_free(iwh_soft_state_p, instance);
988
989 attach_fail1:
990 return (DDI_FAILURE);
991 }
992
993 int
994 iwh_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
995 {
996 iwh_sc_t *sc;
997 ieee80211com_t *ic;
998 int err;
999
1000 sc = ddi_get_soft_state(iwh_soft_state_p, ddi_get_instance(dip));
1001 ASSERT(sc != NULL);
1002 ic = &sc->sc_ic;
1003
1004 switch (cmd) {
1005 case DDI_DETACH:
1006 break;
1007
1008 case DDI_SUSPEND:
1009 atomic_and_32(&sc->sc_flags, ~IWH_F_HW_ERR_RECOVER);
1010 atomic_and_32(&sc->sc_flags, ~IWH_F_RATE_AUTO_CTL);
1011
1012 atomic_or_32(&sc->sc_flags, IWH_F_SUSPEND);
1013
1014 if (sc->sc_flags & IWH_F_RUNNING) {
1015 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1016 iwh_stop(sc);
1017 }
1018
1019 IWH_DBG((IWH_DEBUG_RESUME, "iwh_detach(): "
1020 "suspend\n"));
1021 return (DDI_SUCCESS);
1022
1023 default:
1024 return (DDI_FAILURE);
1025 }
1026
1027 if (!(sc->sc_flags & IWH_F_ATTACHED)) {
1028 return (DDI_FAILURE);
1029 }
1030
1031 /*
1032 * Destroy the mf_thread
1033 */
1034 sc->sc_mf_thread_switch = 0;
1035
1036 mutex_enter(&sc->sc_mt_lock);
1037 while (sc->sc_mf_thread != NULL) {
1038 if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) {
1039 break;
1040 }
1041 }
1042 mutex_exit(&sc->sc_mt_lock);
1043
1044 err = mac_disable(sc->sc_ic.ic_mach);
1045 if (err != DDI_SUCCESS) {
1046 return (err);
1047 }
1048
1049 /*
1050 * stop chipset
1051 */
1052 iwh_stop(sc);
1053
1054 DELAY(500000);
1055
1056 /*
1057 * release buffer for calibration
1058 */
1059 iwh_release_calib_buffer(sc);
1060
1061 /*
1062 * Unregister from GLD
1063 */
1064 (void) mac_unregister(sc->sc_ic.ic_mach);
1065
1066 mutex_enter(&sc->sc_glock);
1067 iwh_free_fw_dma(sc);
1068 iwh_ring_free(sc);
1069 iwh_free_kw(sc);
1070 iwh_free_shared(sc);
1071 mutex_exit(&sc->sc_glock);
1072
1073 (void) ddi_intr_disable(sc->sc_intr_htable[0]);
1074 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
1075 (void) ddi_intr_free(sc->sc_intr_htable[0]);
1076 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
1077
1078 (void) ddi_intr_remove_softint(sc->sc_soft_hdl);
1079 sc->sc_soft_hdl = NULL;
1080
1081 /*
1082 * detach from 80211 module
1083 */
1084 ieee80211_detach(&sc->sc_ic);
1085
1086 iwh_destroy_locks(sc);
1087
1088 ddi_regs_map_free(&sc->sc_handle);
1089 ddi_regs_map_free(&sc->sc_cfg_handle);
1090 ddi_remove_minor_node(dip, NULL);
1091 ddi_soft_state_free(iwh_soft_state_p, ddi_get_instance(dip));
1092
1093 return (DDI_SUCCESS);
1094 }
1095
1096 /*
1097 * destroy all locks
1098 */
1099 static void
1100 iwh_destroy_locks(iwh_sc_t *sc)
1101 {
1102 cv_destroy(&sc->sc_mt_cv);
1103 cv_destroy(&sc->sc_cmd_cv);
1104 cv_destroy(&sc->sc_put_seg_cv);
1105 cv_destroy(&sc->sc_ucode_cv);
1106 mutex_destroy(&sc->sc_mt_lock);
1107 mutex_destroy(&sc->sc_tx_lock);
1108 mutex_destroy(&sc->sc_glock);
1109 }
1110
1111 /*
1112 * Allocate an area of memory and a DMA handle for accessing it
1113 */
1114 static int
1115 iwh_alloc_dma_mem(iwh_sc_t *sc, size_t memsize,
1116 ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
1117 uint_t dma_flags, iwh_dma_t *dma_p)
1118 {
1119 caddr_t vaddr;
1120 int err = DDI_FAILURE;
1121
1122 /*
1123 * Allocate handle
1124 */
1125 err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
1126 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
1127 if (err != DDI_SUCCESS) {
1128 dma_p->dma_hdl = NULL;
1129 return (DDI_FAILURE);
1130 }
1131
1132 /*
1133 * Allocate memory
1134 */
1135 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
1136 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
1137 DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
1138 if (err != DDI_SUCCESS) {
1139 ddi_dma_free_handle(&dma_p->dma_hdl);
1140 dma_p->dma_hdl = NULL;
1141 dma_p->acc_hdl = NULL;
1142 return (DDI_FAILURE);
1143 }
1144
1145 /*
1146 * Bind the two together
1147 */
1148 dma_p->mem_va = vaddr;
1149 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
1150 vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
1151 &dma_p->cookie, &dma_p->ncookies);
1152 if (err != DDI_DMA_MAPPED) {
1153 ddi_dma_mem_free(&dma_p->acc_hdl);
1154 ddi_dma_free_handle(&dma_p->dma_hdl);
1155 dma_p->acc_hdl = NULL;
1156 dma_p->dma_hdl = NULL;
1157 return (DDI_FAILURE);
1158 }
1159
1160 dma_p->nslots = ~0U;
1161 dma_p->size = ~0U;
1162 dma_p->token = ~0U;
1163 dma_p->offset = 0;
1164 return (DDI_SUCCESS);
1165 }
1166
1167 /*
1168 * Free one allocated area of DMAable memory
1169 */
1170 static void
1171 iwh_free_dma_mem(iwh_dma_t *dma_p)
1172 {
1173 if (dma_p->dma_hdl != NULL) {
1174 if (dma_p->ncookies) {
1175 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1176 dma_p->ncookies = 0;
1177 }
1178 ddi_dma_free_handle(&dma_p->dma_hdl);
1179 dma_p->dma_hdl = NULL;
1180 }
1181
1182 if (dma_p->acc_hdl != NULL) {
1183 ddi_dma_mem_free(&dma_p->acc_hdl);
1184 dma_p->acc_hdl = NULL;
1185 }
1186 }
1187
1188 /*
1189 * copy ucode into dma buffers
1190 */
1191 static int
1192 iwh_alloc_fw_dma(iwh_sc_t *sc)
1193 {
1194 int err = DDI_FAILURE;
1195 iwh_dma_t *dma_p;
1196 char *t;
1197
1198 /*
1199 * firmware image layout:
1200 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
1201 */
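/*
 * Each section's offset follows from the header: text starts right
 * after the header (sc_hdr + 1), data starts textsz bytes later, and
 * so on; the sizes come from the textsz/datasz/init_textsz/init_datasz
 * fields read below.
 */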
1202
1203 /*
1204 * copy text of runtime ucode
1205 */
1206 t = (char *)(sc->sc_hdr + 1);
1207 err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1208 &fw_dma_attr, &iwh_dma_accattr,
1209 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1210 &sc->sc_dma_fw_text);
1211 if (err != DDI_SUCCESS) {
1212 cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1213 "failed to allocate text dma memory.\n");
1214 goto fail;
1215 }
1216
1217 dma_p = &sc->sc_dma_fw_text;
1218
1219 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1220 "text[ncookies:%d addr:%lx size:%lx]\n",
1221 dma_p->ncookies, dma_p->cookie.dmac_address,
1222 dma_p->cookie.dmac_size));
1223
1224 bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->textsz));
1225
1226 /*
1227 * copy data and backup data of runtime ucode
1228 */
1229 t += LE_32(sc->sc_hdr->textsz);
1230 err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1231 &fw_dma_attr, &iwh_dma_accattr,
1232 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1233 &sc->sc_dma_fw_data);
1234 if (err != DDI_SUCCESS) {
1235 cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1236 "failed to allocate data dma memory\n");
1237 goto fail;
1238 }
1239
1240 dma_p = &sc->sc_dma_fw_data;
1241
1242 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1243 "data[ncookies:%d addr:%lx size:%lx]\n",
1244 dma_p->ncookies, dma_p->cookie.dmac_address,
1245 dma_p->cookie.dmac_size));
1246
1247 bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->datasz));
1248
1249 err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1250 &fw_dma_attr, &iwh_dma_accattr,
1251 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1252 &sc->sc_dma_fw_data_bak);
1253 if (err != DDI_SUCCESS) {
1254 cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1255 "failed to allocate data bakup dma memory\n");
1256 goto fail;
1257 }
1258
1259 dma_p = &sc->sc_dma_fw_data_bak;
1260
1261 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1262 "data_bak[ncookies:%d addr:%lx "
1263 "size:%lx]\n",
1264 dma_p->ncookies, dma_p->cookie.dmac_address,
1265 dma_p->cookie.dmac_size));
1266
1267 bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->datasz));
1268
1269 /*
1270 * copy text of init ucode
1271 */
1272 t += LE_32(sc->sc_hdr->datasz);
1273 err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1274 &fw_dma_attr, &iwh_dma_accattr,
1275 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1276 &sc->sc_dma_fw_init_text);
1277 if (err != DDI_SUCCESS) {
1278 cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1279 "failed to allocate init text dma memory\n");
1280 goto fail;
1281 }
1282
1283 dma_p = &sc->sc_dma_fw_init_text;
1284
1285 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1286 "init_text[ncookies:%d addr:%lx "
1287 "size:%lx]\n",
1288 dma_p->ncookies, dma_p->cookie.dmac_address,
1289 dma_p->cookie.dmac_size));
1290
1291 bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->init_textsz));
1292
1293 /*
1294 * copy data of init ucode
1295 */
1296 t += LE_32(sc->sc_hdr->init_textsz);
1297 err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1298 &fw_dma_attr, &iwh_dma_accattr,
1299 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1300 &sc->sc_dma_fw_init_data);
1301 if (err != DDI_SUCCESS) {
1302 cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1303 "failed to allocate init data dma memory\n");
1304 goto fail;
1305 }
1306
1307 dma_p = &sc->sc_dma_fw_init_data;
1308
1309 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1310 "init_data[ncookies:%d addr:%lx "
1311 "size:%lx]\n",
1312 dma_p->ncookies, dma_p->cookie.dmac_address,
1313 dma_p->cookie.dmac_size));
1314
1315 bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->init_datasz));
1316
1317 sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1318
1319 fail:
1320 return (err);
1321 }
1322
1323 static void
1324 iwh_free_fw_dma(iwh_sc_t *sc)
1325 {
1326 iwh_free_dma_mem(&sc->sc_dma_fw_text);
1327 iwh_free_dma_mem(&sc->sc_dma_fw_data);
1328 iwh_free_dma_mem(&sc->sc_dma_fw_data_bak);
1329 iwh_free_dma_mem(&sc->sc_dma_fw_init_text);
1330 iwh_free_dma_mem(&sc->sc_dma_fw_init_data);
1331 }
1332
1333 /*
1334 * Allocate a shared buffer between host and NIC.
1335 */
1336 static int
1337 iwh_alloc_shared(iwh_sc_t *sc)
1338 {
1339 #ifdef DEBUG
1340 iwh_dma_t *dma_p;
1341 #endif
1342 int err = DDI_FAILURE;
1343
1344 /*
1345 * must be aligned on a 4K-page boundary
1346 */
1347 err = iwh_alloc_dma_mem(sc, sizeof (iwh_shared_t),
1348 &sh_dma_attr, &iwh_dma_descattr,
1349 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1350 &sc->sc_dma_sh);
1351 if (err != DDI_SUCCESS) {
1352 goto fail;
1353 }
1354
1355 sc->sc_shared = (iwh_shared_t *)sc->sc_dma_sh.mem_va;
1356
1357 #ifdef DEBUG
1358 dma_p = &sc->sc_dma_sh;
1359 #endif
1360 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_shared(): "
1361 "sh[ncookies:%d addr:%lx size:%lx]\n",
1362 dma_p->ncookies, dma_p->cookie.dmac_address,
1363 dma_p->cookie.dmac_size));
1364
1365 return (err);
1366
1367 fail:
1368 iwh_free_shared(sc);
1369 return (err);
1370 }
1371
1372 static void
1373 iwh_free_shared(iwh_sc_t *sc)
1374 {
1375 iwh_free_dma_mem(&sc->sc_dma_sh);
1376 }
1377
1378 /*
1379 * Allocate a keep warm page.
1380 */
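/*
 * The "keep warm" buffer is a small DMA area that the device accesses
 * periodically to keep its DRAM interface responsive (hence the name);
 * its physical address is handed to the hardware elsewhere in the
 * driver.  (Descriptive note based on the common Intel WiFi keep-warm
 * mechanism; not stated in the original code.)
 */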
1381 static int
1382 iwh_alloc_kw(iwh_sc_t *sc)
1383 {
1384 #ifdef DEBUG
1385 iwh_dma_t *dma_p;
1386 #endif
1387 int err = DDI_FAILURE;
1388
1389 /*
1390 * must be aligned on a 4K-page boundary
1391 */
1392 err = iwh_alloc_dma_mem(sc, IWH_KW_SIZE,
1393 &kw_dma_attr, &iwh_dma_descattr,
1394 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1395 &sc->sc_dma_kw);
1396 if (err != DDI_SUCCESS) {
1397 goto fail;
1398 }
1399
1400 #ifdef DEBUG
1401 dma_p = &sc->sc_dma_kw;
1402 #endif
1403 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_kw(): "
1404 "kw[ncookies:%d addr:%lx size:%lx]\n",
1405 dma_p->ncookies, dma_p->cookie.dmac_address,
1406 dma_p->cookie.dmac_size));
1407
1408 return (err);
1409
1410 fail:
1411 iwh_free_kw(sc);
1412 return (err);
1413 }
1414
1415 static void
1416 iwh_free_kw(iwh_sc_t *sc)
1417 {
1418 iwh_free_dma_mem(&sc->sc_dma_kw);
1419 }
1420
1421 /*
1422 * initialize RX ring buffers
1423 */
1424 static int
1425 iwh_alloc_rx_ring(iwh_sc_t *sc)
1426 {
1427 iwh_rx_ring_t *ring;
1428 iwh_rx_data_t *data;
1429 #ifdef DEBUG
1430 iwh_dma_t *dma_p;
1431 #endif
1432 int i, err = DDI_FAILURE;
1433
1434 ring = &sc->sc_rxq;
1435 ring->cur = 0;
1436
1437 /*
1438 * allocate RX descriptor ring buffer
1439 */
1440 err = iwh_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
1441 &ring_desc_dma_attr, &iwh_dma_descattr,
1442 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1443 &ring->dma_desc);
1444 if (err != DDI_SUCCESS) {
1445 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1446 "dma alloc rx ring desc "
1447 "failed\n"));
1448 goto fail;
1449 }
1450
1451 ring->desc = (uint32_t *)ring->dma_desc.mem_va;
1452 #ifdef DEBUG
1453 dma_p = &ring->dma_desc;
1454 #endif
1455 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1456 "rx bd[ncookies:%d addr:%lx size:%lx]\n",
1457 dma_p->ncookies, dma_p->cookie.dmac_address,
1458 dma_p->cookie.dmac_size));
1459
1460 /*
1461 * Allocate Rx frame buffers.
1462 */
1463 for (i = 0; i < RX_QUEUE_SIZE; i++) {
1464 data = &ring->data[i];
1465 err = iwh_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1466 &rx_buffer_dma_attr, &iwh_dma_accattr,
1467 DDI_DMA_READ | DDI_DMA_STREAMING,
1468 &data->dma_data);
1469 if (err != DDI_SUCCESS) {
1470 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1471 "dma alloc rx ring "
1472 "buf[%d] failed\n", i));
1473 goto fail;
1474 }
1475 /*
1476 * physical address bits [8-36] are used,
1477 * instead of bits [0-31] as on the 3945.
1478 */
1479 ring->desc[i] = (uint32_t)
1480 (data->dma_data.cookie.dmac_address >> 8);
1481 }
1482
1483 #ifdef DEBUG
1484 dma_p = &ring->data[0].dma_data;
1485 #endif
1486 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1487 "rx buffer[0][ncookies:%d addr:%lx "
1488 "size:%lx]\n",
1489 dma_p->ncookies, dma_p->cookie.dmac_address,
1490 dma_p->cookie.dmac_size));
1491
1492 IWH_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1493
1494 return (err);
1495
1496 fail:
1497 iwh_free_rx_ring(sc);
1498 return (err);
1499 }
1500
1501 /*
1502 * disable RX ring
1503 */
1504 static void
1505 iwh_reset_rx_ring(iwh_sc_t *sc)
1506 {
1507 int n;
1508
1509 iwh_mac_access_enter(sc);
1510 IWH_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1511 for (n = 0; n < 2000; n++) {
1512 if (IWH_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) {
1513 break;
1514 }
1515 DELAY(1000);
1516 }
1517 #ifdef DEBUG
1518 if (2000 == n) {
1519 IWH_DBG((IWH_DEBUG_DMA, "iwh_reset_rx_ring(): "
1520 "timeout resetting Rx ring\n"));
1521 }
1522 #endif
1523 iwh_mac_access_exit(sc);
1524
1525 sc->sc_rxq.cur = 0;
1526 }
1527
1528 static void
1529 iwh_free_rx_ring(iwh_sc_t *sc)
1530 {
1531 int i;
1532
1533 for (i = 0; i < RX_QUEUE_SIZE; i++) {
1534 if (sc->sc_rxq.data[i].dma_data.dma_hdl) {
1535 IWH_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1536 DDI_DMA_SYNC_FORCPU);
1537 }
1538
1539 iwh_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1540 }
1541
1542 if (sc->sc_rxq.dma_desc.dma_hdl) {
1543 IWH_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1544 }
1545
1546 iwh_free_dma_mem(&sc->sc_rxq.dma_desc);
1547 }
1548
1549 /*
1550 * initialize TX ring buffers
1551 */
1552 static int
1553 iwh_alloc_tx_ring(iwh_sc_t *sc, iwh_tx_ring_t *ring,
1554 int slots, int qid)
1555 {
1556 iwh_tx_data_t *data;
1557 iwh_tx_desc_t *desc_h;
1558 uint32_t paddr_desc_h;
1559 iwh_cmd_t *cmd_h;
1560 uint32_t paddr_cmd_h;
1561 #ifdef DEBUG
1562 iwh_dma_t *dma_p;
1563 #endif
1564 int i, err = DDI_FAILURE;
1565
1566 ring->qid = qid;
1567 ring->count = TFD_QUEUE_SIZE_MAX;
1568 ring->window = slots;
1569 ring->queued = 0;
1570 ring->cur = 0;
1571 ring->desc_cur = 0;
1572
1573 /*
1574 * allocate buffer for TX descriptor ring
1575 */
1576 err = iwh_alloc_dma_mem(sc,
1577 TFD_QUEUE_SIZE_MAX * sizeof (iwh_tx_desc_t),
1578 &ring_desc_dma_attr, &iwh_dma_descattr,
1579 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1580 &ring->dma_desc);
1581 if (err != DDI_SUCCESS) {
1582 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1583 "dma alloc tx ring desc[%d] "
1584 "failed\n", qid));
1585 goto fail;
1586 }
1587
1588 #ifdef DEBUG
1589 dma_p = &ring->dma_desc;
1590 #endif
1591 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1592 "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1593 dma_p->ncookies, dma_p->cookie.dmac_address,
1594 dma_p->cookie.dmac_size));
1595
1596 desc_h = (iwh_tx_desc_t *)ring->dma_desc.mem_va;
1597 paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1598
1599 /*
1600 * allocate buffer for ucode command
1601 */
1602 err = iwh_alloc_dma_mem(sc,
1603 TFD_QUEUE_SIZE_MAX * sizeof (iwh_cmd_t),
1604 &cmd_dma_attr, &iwh_dma_accattr,
1605 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1606 &ring->dma_cmd);
1607 if (err != DDI_SUCCESS) {
1608 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1609 "dma alloc tx ring cmd[%d]"
1610 " failed\n", qid));
1611 goto fail;
1612 }
1613
1614 #ifdef DEBUG
1615 dma_p = &ring->dma_cmd;
1616 #endif
1617 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1618 "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1619 dma_p->ncookies, dma_p->cookie.dmac_address,
1620 dma_p->cookie.dmac_size));
1621
1622 cmd_h = (iwh_cmd_t *)ring->dma_cmd.mem_va;
1623 paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1624
1625 /*
1626 * Allocate Tx frame buffers.
1627 */
1628 ring->data = kmem_zalloc(sizeof (iwh_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1629 KM_NOSLEEP);
1630 if (NULL == ring->data) {
1631 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1632 "could not allocate "
1633 "tx data slots\n"));
1634 goto fail;
1635 }
1636
1637 for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1638 data = &ring->data[i];
1639 err = iwh_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1640 &tx_buffer_dma_attr, &iwh_dma_accattr,
1641 DDI_DMA_WRITE | DDI_DMA_STREAMING,
1642 &data->dma_data);
1643 if (err != DDI_SUCCESS) {
1644 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1645 "dma alloc tx "
1646 "ring buf[%d] failed\n", i));
1647 goto fail;
1648 }
1649
1650 data->desc = desc_h + i;
1651 data->paddr_desc = paddr_desc_h +
1652 _PTRDIFF(data->desc, desc_h);
1653 data->cmd = cmd_h + i;
1654 data->paddr_cmd = paddr_cmd_h +
1655 _PTRDIFF(data->cmd, cmd_h);
1656 }
1657 #ifdef DEBUG
1658 dma_p = &ring->data[0].dma_data;
1659 #endif
1660 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1661 "tx buffer[0][ncookies:%d addr:%lx "
1662 "size:%lx]\n",
1663 dma_p->ncookies, dma_p->cookie.dmac_address,
1664 dma_p->cookie.dmac_size));
1665
1666 return (err);
1667
1668 fail:
1669 iwh_free_tx_ring(ring);
1670
1671 return (err);
1672 }
1673
1674 /*
1675 * disable TX ring
1676 */
1677 static void
1678 iwh_reset_tx_ring(iwh_sc_t *sc, iwh_tx_ring_t *ring)
1679 {
1680 iwh_tx_data_t *data;
1681 int i, n;
1682
1683 iwh_mac_access_enter(sc);
1684
1685 IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1686 for (n = 0; n < 200; n++) {
1687 if (IWH_READ(sc, IWH_FH_TSSR_TX_STATUS_REG) &
1688 IWH_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) {
1689 break;
1690 }
1691 DELAY(10);
1692 }
1693
1694 #ifdef DEBUG
1695 if (200 == n) {
1696 IWH_DBG((IWH_DEBUG_DMA, "iwh_reset_tx_ring(): "
1697 "timeout reset tx ring %d\n",
1698 ring->qid));
1699 }
1700 #endif
1701
1702 iwh_mac_access_exit(sc);
1703
1704 /*
1705 * bypass the sync if we are quiesced
1706 */
1707 if (!(sc->sc_flags & IWH_F_QUIESCED)) {
1708 for (i = 0; i < ring->count; i++) {
1709 data = &ring->data[i];
1710 IWH_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1711 }
1712 }
1713
1714 ring->queued = 0;
1715 ring->cur = 0;
1716 ring->desc_cur = 0;
1717 }
1718
1719 static void
1720 iwh_free_tx_ring(iwh_tx_ring_t *ring)
1721 {
1722 int i;
1723
1724 if (ring->dma_desc.dma_hdl != NULL) {
1725 IWH_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1726 }
1727 iwh_free_dma_mem(&ring->dma_desc);
1728
1729 if (ring->dma_cmd.dma_hdl != NULL) {
1730 IWH_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1731 }
1732 iwh_free_dma_mem(&ring->dma_cmd);
1733
1734 if (ring->data != NULL) {
1735 for (i = 0; i < ring->count; i++) {
1736 if (ring->data[i].dma_data.dma_hdl) {
1737 IWH_DMA_SYNC(ring->data[i].dma_data,
1738 DDI_DMA_SYNC_FORDEV);
1739 }
1740 iwh_free_dma_mem(&ring->data[i].dma_data);
1741 }
1742 kmem_free(ring->data, ring->count * sizeof (iwh_tx_data_t));
1743 }
1744 }
1745
1746 /*
1747 * initialize TX and RX rings
1748 */
1749 static int
1750 iwh_ring_init(iwh_sc_t *sc)
1751 {
1752 int i, err = DDI_FAILURE;
1753
1754 for (i = 0; i < IWH_NUM_QUEUES; i++) {
1755 if (IWH_CMD_QUEUE_NUM == i) {
1756 continue;
1757 }
1758
1759 err = iwh_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1760 i);
1761 if (err != DDI_SUCCESS) {
1762 goto fail;
1763 }
1764 }
1765
1766 /*
1767 * initialize command queue
1768 */
1769 err = iwh_alloc_tx_ring(sc, &sc->sc_txq[IWH_CMD_QUEUE_NUM],
1770 TFD_CMD_SLOTS, IWH_CMD_QUEUE_NUM);
1771 if (err != DDI_SUCCESS) {
1772 goto fail;
1773 }
1774
1775 err = iwh_alloc_rx_ring(sc);
1776 if (err != DDI_SUCCESS) {
1777 goto fail;
1778 }
1779
1780 fail:
1781 return (err);
1782 }
1783
1784 static void
1785 iwh_ring_free(iwh_sc_t *sc)
1786 {
1787 int i = IWH_NUM_QUEUES;
1788
1789 iwh_free_rx_ring(sc);
1790 while (--i >= 0) {
1791 iwh_free_tx_ring(&sc->sc_txq[i]);
1792 }
1793 }
1794
1795 /* ARGSUSED */
1796 static ieee80211_node_t *
1797 iwh_node_alloc(ieee80211com_t *ic)
1798 {
1799 iwh_amrr_t *amrr;
1800
1801 amrr = kmem_zalloc(sizeof (iwh_amrr_t), KM_SLEEP);
1802 if (NULL == amrr) {
1803 cmn_err(CE_WARN, "iwh_node_alloc(): "
1804 "failed to allocate memory for amrr structure\n");
1805 return (NULL);
1806 }
1807
1808 iwh_amrr_init(amrr);
1809
1810 return (&amrr->in);
1811 }
1812
1813 static void
1814 iwh_node_free(ieee80211_node_t *in)
1815 {
1816 ieee80211com_t *ic;
1817
1818 if ((NULL == in) ||
1819 (NULL == in->in_ic)) {
1820 cmn_err(CE_WARN, "iwh_node_free() "
1821 "Got a NULL point from Net80211 module\n");
1822 return;
1823 }
1824 ic = in->in_ic;
1825
1826 if (ic->ic_node_cleanup != NULL) {
1827 ic->ic_node_cleanup(in);
1828 }
1829
1830 if (in->in_wpa_ie != NULL) {
1831 ieee80211_free(in->in_wpa_ie);
1832 }
1833
1834 if (in->in_wme_ie != NULL) {
1835 ieee80211_free(in->in_wme_ie);
1836 }
1837
1838 if (in->in_htcap_ie != NULL) {
1839 ieee80211_free(in->in_htcap_ie);
1840 }
1841
1842 kmem_free(in, sizeof (iwh_amrr_t));
1843 }
1844
1845 /*
1846 * Change the station's state. This function is invoked by the net80211
1847 * module when the station's state needs to change.
1848 */
1849 static int
1850 iwh_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1851 {
1852 iwh_sc_t *sc;
1853 ieee80211_node_t *in;
1854 enum ieee80211_state ostate;
1855 iwh_add_sta_t node;
1856 iwh_amrr_t *amrr;
1857 uint8_t r;
1858 int i, err = IWH_FAIL;
1859
1860 if (NULL == ic) {
1861 return (err);
1862 }
1863 sc = (iwh_sc_t *)ic;
1864 in = ic->ic_bss;
1865 ostate = ic->ic_state;
1866
1867 mutex_enter(&sc->sc_glock);
1868
1869 switch (nstate) {
1870 case IEEE80211_S_SCAN:
1871 switch (ostate) {
1872 case IEEE80211_S_INIT:
1873 atomic_or_32(&sc->sc_flags, IWH_F_SCANNING);
1874 iwh_set_led(sc, 2, 10, 2);
1875
1876 /*
1877 * clear association to receive beacons from
1878 * all BSS'es
1879 */
1880 sc->sc_config.assoc_id = 0;
1881 sc->sc_config.filter_flags &=
1882 ~LE_32(RXON_FILTER_ASSOC_MSK);
1883
1884 IWH_DBG((IWH_DEBUG_80211, "iwh_newstate(): "
1885 "config chan %d "
1886 "flags %x filter_flags %x\n",
1887 LE_16(sc->sc_config.chan),
1888 LE_32(sc->sc_config.flags),
1889 LE_32(sc->sc_config.filter_flags)));
1890
1891 err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
1892 sizeof (iwh_rxon_cmd_t), 1);
1893 if (err != IWH_SUCCESS) {
1894 cmn_err(CE_WARN, "iwh_newstate(): "
1895 "could not clear association\n");
1896 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1897 mutex_exit(&sc->sc_glock);
1898 return (err);
1899 }
1900
1901 /*
1902 * add broadcast node to send probe request
1903 */
1904 (void) memset(&node, 0, sizeof (node));
1905 (void) memset(&node.sta.addr, 0xff, IEEE80211_ADDR_LEN);
1906 node.sta.sta_id = IWH_BROADCAST_ID;
1907 err = iwh_cmd(sc, REPLY_ADD_STA, &node,
1908 sizeof (node), 1);
1909 if (err != IWH_SUCCESS) {
1910 cmn_err(CE_WARN, "iwh_newstate(): "
1911 "could not add broadcast node\n");
1912 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1913 mutex_exit(&sc->sc_glock);
1914 return (err);
1915 }
1916 break;
1917 case IEEE80211_S_SCAN:
1918 mutex_exit(&sc->sc_glock);
1919 /* step to next channel before actual FW scan */
1920 err = sc->sc_newstate(ic, nstate, arg);
1921 mutex_enter(&sc->sc_glock);
1922 if ((err != 0) || ((err = iwh_scan(sc)) != 0)) {
1923 cmn_err(CE_WARN, "iwh_newstate(): "
1924 "could not initiate scan\n");
1925 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1926 ieee80211_cancel_scan(ic);
1927 }
1928 mutex_exit(&sc->sc_glock);
1929 return (err);
1930 default:
1931 break;
1932 }
1933 sc->sc_clk = 0;
1934 break;
1935
1936 case IEEE80211_S_AUTH:
1937 if (ostate == IEEE80211_S_SCAN) {
1938 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1939 }
1940
1941 /*
1942 * reset state to handle reassociations correctly
1943 */
1944 sc->sc_config.assoc_id = 0;
1945 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1946
1947 /*
1948 * before sending authentication and association request frames,
1949 * we need to do some setup in the hardware, such as setting the
1950 * channel to the same one used by the target AP...
1951 */
1952 if ((err = iwh_hw_set_before_auth(sc)) != 0) {
1953 IWH_DBG((IWH_DEBUG_80211, "iwh_newstate(): "
1954 "could not send authentication request\n"));
1955 mutex_exit(&sc->sc_glock);
1956 return (err);
1957 }
1958 break;
1959
1960 case IEEE80211_S_RUN:
1961 if (ostate == IEEE80211_S_SCAN) {
1962 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1963 }
1964
1965 if (IEEE80211_M_MONITOR == ic->ic_opmode) {
1966 /*
1967 * let LED blink when monitoring
1968 */
1969 iwh_set_led(sc, 2, 10, 10);
1970 break;
1971 }
1972
1973 IWH_DBG((IWH_DEBUG_80211, "iwh_newstate(): "
1974 "associated.\n"));
1975
1976 err = iwh_run_state_config(sc);
1977 if (err != IWH_SUCCESS) {
1978 cmn_err(CE_WARN, "iwh_newstate(): "
1979 "failed to set up association\n");
1980 mutex_exit(&sc->sc_glock);
1981 return (err);
1982 }
1983
1984 /*
1985 * start automatic rate control
1986 */
1987 if ((in->in_flags & IEEE80211_NODE_HT) &&
1988 (sc->sc_ht_conf.ht_support) &&
1989 (in->in_htrates.rs_nrates > 0) &&
1990 (in->in_htrates.rs_nrates <= IEEE80211_HTRATE_MAXSIZE)) {
1991 amrr = (iwh_amrr_t *)in;
1992
1993 for (i = in->in_htrates.rs_nrates - 1; i > 0; i--) {
1994
1995 r = in->in_htrates.rs_rates[i] &
1996 IEEE80211_RATE_VAL;
1997 if ((r != 0) && (r <= 0xd) &&
1998 (sc->sc_ht_conf.tx_support_mcs[r/8] &
1999 (1 << (r%8)))) {
2000 amrr->ht_mcs_idx = r;
2001 atomic_or_32(&sc->sc_flags,
2002 IWH_F_RATE_AUTO_CTL);
2003 break;
2004 }
2005 }
2006 } else {
2007 if (IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) {
2008 atomic_or_32(&sc->sc_flags,
2009 IWH_F_RATE_AUTO_CTL);
2010
2011 /*
2012 * set rate to some reasonable initial value
2013 */
2014 i = in->in_rates.ir_nrates - 1;
2015 while (i > 0 && IEEE80211_RATE(i) > 72) {
2016 i--;
2017 }
2018 in->in_txrate = i;
2019
2020 } else {
2021 atomic_and_32(&sc->sc_flags,
2022 ~IWH_F_RATE_AUTO_CTL);
2023 }
2024 }
2025
2026 /*
2027 * turn LED on after association
2028 */
2029 iwh_set_led(sc, 2, 0, 1);
2030 break;
2031
2032 case IEEE80211_S_INIT:
2033 if (ostate == IEEE80211_S_SCAN) {
2034 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
2035 }
2036 /*
2037 * set LED off after init
2038 */
2039 iwh_set_led(sc, 2, 1, 0);
2040 break;
2041
2042 case IEEE80211_S_ASSOC:
2043 if (ostate == IEEE80211_S_SCAN) {
2044 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
2045 }
2046 break;
2047 }
2048
2049 mutex_exit(&sc->sc_glock);
2050
2051 return (sc->sc_newstate(ic, nstate, arg));
2052 }
2053
2054 /*
2055 * exclusive access to mac begin.
2056 */
2057 static void
2058 iwh_mac_access_enter(iwh_sc_t *sc)
2059 {
2060 uint32_t tmp;
2061 int n;
2062
2063 tmp = IWH_READ(sc, CSR_GP_CNTRL);
2064 IWH_WRITE(sc, CSR_GP_CNTRL,
2065 tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2066
2067 /*
2068 * wait until we succeed
2069 */
2070 for (n = 0; n < 1000; n++) {
2071 if ((IWH_READ(sc, CSR_GP_CNTRL) &
2072 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
2073 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
2074 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) {
2075 break;
2076 }
2077 DELAY(10);
2078 }
2079
2080 #ifdef DEBUG
2081 if (1000 == n) {
2082 IWH_DBG((IWH_DEBUG_PIO, "iwh_mac_access_enter(): "
2083 "could not lock memory\n"));
2084 }
2085 #endif
2086 }
2087
2088 /*
2089 * exclusive access to mac end.
2090 */
2091 static void
2092 iwh_mac_access_exit(iwh_sc_t *sc)
2093 {
2094 uint32_t tmp = IWH_READ(sc, CSR_GP_CNTRL);
2095 IWH_WRITE(sc, CSR_GP_CNTRL,
2096 tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2097 }
2098
2099 /*
2100 * this function is defined here (commented out) for future use:
2101 * static uint32_t
2102 * iwh_mem_read(iwh_sc_t *sc, uint32_t addr)
2103 * {
2104 * IWH_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
2105 * return (IWH_READ(sc, HBUS_TARG_MEM_RDAT));
2106 * }
2107 */
2108
2109 /*
2110 * write mac memory
2111 */
2112 static void
2113 iwh_mem_write(iwh_sc_t *sc, uint32_t addr, uint32_t data)
2114 {
2115 IWH_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
2116 IWH_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
2117 }
2118
2119 /*
2120 * read mac register
2121 */
2122 static uint32_t
2123 iwh_reg_read(iwh_sc_t *sc, uint32_t addr)
2124 {
2125 IWH_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
2126 return (IWH_READ(sc, HBUS_TARG_PRPH_RDAT));
2127 }
2128
2129 /*
2130 * write mac register
2131 */
2132 static void
2133 iwh_reg_write(iwh_sc_t *sc, uint32_t addr, uint32_t data)
2134 {
2135 IWH_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
2136 IWH_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
2137 }
2138
2139
2140 /*
2141 * steps of loading ucode:
2142 * load init ucode=>init alive=>calibrate=>
2143 * receive calibration result=>reinitialize NIC=>
2144 * load runtime ucode=>runtime alive=>
2145 * send calibration result=>running.
2146 */
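/*
 * Completion of each DMA segment load is signalled by the FH TX
 * interrupt: iwh_intr() sets IWH_F_PUT_SEG and signals sc_put_seg_cv.
 * The loaders below clear the flag, start the transfer with
 * iwh_put_seg_fw() and then wait on that condition variable with a
 * one second timeout.
 */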
2147 static int
2148 iwh_load_init_firmware(iwh_sc_t *sc)
2149 {
2150 int err = IWH_FAIL;
2151 clock_t clk;
2152
2153 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2154
2155 /*
2156 * load init_text section of uCode to hardware
2157 */
2158 err = iwh_put_seg_fw(sc, sc->sc_dma_fw_init_text.cookie.dmac_address,
2159 RTC_INST_LOWER_BOUND, sc->sc_dma_fw_init_text.cookie.dmac_size);
2160 if (err != IWH_SUCCESS) {
2161 cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2162 "failed to write init uCode.\n");
2163 return (err);
2164 }
2165
2166 clk = ddi_get_lbolt() + drv_usectohz(1000000);
2167
2168 /*
2169 * wait until loading of init_text completes or times out
2170 */
2171 while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2172 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2173 break;
2174 }
2175 }
2176
2177 if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2178 cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2179 "timeout waiting for init uCode load.\n");
2180 return (IWH_FAIL);
2181 }
2182
2183 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2184
2185 /*
2186 * load init_data section of uCode to hardware
2187 */
2188 err = iwh_put_seg_fw(sc, sc->sc_dma_fw_init_data.cookie.dmac_address,
2189 RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_init_data.cookie.dmac_size);
2190 if (err != IWH_SUCCESS) {
2191 cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2192 "failed to write init_data uCode.\n");
2193 return (err);
2194 }
2195
2196 clk = ddi_get_lbolt() + drv_usectohz(1000000);
2197
2198 /*
2199 * wait until loading of init_data completes or times out
2200 */
2201 while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2202 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2203 break;
2204 }
2205 }
2206
2207 if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2208 cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2209 "timeout waiting for init_data uCode load.\n");
2210 return (IWH_FAIL);
2211 }
2212
2213 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2214
2215 return (err);
2216 }
2217
2218 static int
2219 iwh_load_run_firmware(iwh_sc_t *sc)
2220 {
2221 int err = IWH_FAIL;
2222 clock_t clk;
2223
2224 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2225
2226 /*
2227 * load run_text section of uCode to hardware
2228 */
2229 err = iwh_put_seg_fw(sc, sc->sc_dma_fw_text.cookie.dmac_address,
2230 RTC_INST_LOWER_BOUND, sc->sc_dma_fw_text.cookie.dmac_size);
2231 if (err != IWH_SUCCESS) {
2232 cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2233 "failed to write run uCode.\n");
2234 return (err);
2235 }
2236
2237 clk = ddi_get_lbolt() + drv_usectohz(1000000);
2238
2239 /*
2240 * wait until loading of run_text completes or times out
2241 */
2242 while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2243 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2244 break;
2245 }
2246 }
2247
2248 if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2249 cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2250 "timeout waiting for run uCode load.\n");
2251 return (IWH_FAIL);
2252 }
2253
2254 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2255
2256 /*
2257 * load run_data section of uCode to hardware
2258 */
2259 err = iwh_put_seg_fw(sc, sc->sc_dma_fw_data_bak.cookie.dmac_address,
2260 RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_data.cookie.dmac_size);
2261 if (err != IWH_SUCCESS) {
2262 cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2263 "failed to write run_data uCode.\n");
2264 return (err);
2265 }
2266
2267 clk = ddi_get_lbolt() + drv_usectohz(1000000);
2268
2269 /*
2270 * wait until loading of run_data completes or times out
2271 */
2272 while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2273 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2274 break;
2275 }
2276 }
2277
2278 if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2279 cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2280 "timeout waiting for run_data uCode load.\n");
2281 return (IWH_FAIL);
2282 }
2283
2284 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2285
2286 return (err);
2287 }
2288
2289 /*
2290 * this function is invoked to save the PHY information
2291 * reported when a frame is received.
2292 */
2293 static void
2294 iwh_rx_phy_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2295 {
2296
2297 sc->sc_rx_phy_res.flag = 1;
2298
2299 bcopy((uint8_t *)(desc + 1), sc->sc_rx_phy_res.buf,
2300 sizeof (iwh_rx_phy_res_t));
2301 }
2302
2303 /*
2304 * this function is invoked to process the body of a
2305 * received frame.
2306 */
2307 static void
2308 iwh_rx_mpdu_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2309 {
2310 ieee80211com_t *ic = &sc->sc_ic;
2311 #ifdef DEBUG
2312 iwh_rx_ring_t *ring = &sc->sc_rxq;
2313 #endif
2314 struct ieee80211_frame *wh;
2315 struct iwh_rx_non_cfg_phy *phyinfo;
2316 struct iwh_rx_mpdu_body_size *mpdu_size;
2317 mblk_t *mp;
2318 int16_t t;
2319 uint16_t len, rssi, agc;
2320 uint32_t temp, crc, *tail;
2321 uint32_t arssi, brssi, crssi, mrssi;
2322 iwh_rx_phy_res_t *stat;
2323 ieee80211_node_t *in;
2324
2325 /*
2326 * assuming a non-11n frame here; 11n will be handled in phase-II
2327 */
2328 mpdu_size = (struct iwh_rx_mpdu_body_size *)(desc + 1);
2329 stat = (iwh_rx_phy_res_t *)sc->sc_rx_phy_res.buf;
2330 if (stat->cfg_phy_cnt > 20) {
2331 return;
2332 }
2333
2334 phyinfo = (struct iwh_rx_non_cfg_phy *)stat->non_cfg_phy;
2335 temp = LE_32(phyinfo->non_cfg_phy[IWH_RX_RES_AGC_IDX]);
2336 agc = (temp & IWH_OFDM_AGC_MSK) >> IWH_OFDM_AGC_BIT_POS;
2337
2338 temp = LE_32(phyinfo->non_cfg_phy[IWH_RX_RES_RSSI_AB_IDX]);
2339 arssi = (temp & IWH_OFDM_RSSI_A_MSK) >> IWH_OFDM_RSSI_A_BIT_POS;
2340 brssi = (temp & IWH_OFDM_RSSI_B_MSK) >> IWH_OFDM_RSSI_B_BIT_POS;
2341
2342 temp = LE_32(phyinfo->non_cfg_phy[IWH_RX_RES_RSSI_C_IDX]);
2343 crssi = (temp & IWH_OFDM_RSSI_C_MSK) >> IWH_OFDM_RSSI_C_BIT_POS;
2344
2345 mrssi = MAX(arssi, brssi);
2346 mrssi = MAX(mrssi, crssi);
2347
2348 t = mrssi - agc - IWH_RSSI_OFFSET;
2349 /*
2350 * convert dBm to percentage
2351 */
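/*
 * The quadratic below maps the signal level t (in dBm) onto a
 * percentage: t = -20 dBm or better yields 100, weaker signals fall
 * off quadratically (about 23 at -95 dBm), and the result is then
 * clamped to the 1..100 range.
 */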
2352 rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
2353 / (75 * 75);
2354 if (rssi > 100) {
2355 rssi = 100;
2356 }
2357 if (rssi < 1) {
2358 rssi = 1;
2359 }
2360
2361 /*
2362 * size of the frame, not including the FCS
2363 */
2364 len = LE_16(mpdu_size->byte_count);
2365 tail = (uint32_t *)((uint8_t *)(desc + 1) +
2366 sizeof (struct iwh_rx_mpdu_body_size) + len);
2367 bcopy(tail, &crc, 4);
2368
2369 IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2370 "rx intr: idx=%d phy_len=%x len=%d "
2371 "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
2372 "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
2373 len, stat->rate.r.s.rate, stat->channel,
2374 LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
2375 stat->cfg_phy_cnt, LE_32(crc)));
2376
2377 if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
2378 IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2379 "rx frame with invalid length\n"));
2380 return;
2381 }
2382
2383 /*
2384 * discard Rx frames with bad CRC
2385 */
2386 if ((LE_32(crc) &
2387 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
2388 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2389 IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2390 "rx crc error tail: %x\n",
2391 LE_32(crc)));
2392 sc->sc_rx_err++;
2393 return;
2394 }
2395
2396 wh = (struct ieee80211_frame *)
2397 ((uint8_t *)(desc + 1)+ sizeof (struct iwh_rx_mpdu_body_size));
2398
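/*
 * In an association response the frame body starts with the
 * capability information (2 bytes) and the status code (2 bytes),
 * followed by the association ID; hence the third 16-bit word after
 * the 802.11 header is saved below.
 */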
2399 if (IEEE80211_FC0_SUBTYPE_ASSOC_RESP == *(uint8_t *)wh) {
2400 sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
2401 IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2402 "rx : association id = %x\n",
2403 sc->sc_assoc_id));
2404 }
2405
2406 #ifdef DEBUG
2407 if (iwh_dbg_flags & IWH_DEBUG_RX) {
2408 ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
2409 }
2410 #endif
2411
2412 in = ieee80211_find_rxnode(ic, wh);
2413 mp = allocb(len, BPRI_MED);
2414 if (mp) {
2415 bcopy(wh, mp->b_wptr, len);
2416 mp->b_wptr += len;
2417
2418 /*
2419 * send the frame to the 802.11 layer
2420 */
2421 (void) ieee80211_input(ic, mp, in, rssi, 0);
2422 } else {
2423 sc->sc_rx_nobuf++;
2424 IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2425 "alloc rx buf failed\n"));
2426 }
2427
2428 /*
2429 * release node reference
2430 */
2431 ieee80211_free_node(in);
2432 }
2433
2434 /*
2435 * process follow-up bookkeeping after a frame has been transmitted.
2436 */
2437 static void
2438 iwh_tx_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2439 {
2440 ieee80211com_t *ic = &sc->sc_ic;
2441 iwh_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2442 iwh_tx_stat_t *stat = (iwh_tx_stat_t *)(desc + 1);
2443 iwh_amrr_t *amrr;
2444
2445 if (NULL == ic->ic_bss) {
2446 return;
2447 }
2448
2449 amrr = (iwh_amrr_t *)ic->ic_bss;
2450
2451 amrr->txcnt++;
2452 IWH_DBG((IWH_DEBUG_RATECTL, "iwh_tx_intr(): "
2453 "tx: %d cnt\n", amrr->txcnt));
2454
2455 if (stat->ntries > 0) {
2456 amrr->retrycnt++;
2457 sc->sc_tx_retries++;
2458 IWH_DBG((IWH_DEBUG_TX, "iwh_tx_intr(): "
2459 "tx: %d retries\n",
2460 sc->sc_tx_retries));
2461 }
2462
2463 mutex_enter(&sc->sc_mt_lock);
2464 sc->sc_tx_timer = 0;
2465 mutex_exit(&sc->sc_mt_lock);
2466
2467 mutex_enter(&sc->sc_tx_lock);
2468
2469 ring->queued--;
2470 if (ring->queued < 0) {
2471 ring->queued = 0;
2472 }
2473
2474 if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count >> 3))) {
2475 sc->sc_need_reschedule = 0;
2476 mutex_exit(&sc->sc_tx_lock);
2477 mac_tx_update(ic->ic_mach);
2478 mutex_enter(&sc->sc_tx_lock);
2479 }
2480
2481 mutex_exit(&sc->sc_tx_lock);
2482 }
2483
2484 /*
2485 * notify the waiter that a given command has been executed
2486 */
2487 static void
2488 iwh_cmd_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2489 {
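/*
 * Only responses arriving on the command queue (qid 4) matter here.
 * Commands issued asynchronously bump sc_cmd_accum in iwh_cmd(); their
 * responses are counted down first so that only the response to a
 * synchronous command wakes up the waiter sleeping on sc_cmd_cv.
 */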
2490 if ((desc->hdr.qid & 7) != 4) {
2491 return;
2492 }
2493
2494 if (sc->sc_cmd_accum > 0) {
2495 sc->sc_cmd_accum--;
2496 return;
2497 }
2498
2499 mutex_enter(&sc->sc_glock);
2500
2501 sc->sc_cmd_flag = SC_CMD_FLG_DONE;
2502
2503 cv_signal(&sc->sc_cmd_cv);
2504
2505 mutex_exit(&sc->sc_glock);
2506
2507 IWH_DBG((IWH_DEBUG_CMD, "iwh_cmd_intr(): "
2508 "qid=%x idx=%d flags=%x type=0x%x\n",
2509 desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2510 desc->hdr.type));
2511 }
2512
2513 /*
2514 * this function is invoked when an alive notification occurs.
2515 */
2516 static void
2517 iwh_ucode_alive(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2518 {
2519 uint32_t rv;
2520 struct iwh_calib_cfg_cmd cmd;
2521 struct iwh_alive_resp *ar =
2522 (struct iwh_alive_resp *)(desc + 1);
2523 struct iwh_calib_results *res_p = &sc->sc_calib_results;
2524
2525 /*
2526 * the microcontroller is ready
2527 */
2528 IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2529 "microcode alive notification minor: %x major: %x type: "
2530 "%x subtype: %x\n",
2531 ar->ucode_minor, ar->ucode_major, ar->ver_type, ar->ver_subtype));
2532
2533 #ifdef DEBUG
2534 if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2535 IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2536 "microcontroller initialization failed\n"));
2537 }
2538 #endif
2539
2540 /*
2541 * determine if init alive or runtime alive.
2542 */
2543 if (INITIALIZE_SUBTYPE == ar->ver_subtype) {
2544 IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2545 "initialization alive received.\n"));
2546
2547 bcopy(ar, &sc->sc_card_alive_init,
2548 sizeof (struct iwh_init_alive_resp));
2549
2550 /*
2551 * necessary configuration to NIC
2552 */
2553 mutex_enter(&sc->sc_glock);
2554
2555 rv = iwh_alive_common(sc);
2556 if (rv != IWH_SUCCESS) {
2557 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2558 "common alive process failed in init alive.\n");
2559 mutex_exit(&sc->sc_glock);
2560 return;
2561 }
2562
2563 (void) memset(&cmd, 0, sizeof (cmd));
2564
2565 cmd.ucd_calib_cfg.once.is_enable = IWH_CALIB_INIT_CFG_ALL;
2566 cmd.ucd_calib_cfg.once.start = IWH_CALIB_INIT_CFG_ALL;
2567 cmd.ucd_calib_cfg.once.send_res = IWH_CALIB_INIT_CFG_ALL;
2568 cmd.ucd_calib_cfg.flags = IWH_CALIB_INIT_CFG_ALL;
2569
2570 /*
2571 * request that the uCode execute calibration
2572 */
2573 rv = iwh_cmd(sc, CALIBRATION_CFG_CMD, &cmd, sizeof (cmd), 1);
2574 if (rv != IWH_SUCCESS) {
2575 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2576 "failed to send calibration configure command.\n");
2577 mutex_exit(&sc->sc_glock);
2578 return;
2579 }
2580
2581 mutex_exit(&sc->sc_glock);
2582
2583 } else { /* runtime alive */
2584
2585 IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2586 "runtime alive received.\n"));
2587
2588 bcopy(ar, &sc->sc_card_alive_run,
2589 sizeof (struct iwh_alive_resp));
2590
2591 mutex_enter(&sc->sc_glock);
2592
2593 /*
2594 * necessary configuration to NIC
2595 */
2596 rv = iwh_alive_common(sc);
2597 if (rv != IWH_SUCCESS) {
2598 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2599 "common alive process failed in run alive.\n");
2600 mutex_exit(&sc->sc_glock);
2601 return;
2602 }
2603
2604 /*
2605 * send the result of local oscillator calibration to uCode.
2606 */
2607 if (res_p->lo_res != NULL) {
2608 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2609 res_p->lo_res, res_p->lo_res_len, 1);
2610 if (rv != IWH_SUCCESS) {
2611 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2612 "failed to send local "
2613 "oscillator calibration command.\n");
2614 mutex_exit(&sc->sc_glock);
2615 return;
2616 }
2617
2618 DELAY(1000);
2619 }
2620
2621 /*
2622 * send the result of TX IQ calibration to uCode.
2623 */
2624 if (res_p->tx_iq_res != NULL) {
2625 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2626 res_p->tx_iq_res, res_p->tx_iq_res_len, 1);
2627 if (rv != IWH_SUCCESS) {
2628 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2629 "failed to send TX IQ "
2630 "calibration command.\n");
2631 mutex_exit(&sc->sc_glock);
2632 return;
2633 }
2634
2635 DELAY(1000);
2636 }
2637
2638 /*
2639 * send the result of TX IQ perd (periodic) calibration to uCode.
2640 */
2641 if (res_p->tx_iq_perd_res != NULL) {
2642 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2643 res_p->tx_iq_perd_res,
2644 res_p->tx_iq_perd_res_len, 1);
2645 if (rv != IWH_SUCCESS) {
2646 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2647 "failed to send TX IQ perd "
2648 "calibration command.\n");
2649 mutex_exit(&sc->sc_glock);
2650 return;
2651 }
2652
2653 DELAY(1000);
2654 }
2655
2656 /*
2657 * send the result of DC calibration to uCode.
2658 */
2659 if (res_p->dc_res != NULL) {
2660 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2661 res_p->dc_res,
2662 res_p->dc_res_len, 1);
2663 if (rv != IWH_SUCCESS) {
2664 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2665 "failed to send DC "
2666 "calibration command.\n");
2667 mutex_exit(&sc->sc_glock);
2668 return;
2669 }
2670
2671 DELAY(1000);
2672 }
2673
2674 /*
2675 * send the result of BASE BAND calibration to uCode.
2676 */
2677 if (res_p->base_band_res != NULL) {
2678 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2679 res_p->base_band_res,
2680 res_p->base_band_res_len, 1);
2681 if (rv != IWH_SUCCESS) {
2682 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2683 "failed to send BASE BAND "
2684 "calibration command.\n");
2685 mutex_exit(&sc->sc_glock);
2686 return;
2687 }
2688
2689 DELAY(1000);
2690 }
2691
2692 atomic_or_32(&sc->sc_flags, IWH_F_FW_INIT);
2693 cv_signal(&sc->sc_ucode_cv);
2694
2695 mutex_exit(&sc->sc_glock);
2696 }
2697
2698 }
2699
2700 /*
2701 * deal with received frames, command responses
2702 * and all notifications from the ucode.
2703 */
2704 /* ARGSUSED */
2705 static uint_t
2706 iwh_rx_softintr(caddr_t arg, caddr_t unused)
2707 {
2708 iwh_sc_t *sc;
2709 ieee80211com_t *ic;
2710 iwh_rx_desc_t *desc;
2711 iwh_rx_data_t *data;
2712 uint32_t index;
2713
2714 if (NULL == arg) {
2715 return (DDI_INTR_UNCLAIMED);
2716 }
2717 sc = (iwh_sc_t *)arg;
2718 ic = &sc->sc_ic;
2719
2720 /*
2721 * the firmware has advanced the write index of the rx queue;
2722 * read it and process every entry up to that index.
2723 */
2724 index = (sc->sc_shared->val0) & 0xfff;
2725
2726 while (sc->sc_rxq.cur != index) {
2727 data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2728 desc = (iwh_rx_desc_t *)data->dma_data.mem_va;
2729
2730 IWH_DBG((IWH_DEBUG_INTR, "iwh_rx_softintr(): "
2731 "rx notification index = %d"
2732 " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2733 index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2734 desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2735
2736 /*
2737 * a command response (other than scan or tx) needs to be acknowledged
2738 */
2739 if (!(desc->hdr.qid & 0x80) &&
2740 (desc->hdr.type != REPLY_SCAN_CMD) &&
2741 (desc->hdr.type != REPLY_TX)) {
2742 iwh_cmd_intr(sc, desc);
2743 }
2744
2745 switch (desc->hdr.type) {
2746 case REPLY_RX_PHY_CMD:
2747 iwh_rx_phy_intr(sc, desc);
2748 break;
2749
2750 case REPLY_RX_MPDU_CMD:
2751 iwh_rx_mpdu_intr(sc, desc);
2752 break;
2753
2754 case REPLY_TX:
2755 iwh_tx_intr(sc, desc);
2756 break;
2757
2758 case REPLY_ALIVE:
2759 iwh_ucode_alive(sc, desc);
2760 break;
2761
2762 case CARD_STATE_NOTIFICATION:
2763 {
2764 uint32_t *status = (uint32_t *)(desc + 1);
2765
2766 IWH_DBG((IWH_DEBUG_RADIO, "iwh_rx_softintr(): "
2767 "state changed to %x\n",
2768 LE_32(*status)));
2769
2770 if (LE_32(*status) & 1) {
2771 /*
2772 * the RF kill switch has been pushed (radio OFF).
2773 * This is treated as a hw error; the
2774 * iwh_thread() tries to recover after the
2775 * switch is pushed again (radio ON)
2776 */
2777 cmn_err(CE_NOTE, "iwh_rx_softintr(): "
2778 "radio transmitter is off\n");
2779 sc->sc_ostate = sc->sc_ic.ic_state;
2780 ieee80211_new_state(&sc->sc_ic,
2781 IEEE80211_S_INIT, -1);
2782 atomic_or_32(&sc->sc_flags,
2783 (IWH_F_HW_ERR_RECOVER | IWH_F_RADIO_OFF));
2784 }
2785
2786 break;
2787 }
2788
2789 case SCAN_START_NOTIFICATION:
2790 {
2791 iwh_start_scan_t *scan =
2792 (iwh_start_scan_t *)(desc + 1);
2793
2794 IWH_DBG((IWH_DEBUG_SCAN, "iwh_rx_softintr(): "
2795 "scanning channel %d status %x\n",
2796 scan->chan, LE_32(scan->status)));
2797
2798 ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2799 break;
2800 }
2801
2802 case SCAN_COMPLETE_NOTIFICATION:
2803 {
2804 #ifdef DEBUG
2805 iwh_stop_scan_t *scan =
2806 (iwh_stop_scan_t *)(desc + 1);
2807
2808 IWH_DBG((IWH_DEBUG_SCAN, "iwh_rx_softintr(): "
2809 "completed channel %d (burst of %d) status %02x\n",
2810 scan->chan, scan->nchan, scan->status));
2811 #endif
2812
2813 sc->sc_scan_pending++;
2814 break;
2815 }
2816
2817 case STATISTICS_NOTIFICATION:
2818 {
2819 /*
2820 * handle statistics notification
2821 */
2822 break;
2823 }
2824
2825 case CALIBRATION_RES_NOTIFICATION:
2826 iwh_save_calib_result(sc, desc);
2827 break;
2828
2829 case CALIBRATION_COMPLETE_NOTIFICATION:
2830 mutex_enter(&sc->sc_glock);
2831 atomic_or_32(&sc->sc_flags, IWH_F_FW_INIT);
2832 cv_signal(&sc->sc_ucode_cv);
2833 mutex_exit(&sc->sc_glock);
2834 break;
2835
2836 case MISSED_BEACONS_NOTIFICATION:
2837 /* handle beacon miss by software mechanism */
2838 break;
2839 }
2840
2841 sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2842 }
2843
2844 /*
2845 * the driver has processed what was received in the rx queue; tell
2846 * the firmware how far it got (rounded down to a multiple of 8).
2847 */
2848 index = (0 == index) ? RX_QUEUE_SIZE - 1 : index - 1;
2849 IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2850
2851 /*
2852 * re-enable interrupts
2853 */
2854 IWH_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2855
2856 return (DDI_INTR_CLAIMED);
2857 }
2858
2859 /*
2860 * the interrupt handler
2861 */
2862 /* ARGSUSED */
2863 static uint_t
2864 iwh_intr(caddr_t arg, caddr_t unused)
2865 {
2866 iwh_sc_t *sc;
2867 uint32_t r, rfh;
2868
2869 if (NULL == arg) {
2870 return (DDI_INTR_UNCLAIMED);
2871 }
2872 sc = (iwh_sc_t *)arg;
2873
2874 r = IWH_READ(sc, CSR_INT);
2875 if (0 == r || 0xffffffff == r) {
2876 return (DDI_INTR_UNCLAIMED);
2877 }
2878
2879 IWH_DBG((IWH_DEBUG_INTR, "iwh_intr(): "
2880 "interrupt reg %x\n", r));
2881
2882 rfh = IWH_READ(sc, CSR_FH_INT_STATUS);
2883
2884 IWH_DBG((IWH_DEBUG_INTR, "iwh_intr(): "
2885 "FH interrupt reg %x\n", rfh));
2886
2887 /*
2888 * disable interrupts
2889 */
2890 IWH_WRITE(sc, CSR_INT_MASK, 0);
2891
2892 /*
2893 * ack interrupts
2894 */
2895 IWH_WRITE(sc, CSR_INT, r);
2896 IWH_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2897
2898 if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2899 IWH_DBG((IWH_DEBUG_FW, "iwh_intr(): "
2900 "fatal firmware error\n"));
2901 iwh_stop(sc);
2902 sc->sc_ostate = sc->sc_ic.ic_state;
2903
2904 /*
2905 * notify upper layer
2906 */
2907 if (!IWH_CHK_FAST_RECOVER(sc)) {
2908 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2909 }
2910
2911 atomic_or_32(&sc->sc_flags, IWH_F_HW_ERR_RECOVER);
2912 return (DDI_INTR_CLAIMED);
2913 }
2914
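/*
 * Bit 27 of CSR_GP_CNTRL reflects the hardware RF-kill switch state;
 * it should correspond to the CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW
 * flag polled in iwh_thread().
 */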
2915 if (r & BIT_INT_RF_KILL) {
2916 uint32_t tmp = IWH_READ(sc, CSR_GP_CNTRL);
2917 if (tmp & (1 << 27)) {
2918 cmn_err(CE_NOTE, "RF switch: radio on\n");
2919 }
2920 }
2921
2922 if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2923 (rfh & FH_INT_RX_MASK)) {
2924 (void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2925 return (DDI_INTR_CLAIMED);
2926 }
2927
2928 if (r & BIT_INT_FH_TX) {
2929 mutex_enter(&sc->sc_glock);
2930 atomic_or_32(&sc->sc_flags, IWH_F_PUT_SEG);
2931 cv_signal(&sc->sc_put_seg_cv);
2932 mutex_exit(&sc->sc_glock);
2933 }
2934
2935 #ifdef DEBUG
2936 if (r & BIT_INT_ALIVE) {
2937 IWH_DBG((IWH_DEBUG_FW, "iwh_intr(): "
2938 "firmware initialized.\n"));
2939 }
2940 #endif
2941
2942 /*
2943 * re-enable interrupts
2944 */
2945 IWH_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2946
2947 return (DDI_INTR_CLAIMED);
2948 }
2949
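/*
 * Convert a net80211 rate (in 0.5 Mb/s units, e.g. 2 = 1 Mb/s,
 * 108 = 54 Mb/s) into the PLCP rate code expected by the firmware;
 * for the CCK rates the code is simply the rate in 100 kb/s units.
 */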
2950 static uint8_t
2951 iwh_rate_to_plcp(int rate)
2952 {
2953 uint8_t ret;
2954
2955 switch (rate) {
2956 /*
2957 * CCK rates
2958 */
2959 case 2:
2960 ret = 0xa;
2961 break;
2962
2963 case 4:
2964 ret = 0x14;
2965 break;
2966
2967 case 11:
2968 ret = 0x37;
2969 break;
2970
2971 case 22:
2972 ret = 0x6e;
2973 break;
2974
2975 /*
2976 * OFDM rates
2977 */
2978 case 12:
2979 ret = 0xd;
2980 break;
2981
2982 case 18:
2983 ret = 0xf;
2984 break;
2985
2986 case 24:
2987 ret = 0x5;
2988 break;
2989
2990 case 36:
2991 ret = 0x7;
2992 break;
2993
2994 case 48:
2995 ret = 0x9;
2996 break;
2997
2998 case 72:
2999 ret = 0xb;
3000 break;
3001
3002 case 96:
3003 ret = 0x1;
3004 break;
3005
3006 case 108:
3007 ret = 0x3;
3008 break;
3009
3010 default:
3011 ret = 0;
3012 break;
3013 }
3014
3015 return (ret);
3016 }
3017
3018 /*
3019 * invoked by GLD to send frames
3020 */
3021 static mblk_t *
3022 iwh_m_tx(void *arg, mblk_t *mp)
3023 {
3024 iwh_sc_t *sc;
3025 ieee80211com_t *ic;
3026 mblk_t *next;
3027
3028 if (NULL == arg) {
3029 return (NULL);
3030 }
3031 sc = (iwh_sc_t *)arg;
3032 ic = &sc->sc_ic;
3033
3034 if (sc->sc_flags & IWH_F_SUSPEND) {
3035 freemsgchain(mp);
3036 return (NULL);
3037 }
3038
3039 if (ic->ic_state != IEEE80211_S_RUN) {
3040 freemsgchain(mp);
3041 return (NULL);
3042 }
3043
3044 if ((sc->sc_flags & IWH_F_HW_ERR_RECOVER) &&
3045 IWH_CHK_FAST_RECOVER(sc)) {
3046 IWH_DBG((IWH_DEBUG_FW, "iwh_m_tx(): "
3047 "hold queue\n"));
3048 return (mp);
3049 }
3050
3051 while (mp != NULL) {
3052 next = mp->b_next;
3053 mp->b_next = NULL;
3054 if (iwh_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
3055 mp->b_next = next;
3056 break;
3057 }
3058 mp = next;
3059 }
3060
3061 return (mp);
3062 }
3063
3064 /*
3065 * send frames
3066 */
3067 static int
3068 iwh_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
3069 {
3070 iwh_sc_t *sc;
3071 iwh_tx_ring_t *ring;
3072 iwh_tx_desc_t *desc;
3073 iwh_tx_data_t *data;
3074 iwh_tx_data_t *desc_data;
3075 iwh_cmd_t *cmd;
3076 iwh_tx_cmd_t *tx;
3077 ieee80211_node_t *in;
3078 struct ieee80211_frame *wh, *mp_wh;
3079 struct ieee80211_key *k = NULL;
3080 mblk_t *m, *m0;
3081 int hdrlen, len, len0, mblen, off, err = IWH_SUCCESS;
3082 uint16_t masks = 0;
3083 uint32_t rate, s_id = 0;
3084 int txq_id = NON_QOS_TXQ;
3085 struct ieee80211_qosframe *qwh = NULL;
3086 int tid = WME_TID_INVALID;
3087
3088 if (ic == NULL) {
3089 return (IWH_FAIL);
3090 }
3091 rate = 0;
3092 sc = (iwh_sc_t *)ic;
3093
3094 if (sc->sc_flags & IWH_F_SUSPEND) {
3095 if ((type & IEEE80211_FC0_TYPE_MASK) !=
3096 IEEE80211_FC0_TYPE_DATA) {
3097 freemsg(mp);
3098 }
3099 err = IWH_FAIL;
3100 goto exit;
3101 }
3102
3103 if ((NULL == mp) || (MBLKL(mp) <= 0)) {
3104 return (IWH_FAIL);
3105 }
3106
3107 mp_wh = (struct ieee80211_frame *)mp->b_rptr;
3108
3109 /*
3110 * Determine which AP, or which station in IBSS mode, to send to
3111 */
3112 in = ieee80211_find_txnode(ic, mp_wh->i_addr1);
3113 if (NULL == in) {
3114 cmn_err(CE_WARN, "iwh_send(): "
3115 "failed to find tx node\n");
3116 freemsg(mp);
3117 sc->sc_tx_err++;
3118 err = IWH_SUCCESS;
3119 goto exit;
3120 }
3121
3122 /*
3123 * Determine TX queue according to traffic ID in frame
3124 * if working in QoS mode.
3125 */
3126 if (in->in_flags & IEEE80211_NODE_QOS) {
3127
3128 if ((type & IEEE80211_FC0_TYPE_MASK) ==
3129 IEEE80211_FC0_TYPE_DATA) {
3130
3131 if (mp_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
3132 qwh = (struct ieee80211_qosframe *)mp_wh;
3133
3134 tid = qwh->i_qos[0] & IEEE80211_QOS_TID;
3135 txq_id = iwh_wme_tid_to_txq(tid);
3136
3137 if (txq_id < TXQ_FOR_AC_MIN ||
3138 (txq_id > TXQ_FOR_AC_MAX)) {
3139 freemsg(mp);
3140 sc->sc_tx_err++;
3141 err = IWH_SUCCESS;
3142 goto exit;
3143 }
3144
3145 } else {
3146 txq_id = NON_QOS_TXQ;
3147 }
3148
3149 } else if ((type & IEEE80211_FC0_TYPE_MASK) ==
3150 IEEE80211_FC0_TYPE_MGT) {
3151 txq_id = QOS_TXQ_FOR_MGT;
3152 } else {
3153 txq_id = NON_QOS_TXQ;
3154 }
3155
3156 } else {
3157 txq_id = NON_QOS_TXQ;
3158 }
3159
3160 mutex_enter(&sc->sc_tx_lock);
3161 ring = &sc->sc_txq[txq_id];
3162 data = &ring->data[ring->cur];
3163 cmd = data->cmd;
3164 bzero(cmd, sizeof (*cmd));
3165
3166 ring->cur = (ring->cur + 1) % ring->count;
3167
3168 /*
3169 * Need to reschedule TX if the TX buffer is full.
3170 */
3171 if (ring->queued > ring->count - IWH_MAX_WIN_SIZE) {
3172 IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3173 "no txbuf\n"));
3174
3175 sc->sc_need_reschedule = 1;
3176 mutex_exit(&sc->sc_tx_lock);
3177
3178 if ((type & IEEE80211_FC0_TYPE_MASK) !=
3179 IEEE80211_FC0_TYPE_DATA) {
3180 freemsg(mp);
3181 }
3182 sc->sc_tx_nobuf++;
3183 err = IWH_FAIL;
3184 goto exit;
3185 }
3186
3187 ring->queued++;
3188
3189 mutex_exit(&sc->sc_tx_lock);
3190
3191 hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);
3192
3193 m = allocb(msgdsize(mp) + 32, BPRI_MED);
3194 if (NULL == m) { /* cannot allocate a buffer, drop this packet */
3195 cmn_err(CE_WARN, "iwh_send(): "
3196 "failed to allocate msgbuf\n");
3197 freemsg(mp);
3198
3199 mutex_enter(&sc->sc_tx_lock);
3200 ring->queued--;
3201 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3202 sc->sc_need_reschedule = 0;
3203 mutex_exit(&sc->sc_tx_lock);
3204 mac_tx_update(ic->ic_mach);
3205 mutex_enter(&sc->sc_tx_lock);
3206 }
3207 mutex_exit(&sc->sc_tx_lock);
3208
3209 err = IWH_SUCCESS;
3210 goto exit;
3211 }
3212
3213 for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
3214 mblen = MBLKL(m0);
3215 bcopy(m0->b_rptr, m->b_rptr + off, mblen);
3216 off += mblen;
3217 }
3218
3219 m->b_wptr += off;
3220
3221 wh = (struct ieee80211_frame *)m->b_rptr;
3222
3223 /*
3224 * The net80211 module encapsulates outbound data frames:
3225 * add the remaining fields of the 802.11 frame.
3226 */
3227 if ((type & IEEE80211_FC0_TYPE_MASK) ==
3228 IEEE80211_FC0_TYPE_DATA) {
3229 (void) ieee80211_encap(ic, m, in);
3230 }
3231
3232 freemsg(mp);
3233
3234 cmd->hdr.type = REPLY_TX;
3235 cmd->hdr.flags = 0;
3236 cmd->hdr.qid = ring->qid;
3237
3238 tx = (iwh_tx_cmd_t *)cmd->data;
3239 tx->tx_flags = 0;
3240
3241 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3242 tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
3243 } else {
3244 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
3245 }
3246
3247 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3248 k = ieee80211_crypto_encap(ic, m);
3249 if (NULL == k) {
3250 freemsg(m);
3251 sc->sc_tx_err++;
3252
3253 mutex_enter(&sc->sc_tx_lock);
3254 ring->queued--;
3255 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3256 sc->sc_need_reschedule = 0;
3257 mutex_exit(&sc->sc_tx_lock);
3258 mac_tx_update(ic->ic_mach);
3259 mutex_enter(&sc->sc_tx_lock);
3260 }
3261 mutex_exit(&sc->sc_tx_lock);
3262
3263 err = IWH_SUCCESS;
3264 goto exit;
3265 }
3266
3267 /*
3268 * packet header may have moved, reset our local pointer
3269 */
3270 wh = (struct ieee80211_frame *)m->b_rptr;
3271 }
3272
3273 len = msgdsize(m);
3274
3275 #ifdef DEBUG
3276 if (iwh_dbg_flags & IWH_DEBUG_TX) {
3277 ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
3278 }
3279 #endif
3280
3281 tx->rts_retry_limit = IWH_TX_RTS_RETRY_LIMIT;
3282 tx->data_retry_limit = IWH_TX_DATA_RETRY_LIMIT;
3283
3284 /*
3285 * specific TX parameters for management frames
3286 */
3287 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
3288 IEEE80211_FC0_TYPE_MGT) {
3289 /*
3290 * mgmt frames are sent at 1M
3291 */
3292 if ((in->in_rates.ir_rates[0] &
3293 IEEE80211_RATE_VAL) != 0) {
3294 rate = in->in_rates.ir_rates[0] & IEEE80211_RATE_VAL;
3295 } else {
3296 rate = 2;
3297 }
3298
3299 tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3300
3301 /*
3302 * tell h/w to set timestamp in probe responses
3303 */
3304 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3305 IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
3306 tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
3307
3308 tx->data_retry_limit = 3;
3309 if (tx->data_retry_limit < tx->rts_retry_limit) {
3310 tx->rts_retry_limit = tx->data_retry_limit;
3311 }
3312 }
3313
3314 if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3315 IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
3316 ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3317 IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) {
3318 tx->timeout.pm_frame_timeout = LE_16(3);
3319 } else {
3320 tx->timeout.pm_frame_timeout = LE_16(2);
3321 }
3322
3323 } else {
3324 /*
3325 * rate selection is done here in software;
3326 * rate scaling in hardware may be added later.
3327 *
3328 * for now the tx rate is carried in the tx cmd flags and is
3329 * initially set to the maximum: 54M for 11g, 11M for 11b
3330 * and 96M for 11n.
3331 */
3332 if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
3333 rate = ic->ic_fixed_rate;
3334 } else {
3335 if ((in->in_flags & IEEE80211_NODE_HT) &&
3336 (sc->sc_ht_conf.ht_support)) {
3337 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
3338 rate = amrr->ht_mcs_idx;
3339 } else {
3340 if ((in->in_rates.ir_rates[in->in_txrate] &
3341 IEEE80211_RATE_VAL) != 0) {
3342 rate = in->in_rates.
3343 ir_rates[in->in_txrate] &
3344 IEEE80211_RATE_VAL;
3345 }
3346 }
3347 }
3348
3349 if (tid != WME_TID_INVALID) {
3350 tx->tid_tspec = (uint8_t)tid;
3351 tx->tx_flags &= LE_32(~TX_CMD_FLG_SEQ_CTL_MSK);
3352 } else {
3353 tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3354 }
3355
3356 tx->timeout.pm_frame_timeout = 0;
3357 }
3358
3359 IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3360 "tx rate[%d of %d] = %x",
3361 in->in_txrate, in->in_rates.ir_nrates, rate));
3362
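/*
 * The first TFD segment carries the 4-byte command header, the TX
 * command and the 802.11 MAC header, rounded up to a 4-byte boundary;
 * when rounding adds padding, the firmware is told so via
 * TX_CMD_FLG_MH_PAD_MSK.
 */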
3363 len0 = roundup(4 + sizeof (iwh_tx_cmd_t) + hdrlen, 4);
3364 if (len0 != (4 + sizeof (iwh_tx_cmd_t) + hdrlen)) {
3365 tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK);
3366 }
3367
3368 /*
3369 * retrieve destination node's id
3370 */
3371 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3372 tx->sta_id = IWH_BROADCAST_ID;
3373 } else {
3374 tx->sta_id = IWH_AP_ID;
3375 }
3376
3377 if ((in->in_flags & IEEE80211_NODE_HT) &&
3378 (sc->sc_ht_conf.ht_support) &&
3379 ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
3380 IEEE80211_FC0_TYPE_DATA)) {
3381 if (rate >= HT_2CHAIN_RATE_MIN_IDX) {
3382 rate |= LE_32(RATE_MCS_ANT_AB_MSK);
3383 } else {
3384 rate |= LE_32(RATE_MCS_ANT_B_MSK);
3385 }
3386
3387 rate |= LE_32((1 << RATE_MCS_HT_POS));
3388
3389 tx->rate.r.rate_n_flags = rate;
3390
3391 } else {
3392 if (2 == rate || 4 == rate || 11 == rate || 22 == rate) {
3393 masks |= RATE_MCS_CCK_MSK;
3394 }
3395
3396 masks |= RATE_MCS_ANT_B_MSK;
3397 tx->rate.r.rate_n_flags = LE_32(iwh_rate_to_plcp(rate) | masks);
3398 }
3399
3400 IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3401 "tx flag = %x",
3402 tx->tx_flags));
3403
3404 tx->stop_time.life_time = LE_32(0xffffffff);
3405
3406 tx->len = LE_16(len);
3407
3408 tx->dram_lsb_ptr =
3409 LE_32(data->paddr_cmd + 4 + offsetof(iwh_tx_cmd_t, scratch));
3410 tx->dram_msb_ptr = 0;
3411 tx->driver_txop = 0;
3412 tx->next_frame_len = 0;
3413
3414 bcopy(m->b_rptr, tx + 1, hdrlen);
3415 m->b_rptr += hdrlen;
3416 bcopy(m->b_rptr, data->dma_data.mem_va, (len - hdrlen));
3417
3418 IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3419 "sending data: qid=%d idx=%d len=%d",
3420 ring->qid, ring->cur, len));
3421
3422 /*
3423 * first segment includes the tx cmd plus the 802.11 header,
3424 * the second includes the remainder of the 802.11 frame.
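* the 2 in the high byte of val0 below is taken to be the TFD's
* segment (TB) count, matching the 1 << 24 used for the
* single-segment commands built in iwh_cmd() and iwh_scan().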
3425 */
3426
3427 mutex_enter(&sc->sc_tx_lock);
3428 cmd->hdr.idx = ring->desc_cur;
3429 desc_data = &ring->data[ring->desc_cur];
3430 desc = desc_data->desc;
3431 bzero(desc, sizeof (*desc));
3432 desc->val0 = 2 << 24;
3433 desc->pa[0].tb1_addr = data->paddr_cmd;
3434 desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
3435 ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
3436 desc->pa[0].val2 =
3437 ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
3438 ((len - hdrlen) << 20);
3439 IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3440 "phy addr1 = 0x%x phy addr2 = 0x%x "
3441 "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
3442 data->paddr_cmd, data->dma_data.cookie.dmac_address,
3443 len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
3444
3445 /*
3446 * kick ring
3447 */
3448 s_id = tx->sta_id;
3449
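/*
 * Update the TX scheduler's byte count table for this TFD. Entries
 * below IWH_MAX_WIN_SIZE are mirrored past IWH_QUEUE_SIZE (here and
 * in iwh_cmd()/iwh_scan()), presumably so the hardware can keep
 * reading a full window across the ring wrap.
 */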
3450 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3451 tfd_offset[ring->desc_cur].val =
3452 (8 + len) | (s_id << 12);
3453 if (ring->desc_cur < IWH_MAX_WIN_SIZE) {
3454 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3455 tfd_offset[IWH_QUEUE_SIZE + ring->desc_cur].val =
3456 (8 + len) | (s_id << 12);
3457 }
3458
3459 IWH_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
3460 IWH_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
3461
3462 ring->desc_cur = (ring->desc_cur + 1) % ring->count;
3463 IWH_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->desc_cur);
3464
3465 mutex_exit(&sc->sc_tx_lock);
3466 freemsg(m);
3467
3468 /*
3469 * release node reference
3470 */
3471 ieee80211_free_node(in);
3472
3473 ic->ic_stats.is_tx_bytes += len;
3474 ic->ic_stats.is_tx_frags++;
3475
3476 mutex_enter(&sc->sc_mt_lock);
3477 if (0 == sc->sc_tx_timer) {
3478 sc->sc_tx_timer = 4;
3479 }
3480 mutex_exit(&sc->sc_mt_lock);
3481
3482 exit:
3483 return (err);
3484 }
3485
3486 /*
3487 * invoked by GLD to handle IOCTL requests
3488 */
3489 static void
3490 iwh_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
3491 {
3492 iwh_sc_t *sc;
3493 ieee80211com_t *ic;
3494 int err = EINVAL;
3495
3496 if (NULL == arg) {
3497 return;
3498 }
3499 sc = (iwh_sc_t *)arg;
3500 ic = &sc->sc_ic;
3501
3502 err = ieee80211_ioctl(ic, wq, mp);
3503 if (ENETRESET == err) {
3504 /*
3505 * This is special handling for hidden AP connections.
3506 * In any case, make sure the driver issues only one
3507 * 'scan' per 'connect' CLI command. So when connecting
3508 * to a hidden AP, the scan is only sent out on the air
3509 * once we know the desired essid of the AP we want to
3510 * connect to.
3511 */
3512 if (ic->ic_des_esslen) {
3513 if (sc->sc_flags & IWH_F_RUNNING) {
3514 iwh_m_stop(sc);
3515 (void) iwh_m_start(sc);
3516 (void) ieee80211_new_state(ic,
3517 IEEE80211_S_SCAN, -1);
3518 }
3519 }
3520 }
3521 }
3522
3523 /*
3524 * Callback functions for getting/setting properties
3525 */
3526 static int
3527 iwh_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3528 uint_t wldp_length, void *wldp_buf)
3529 {
3530 iwh_sc_t *sc;
3531 int err = EINVAL;
3532
3533 if (NULL == arg) {
3534 return (EINVAL);
3535 }
3536 sc = (iwh_sc_t *)arg;
3537
3538 err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3539 wldp_length, wldp_buf);
3540
3541 return (err);
3542 }
3543
3544 static void
3545 iwh_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3546 mac_prop_info_handle_t mph)
3547 {
3548 iwh_sc_t *sc = (iwh_sc_t *)arg;
3549
3550 ieee80211_propinfo(&sc->sc_ic, pr_name, wldp_pr_num, mph);
3551 }
3552
3553 static int
3554 iwh_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3555 uint_t wldp_length, const void *wldp_buf)
3556 {
3557 iwh_sc_t *sc;
3558 ieee80211com_t *ic;
3559 int err = EINVAL;
3560
3561 if (NULL == arg) {
3562 return (EINVAL);
3563 }
3564 sc = (iwh_sc_t *)arg;
3565 ic = &sc->sc_ic;
3566
3567 err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3568 wldp_buf);
3569
3570 if (err == ENETRESET) {
3571 if (ic->ic_des_esslen) {
3572 if (sc->sc_flags & IWH_F_RUNNING) {
3573 iwh_m_stop(sc);
3574 (void) iwh_m_start(sc);
3575 (void) ieee80211_new_state(ic,
3576 IEEE80211_S_SCAN, -1);
3577 }
3578 }
3579 err = 0;
3580 }
3581 return (err);
3582 }
3583
3584 /*
3585 * invoked by GLD to supply NIC and driver statistics
3586 */
3587 static int
3588 iwh_m_stat(void *arg, uint_t stat, uint64_t *val)
3589 {
3590 iwh_sc_t *sc;
3591 ieee80211com_t *ic;
3592 ieee80211_node_t *in;
3593
3594 if (NULL == arg) {
3595 return (EINVAL);
3596 }
3597 sc = (iwh_sc_t *)arg;
3598 ic = &sc->sc_ic;
3599
3600 mutex_enter(&sc->sc_glock);
3601
3602 switch (stat) {
3603 case MAC_STAT_IFSPEED:
3604 in = ic->ic_bss;
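/*
 * rates are kept in 0.5 Mb/s units, hence the division by two
 * when converting to bits per second
 */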
3605 *val = ((IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) ?
3606 IEEE80211_RATE(in->in_txrate) :
3607 ic->ic_fixed_rate) / 2 * 1000000;
3608 break;
3609
3610 case MAC_STAT_NOXMTBUF:
3611 *val = sc->sc_tx_nobuf;
3612 break;
3613
3614 case MAC_STAT_NORCVBUF:
3615 *val = sc->sc_rx_nobuf;
3616 break;
3617
3618 case MAC_STAT_IERRORS:
3619 *val = sc->sc_rx_err;
3620 break;
3621
3622 case MAC_STAT_RBYTES:
3623 *val = ic->ic_stats.is_rx_bytes;
3624 break;
3625
3626 case MAC_STAT_IPACKETS:
3627 *val = ic->ic_stats.is_rx_frags;
3628 break;
3629
3630 case MAC_STAT_OBYTES:
3631 *val = ic->ic_stats.is_tx_bytes;
3632 break;
3633
3634 case MAC_STAT_OPACKETS:
3635 *val = ic->ic_stats.is_tx_frags;
3636 break;
3637
3638 case MAC_STAT_OERRORS:
3639 case WIFI_STAT_TX_FAILED:
3640 *val = sc->sc_tx_err;
3641 break;
3642
3643 case WIFI_STAT_TX_RETRANS:
3644 *val = sc->sc_tx_retries;
3645 break;
3646
3647 case WIFI_STAT_FCS_ERRORS:
3648 case WIFI_STAT_WEP_ERRORS:
3649 case WIFI_STAT_TX_FRAGS:
3650 case WIFI_STAT_MCAST_TX:
3651 case WIFI_STAT_RTS_SUCCESS:
3652 case WIFI_STAT_RTS_FAILURE:
3653 case WIFI_STAT_ACK_FAILURE:
3654 case WIFI_STAT_RX_FRAGS:
3655 case WIFI_STAT_MCAST_RX:
3656 case WIFI_STAT_RX_DUPS:
3657 mutex_exit(&sc->sc_glock);
3658 return (ieee80211_stat(ic, stat, val));
3659
3660 default:
3661 mutex_exit(&sc->sc_glock);
3662 return (ENOTSUP);
3663 }
3664
3665 mutex_exit(&sc->sc_glock);
3666
3667 return (IWH_SUCCESS);
3668 }
3669
3670 /*
3671 * invoked by GLD to start or open NIC
3672 */
3673 static int
3674 iwh_m_start(void *arg)
3675 {
3676 iwh_sc_t *sc;
3677 ieee80211com_t *ic;
3678 int err = IWH_FAIL;
3679
3680 if (NULL == arg) {
3681 return (EINVAL);
3682 }
3683 sc = (iwh_sc_t *)arg;
3684 ic = &sc->sc_ic;
3685
3686 err = iwh_init(sc);
3687 if (err != IWH_SUCCESS) {
3688 /*
3689 * The hw init failed (e.g. the RF switch is OFF). Return success
3690 * anyway to make the 'plumb' succeed. The iwh_thread() tries to
3691 * re-initialize the NIC in the background.
3692 */
3693 atomic_or_32(&sc->sc_flags, IWH_F_HW_ERR_RECOVER);
3694 return (IWH_SUCCESS);
3695 }
3696
3697 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3698
3699 atomic_or_32(&sc->sc_flags, IWH_F_RUNNING);
3700
3701 return (IWH_SUCCESS);
3702 }
3703
3704 /*
3705 * invoked by GLD to stop or down NIC
3706 */
3707 static void
3708 iwh_m_stop(void *arg)
3709 {
3710 iwh_sc_t *sc;
3711 ieee80211com_t *ic;
3712
3713 if (NULL == arg) {
3714 return;
3715 }
3716 sc = (iwh_sc_t *)arg;
3717 ic = &sc->sc_ic;
3718
3719 iwh_stop(sc);
3720
3721 /*
3722 * release buffer for calibration
3723 */
3724 iwh_release_calib_buffer(sc);
3725
3726 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3727
3728 atomic_and_32(&sc->sc_flags, ~IWH_F_HW_ERR_RECOVER);
3729 atomic_and_32(&sc->sc_flags, ~IWH_F_RATE_AUTO_CTL);
3730
3731 atomic_and_32(&sc->sc_flags, ~IWH_F_RUNNING);
3732 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
3733 }
3734
3735 /*
3736 * invoked by GLD to configure NIC
3737 */
3738 static int
3739 iwh_m_unicst(void *arg, const uint8_t *macaddr)
3740 {
3741 iwh_sc_t *sc;
3742 ieee80211com_t *ic;
3743 int err = IWH_SUCCESS;
3744
3745 if (NULL == arg) {
3746 return (EINVAL);
3747 }
3748 sc = (iwh_sc_t *)arg;
3749 ic = &sc->sc_ic;
3750
3751 if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3752 IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3753 mutex_enter(&sc->sc_glock);
3754 err = iwh_config(sc);
3755 mutex_exit(&sc->sc_glock);
3756 if (err != IWH_SUCCESS) {
3757 cmn_err(CE_WARN, "iwh_m_unicst(): "
3758 "failed to configure device\n");
3759 goto fail;
3760 }
3761 }
3762
3763 fail:
3764 return (err);
3765 }
3766
3767 /* ARGSUSED */
3768 static int
3769 iwh_m_multicst(void *arg, boolean_t add, const uint8_t *m)
3770 {
3771 return (IWH_SUCCESS);
3772 }
3773
3774 /* ARGSUSED */
3775 static int
3776 iwh_m_promisc(void *arg, boolean_t on)
3777 {
3778 return (IWH_SUCCESS);
3779 }
3780
3781 /*
3782 * kernel thread to deal with exceptional situations
3783 */
3784 static void
3785 iwh_thread(iwh_sc_t *sc)
3786 {
3787 ieee80211com_t *ic = &sc->sc_ic;
3788 clock_t clk;
3789 int err, n = 0, timeout = 0;
3790 uint32_t tmp;
3791 #ifdef DEBUG
3792 int times = 0;
3793 #endif
3794
3795 while (sc->sc_mf_thread_switch) {
3796 tmp = IWH_READ(sc, CSR_GP_CNTRL);
3797 if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
3798 atomic_and_32(&sc->sc_flags, ~IWH_F_RADIO_OFF);
3799 } else {
3800 atomic_or_32(&sc->sc_flags, IWH_F_RADIO_OFF);
3801 }
3802
3803 /*
3804 * If in SUSPEND or the RF is OFF, do nothing.
3805 */
3806 if (sc->sc_flags & IWH_F_RADIO_OFF) {
3807 delay(drv_usectohz(100000));
3808 continue;
3809 }
3810
3811 /*
3812 * recover from a fatal error
3813 */
3814 if (ic->ic_mach &&
3815 (sc->sc_flags & IWH_F_HW_ERR_RECOVER)) {
3816
3817 IWH_DBG((IWH_DEBUG_FW, "iwh_thread(): "
3818 "try to recover fatal hw error: %d\n", times++));
3819
3820 iwh_stop(sc);
3821
3822 if (IWH_CHK_FAST_RECOVER(sc)) {
3823 /*
3824 * save runtime configuration
3825 */
3826 bcopy(&sc->sc_config, &sc->sc_config_save,
3827 sizeof (sc->sc_config));
3828 } else {
3829 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3830 delay(drv_usectohz(2000000 + n*500000));
3831 }
3832
3833 err = iwh_init(sc);
3834 if (err != IWH_SUCCESS) {
3835 n++;
3836 if (n < 20) {
3837 continue;
3838 }
3839 }
3840
3841 n = 0;
3842 if (!err) {
3843 atomic_or_32(&sc->sc_flags, IWH_F_RUNNING);
3844 }
3845
3846
3847 if (!IWH_CHK_FAST_RECOVER(sc) ||
3848 iwh_fast_recover(sc) != IWH_SUCCESS) {
3849 atomic_and_32(&sc->sc_flags,
3850 ~IWH_F_HW_ERR_RECOVER);
3851
3852 delay(drv_usectohz(2000000));
3853 if (sc->sc_ostate != IEEE80211_S_INIT) {
3854 ieee80211_new_state(ic,
3855 IEEE80211_S_SCAN, 0);
3856 }
3857 }
3858 }
3859
3860 if (ic->ic_mach &&
3861 (sc->sc_flags & IWH_F_SCANNING) && sc->sc_scan_pending) {
3862 IWH_DBG((IWH_DEBUG_SCAN, "iwh_thread(): "
3863 "wait for probe response\n"));
3864
3865 sc->sc_scan_pending--;
3866 delay(drv_usectohz(200000));
3867 ieee80211_next_scan(ic);
3868 }
3869
3870 /*
3871 * rate ctl
3872 */
3873 if (ic->ic_mach &&
3874 (sc->sc_flags & IWH_F_RATE_AUTO_CTL)) {
3875 clk = ddi_get_lbolt();
3876 if (clk > sc->sc_clk + drv_usectohz(1000000)) {
3877 iwh_amrr_timeout(sc);
3878 }
3879 }
3880
3881 if ((ic->ic_state == IEEE80211_S_RUN) &&
3882 (ic->ic_beaconmiss++ > 100)) { /* 10 seconds */
3883 cmn_err(CE_WARN, "iwh: beacon missed for 10 seconds\n");
3884 (void) ieee80211_new_state(ic,
3885 IEEE80211_S_INIT, -1);
3886 }
3887
3888 delay(drv_usectohz(100000));
3889
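/*
 * TX watchdog: sc_tx_timer is armed in iwh_send() and decremented
 * here roughly once per second (every 10 passes of this 100 ms
 * loop); when it reaches zero a hardware error recovery is
 * scheduled.
 */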
3890 mutex_enter(&sc->sc_mt_lock);
3891 if (sc->sc_tx_timer) {
3892 timeout++;
3893 if (10 == timeout) {
3894 sc->sc_tx_timer--;
3895 if (0 == sc->sc_tx_timer) {
3896 atomic_or_32(&sc->sc_flags,
3897 IWH_F_HW_ERR_RECOVER);
3898 sc->sc_ostate = IEEE80211_S_RUN;
3899 IWH_DBG((IWH_DEBUG_FW, "iwh_thread(): "
3900 "try to recover from "
3901 "send fail\n"));
3902 }
3903 timeout = 0;
3904 }
3905 }
3906 mutex_exit(&sc->sc_mt_lock);
3907 }
3908
3909 mutex_enter(&sc->sc_mt_lock);
3910 sc->sc_mf_thread = NULL;
3911 cv_signal(&sc->sc_mt_cv);
3912 mutex_exit(&sc->sc_mt_lock);
3913 }
3914
3915 /*
3916 * Send a command to the ucode.
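*
* With async == 0 the caller holds sc_glock and this routine blocks on
* sc_cmd_cv (signalled from iwh_cmd_intr()) for up to two seconds
* waiting for the response; with async != 0 the command is only queued
* and sc_cmd_accum counts the outstanding asynchronous responses.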
3917 */
3918 static int
3919 iwh_cmd(iwh_sc_t *sc, int code, const void *buf, int size, int async)
3920 {
3921 iwh_tx_ring_t *ring = &sc->sc_txq[IWH_CMD_QUEUE_NUM];
3922 iwh_tx_desc_t *desc;
3923 iwh_cmd_t *cmd;
3924
3925 ASSERT(size <= sizeof (cmd->data));
3926 ASSERT(mutex_owned(&sc->sc_glock));
3927
3928 IWH_DBG((IWH_DEBUG_CMD, "iwh_cmd() "
3929 "code[%d]", code));
3930 desc = ring->data[ring->cur].desc;
3931 cmd = ring->data[ring->cur].cmd;
3932
3933 cmd->hdr.type = (uint8_t)code;
3934 cmd->hdr.flags = 0;
3935 cmd->hdr.qid = ring->qid;
3936 cmd->hdr.idx = ring->cur;
3937 bcopy(buf, cmd->data, size);
3938 (void) memset(desc, 0, sizeof (*desc));
3939
3940 desc->val0 = 1 << 24;
3941 desc->pa[0].tb1_addr =
3942 (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
3943 desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
3944
3945 if (async) {
3946 sc->sc_cmd_accum++;
3947 }
3948
3949 /*
3950 * kick cmd ring XXX
3951 */
3952 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3953 tfd_offset[ring->cur].val = 8;
3954 if (ring->cur < IWH_MAX_WIN_SIZE) {
3955 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3956 tfd_offset[IWH_QUEUE_SIZE + ring->cur].val = 8;
3957 }
3958 ring->cur = (ring->cur + 1) % ring->count;
3959 IWH_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3960
3961 if (async) {
3962 return (IWH_SUCCESS);
3963 } else {
3964 clock_t clk;
3965
3966 clk = ddi_get_lbolt() + drv_usectohz(2000000);
3967 while (sc->sc_cmd_flag != SC_CMD_FLG_DONE) {
3968 if (cv_timedwait(&sc->sc_cmd_cv,
3969 &sc->sc_glock, clk) < 0) {
3970 break;
3971 }
3972 }
3973
3974 if (SC_CMD_FLG_DONE == sc->sc_cmd_flag) {
3975 sc->sc_cmd_flag = SC_CMD_FLG_NONE;
3976 return (IWH_SUCCESS);
3977 } else {
3978 sc->sc_cmd_flag = SC_CMD_FLG_NONE;
3979 return (IWH_FAIL);
3980 }
3981 }
3982 }
3983
3984 /*
3985 * request the ucode to set the LED of the NIC
3986 */
3987 static void
3988 iwh_set_led(iwh_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3989 {
3990 iwh_led_cmd_t led;
3991
3992 led.interval = LE_32(100000); /* unit: 100ms */
3993 led.id = id;
3994 led.off = off;
3995 led.on = on;
3996
3997 (void) iwh_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3998 }
3999
4000 /*
4001 * necessary NIC settings before authentication
4002 */
4003 static int
4004 iwh_hw_set_before_auth(iwh_sc_t *sc)
4005 {
4006 ieee80211com_t *ic = &sc->sc_ic;
4007 ieee80211_node_t *in = ic->ic_bss;
4008 int err = IWH_FAIL;
4009
4010 /*
4011 * update the adapter's configuration according
4012 * to the info of the target AP
4013 */
4014 IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
4015 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan));
4016
4017 if (ic->ic_curmode != IEEE80211_MODE_11NG) {
4018
4019 sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0;
4020 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0;
4021 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0;
4022
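/*
 * cck_basic_rates and ofdm_basic_rates are bitmaps over the CCK
 * (1/2/5.5/11 Mb/s) and OFDM (6/9/12/18/24/36/48/54 Mb/s) rate
 * tables, with the lowest rate in bit 0. Read that way, 0x03 keeps
 * 1/2 Mb/s for 11b, 0x15 keeps the mandatory 6/12/24 Mb/s on 5 GHz
 * and 0x0f/0xff enable every rate for 11b/g (this interpretation of
 * the bitmaps is an assumption based on the values used here).
 */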
4023 if (IEEE80211_MODE_11B == ic->ic_curmode) {
4024 sc->sc_config.cck_basic_rates = 0x03;
4025 sc->sc_config.ofdm_basic_rates = 0;
4026 } else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
4027 (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
4028 sc->sc_config.cck_basic_rates = 0;
4029 sc->sc_config.ofdm_basic_rates = 0x15;
4030 } else { /* assume 802.11b/g */
4031 sc->sc_config.cck_basic_rates = 0x0f;
4032 sc->sc_config.ofdm_basic_rates = 0xff;
4033 }
4034 }
4035
4036 sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
4037 RXON_FLG_SHORT_SLOT_MSK);
4038
4039 if (ic->ic_flags & IEEE80211_F_SHSLOT) {
4040 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
4041 } else {
4042 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
4043 }
4044
4045 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
4046 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
4047 } else {
4048 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
4049 }
4050
4051 IWH_DBG((IWH_DEBUG_80211, "iwh_hw_set_before_auth(): "
4052 "config chan %d flags %x "
4053 "filter_flags %x cck %x ofdm %x"
4054 " bssid:%02x:%02x:%02x:%02x:%02x:%02x\n",
4055 LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags),
4056 LE_32(sc->sc_config.filter_flags),
4057 sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
4058 sc->sc_config.bssid[0], sc->sc_config.bssid[1],
4059 sc->sc_config.bssid[2], sc->sc_config.bssid[3],
4060 sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
4061
4062 err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
4063 sizeof (iwh_rxon_cmd_t), 1);
4064 if (err != IWH_SUCCESS) {
4065 cmn_err(CE_WARN, "iwh_hw_set_before_auth(): "
4066 "failed to config chan%d\n", sc->sc_config.chan);
4067 return (err);
4068 }
4069
4070 if ((sc->sc_dev_id != 0x423c) &&
4071 (sc->sc_dev_id != 0x423d)) {
4072 err = iwh_tx_power_table(sc, 1);
4073 if (err != IWH_SUCCESS) {
4074 return (err);
4075 }
4076 }
4077
4078 /*
4079 * add default AP node
4080 */
4081 err = iwh_add_ap_sta(sc);
4082 if (err != IWH_SUCCESS) {
4083 return (err);
4084 }
4085
4086 if ((sc->sc_dev_id != 0x423c) &&
4087 (sc->sc_dev_id != 0x423d)) {
4088 /*
4089 * set up retry rate table for AP node
4090 */
4091 err = iwh_ap_lq(sc);
4092 if (err != IWH_SUCCESS) {
4093 return (err);
4094 }
4095 }
4096
4097 return (err);
4098 }
4099
4100 /*
4101 * Send a scan request (assemble a scan cmd) to the firmware.
4102 */
4103 static int
4104 iwh_scan(iwh_sc_t *sc)
4105 {
4106 ieee80211com_t *ic = &sc->sc_ic;
4107 iwh_tx_ring_t *ring = &sc->sc_txq[IWH_CMD_QUEUE_NUM];
4108 iwh_tx_desc_t *desc;
4109 iwh_tx_data_t *data;
4110 iwh_cmd_t *cmd;
4111 iwh_scan_hdr_t *hdr;
4112 iwh_scan_chan_t chan;
4113 struct ieee80211_frame *wh;
4114 ieee80211_node_t *in = ic->ic_bss;
4115 uint8_t essid[IEEE80211_NWID_LEN+1];
4116 struct ieee80211_rateset *rs;
4117 enum ieee80211_phymode mode;
4118 uint8_t *frm;
4119 int i, pktlen, nrates;
4120
4121 data = &ring->data[ring->cur];
4122 desc = data->desc;
4123 cmd = (iwh_cmd_t *)data->dma_data.mem_va;
4124
4125 cmd->hdr.type = REPLY_SCAN_CMD;
4126 cmd->hdr.flags = 0;
4127 cmd->hdr.qid = ring->qid;
4128 cmd->hdr.idx = ring->cur | 0x40;
4129
4130 hdr = (iwh_scan_hdr_t *)cmd->data;
4131 (void) memset(hdr, 0, sizeof (iwh_scan_hdr_t));
4132 hdr->nchan = 1;
4133 hdr->quiet_time = LE_16(50);
4134 hdr->quiet_plcp_th = LE_16(1);
4135
4136 hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK);
4137 hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
4138 (0x7 << RXON_RX_CHAIN_VALID_POS) |
4139 (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
4140 (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
4141
4142 hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
4143 hdr->tx_cmd.sta_id = IWH_BROADCAST_ID;
4144 hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff);
4145 hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwh_rate_to_plcp(2));
4146 hdr->tx_cmd.rate.r.rate_n_flags |=
4147 LE_32(RATE_MCS_ANT_B_MSK |RATE_MCS_CCK_MSK);
4148 hdr->direct_scan[0].len = ic->ic_des_esslen;
4149 hdr->direct_scan[0].id = IEEE80211_ELEMID_SSID;
4150
4151 hdr->filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4152 RXON_FILTER_BCON_AWARE_MSK);
4153
4154 if (ic->ic_des_esslen) {
4155 bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
4156 essid[ic->ic_des_esslen] = '\0';
4157 IWH_DBG((IWH_DEBUG_SCAN, "iwh_scan(): "
4158 "directed scan %s\n", essid));
4159
4160 bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
4161 ic->ic_des_esslen);
4162 } else {
4163 bzero(hdr->direct_scan[0].ssid,
4164 sizeof (hdr->direct_scan[0].ssid));
4165 }
4166
4167 /*
4168 * a probe request frame is required after the REPLY_SCAN_CMD
4169 */
4170 wh = (struct ieee80211_frame *)(hdr + 1);
4171 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4172 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4173 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4174 (void) memset(wh->i_addr1, 0xff, 6);
4175 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
4176 (void) memset(wh->i_addr3, 0xff, 6);
4177 *(uint16_t *)&wh->i_dur[0] = 0;
4178 *(uint16_t *)&wh->i_seq[0] = 0;
4179
4180 frm = (uint8_t *)(wh + 1);
4181
4182 /*
4183 * essid IE
4184 */
4185 if (in->in_esslen) {
4186 bcopy(in->in_essid, essid, in->in_esslen);
4187 essid[in->in_esslen] = '\0';
4188 IWH_DBG((IWH_DEBUG_SCAN, "iwh_scan(): "
4189 "probe with ESSID %s\n",
4190 essid));
4191 }
4192 *frm++ = IEEE80211_ELEMID_SSID;
4193 *frm++ = in->in_esslen;
4194 bcopy(in->in_essid, frm, in->in_esslen);
4195 frm += in->in_esslen;
4196
4197 mode = ieee80211_chan2mode(ic, ic->ic_curchan);
4198 rs = &ic->ic_sup_rates[mode];
4199
4200 /*
4201 * supported rates IE
4202 */
4203 *frm++ = IEEE80211_ELEMID_RATES;
4204 nrates = rs->ir_nrates;
4205 if (nrates > IEEE80211_RATE_SIZE) {
4206 nrates = IEEE80211_RATE_SIZE;
4207 }
4208
4209 *frm++ = (uint8_t)nrates;
4210 bcopy(rs->ir_rates, frm, nrates);
4211 frm += nrates;
4212
4213 /*
4214 * supported xrates IE
4215 */
4216 if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
4217 nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
4218 *frm++ = IEEE80211_ELEMID_XRATES;
4219 *frm++ = (uint8_t)nrates;
4220 bcopy(rs->ir_rates + IEEE80211_RATE_SIZE, frm, nrates);
4221 frm += nrates;
4222 }
4223
4224 /*
4225 * optional IE (usually for WPA)
4226 */
4227 if (ic->ic_opt_ie != NULL) {
4228 bcopy(ic->ic_opt_ie, frm, ic->ic_opt_ie_len);
4229 frm += ic->ic_opt_ie_len;
4230 }
4231
4232 /* setup length of probe request */
4233 hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
4234 hdr->len = LE_16(hdr->nchan * sizeof (iwh_scan_chan_t) +
4235 LE_16(hdr->tx_cmd.len) + sizeof (iwh_scan_hdr_t));
4236
4237 /*
4238 * the attributes of the scan channels are required after the probe
4239 * request frame.
4240 */
4241 for (i = 1; i <= hdr->nchan; i++) {
4242 if (ic->ic_des_esslen) {
4243 chan.type = LE_32(3);
4244 } else {
4245 chan.type = LE_32(1);
4246 }
4247
4248 chan.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
4249 chan.tpc.tx_gain = 0x28;
4250 chan.tpc.dsp_atten = 110;
4251 chan.active_dwell = LE_16(50);
4252 chan.passive_dwell = LE_16(120);
4253
4254 bcopy(&chan, frm, sizeof (iwh_scan_chan_t));
4255 frm += sizeof (iwh_scan_chan_t);
4256 }
4257
4258 pktlen = _PTRDIFF(frm, cmd);
4259
4260 (void) memset(desc, 0, sizeof (*desc));
4261 desc->val0 = 1 << 24;
4262 desc->pa[0].tb1_addr =
4263 (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
4264 desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
4265
4266 /*
4267 * filling the byte count table may not be necessary for a cmd,
4268 * but we fill it here anyway.
4269 */
4270 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
4271 .tfd_offset[ring->cur].val = 8;
4272 if (ring->cur < IWH_MAX_WIN_SIZE) {
4273 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
4274 tfd_offset[IWH_QUEUE_SIZE + ring->cur].val = 8;
4275 }
4276
4277 /*
4278 * kick cmd ring
4279 */
4280 ring->cur = (ring->cur + 1) % ring->count;
4281 IWH_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4282
4283 return (IWH_SUCCESS);
4284 }
4285
4286 /*
4287 * configure NIC by using ucode commands after loading ucode.
4288 */
4289 static int
4290 iwh_config(iwh_sc_t *sc)
4291 {
4292 ieee80211com_t *ic = &sc->sc_ic;
4293 iwh_powertable_cmd_t powertable;
4294 iwh_bt_cmd_t bt;
4295 iwh_add_sta_t node;
4296 iwh_rem_sta_t rm_sta;
4297 const uint8_t bcast[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
4298 iwh_link_quality_cmd_t link_quality;
4299 int i, err = IWH_FAIL;
4300 uint16_t masks = 0;
4301
4302 /*
4303 * set power mode. Power management is disabled at present; enable it later.
4304 */
4305 (void) memset(&powertable, 0, sizeof (powertable));
4306 powertable.flags = LE_16(0x8);
4307 err = iwh_cmd(sc, POWER_TABLE_CMD, &powertable,
4308 sizeof (powertable), 0);
4309 if (err != IWH_SUCCESS) {
4310 cmn_err(CE_WARN, "iwh_config(): "
4311 "failed to set power mode\n");
4312 return (err);
4313 }
4314
4315 /*
4316 * configure bt coexistence
4317 */
4318 (void) memset(&bt, 0, sizeof (bt));
4319 bt.flags = 3;
4320 bt.lead_time = 0xaa;
4321 bt.max_kill = 1;
4322 err = iwh_cmd(sc, REPLY_BT_CONFIG, &bt,
4323 sizeof (bt), 0);
4324 if (err != IWH_SUCCESS) {
4325 cmn_err(CE_WARN, "iwh_config(): "
4326 "failed to configurate bt coexistence\n");
4327 return (err);
4328 }
4329
4330 /*
4331 * configure rxon
4332 */
4333 (void) memset(&sc->sc_config, 0, sizeof (iwh_rxon_cmd_t));
4334 IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
4335 IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
4336 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
4337 sc->sc_config.flags = LE_32(RXON_FLG_BAND_24G_MSK);
4338 sc->sc_config.flags &= LE_32(~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4339 RXON_FLG_CHANNEL_MODE_PURE_40_MSK));
4340
4341 switch (ic->ic_opmode) {
4342 case IEEE80211_M_STA:
4343 sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
4344 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4345 RXON_FILTER_DIS_DECRYPT_MSK |
4346 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
4347 break;
4348
4349 case IEEE80211_M_IBSS:
4350 case IEEE80211_M_AHDEMO:
4351 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
4352
4353 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
4354 sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4355 RXON_FILTER_DIS_DECRYPT_MSK |
4356 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
4357 break;
4358
4359 case IEEE80211_M_HOSTAP:
4360 sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
4361 break;
4362
4363 case IEEE80211_M_MONITOR:
4364 sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
4365 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4366 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
4367 break;
4368 }
4369
4370 /*
4371 * Support all CCK rates.
4372 */
4373 sc->sc_config.cck_basic_rates = 0x0f;
4374
4375 /*
4376 * Support all OFDM rates.
4377 */
4378 sc->sc_config.ofdm_basic_rates = 0xff;
4379
4380 /*
4381 * Determine HT supported rates.
4382 */
4383 switch (sc->sc_ht_conf.rx_stream_count) {
4384 case 3:
4385 sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0xff;
4386 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
4387 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
4388 break;
4389 case 2:
4390 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
4391 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
4392 break;
4393 case 1:
4394 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
4395 break;
4396 default:
4397 cmn_err(CE_WARN, "iwh_config(): "
4398 "RX stream count %d is not in suitable range\n",
4399 sc->sc_ht_conf.rx_stream_count);
4400 return (IWH_FAIL);
4401 }
4402
4403 /*
4404 * set RX chains/antennas.
4405 */
4406 iwh_config_rxon_chain(sc);
4407
4408 err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
4409 sizeof (iwh_rxon_cmd_t), 0);
4410 if (err != IWH_SUCCESS) {
4411 cmn_err(CE_WARN, "iwh_config(): "
4412 "failed to set configure command\n");
4413 return (err);
4414 }
4415
4416 /*
4417 * remove all nodes in NIC
4418 */
4419 (void) memset(&rm_sta, 0, sizeof (rm_sta));
4420 rm_sta.num_sta = 1;
4421 bcopy(bcast, rm_sta.addr, 6);
4422
4423 err = iwh_cmd(sc, REPLY_REMOVE_STA, &rm_sta, sizeof (iwh_rem_sta_t), 0);
4424 if (err != IWH_SUCCESS) {
4425 cmn_err(CE_WARN, "iwh_config(): "
4426 "failed to remove broadcast node in hardware.\n");
4427 return (err);
4428 }
4429
4430 if ((sc->sc_dev_id != 0x423c) &&
4431 (sc->sc_dev_id != 0x423d)) {
4432 /*
4433 * configure TX power table
4434 */
4435 err = iwh_tx_power_table(sc, 0);
4436 if (err != IWH_SUCCESS) {
4437 return (err);
4438 }
4439 }
4440
4441 /*
4442 * add broadcast node so that we can send broadcast frames
4443 */
4444 (void) memset(&node, 0, sizeof (node));
4445 (void) memset(node.sta.addr, 0xff, 6);
4446 node.mode = 0;
4447 node.sta.sta_id = IWH_BROADCAST_ID;
4448 node.station_flags = 0;
4449
4450 err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
4451 if (err != IWH_SUCCESS) {
4452 cmn_err(CE_WARN, "iwh_config(): "
4453 "failed to add broadcast node\n");
4454 return (err);
4455 }
4456
4457 if ((sc->sc_dev_id != 0x423c) &&
4458 (sc->sc_dev_id != 0x423d)) {
4459 /*
4460 * TX_LINK_QUALITY cmd
4461 */
4462 (void) memset(&link_quality, 0, sizeof (link_quality));
4463 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
4464 masks |= RATE_MCS_CCK_MSK;
4465 masks |= RATE_MCS_ANT_B_MSK;
4466 masks &= ~RATE_MCS_ANT_A_MSK;
4467 link_quality.rate_n_flags[i] =
4468 LE_32(iwh_rate_to_plcp(2) | masks);
4469 }
4470
4471 link_quality.general_params.single_stream_ant_msk = 2;
4472 link_quality.general_params.dual_stream_ant_msk = 3;
4473 link_quality.agg_params.agg_dis_start_th = 3;
4474 link_quality.agg_params.agg_time_limit = LE_16(4000);
4475 link_quality.sta_id = IWH_BROADCAST_ID;
4476 err = iwh_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
4477 sizeof (link_quality), 0);
4478 if (err != IWH_SUCCESS) {
4479 cmn_err(CE_WARN, "iwh_config(): "
4480 "failed to config link quality table\n");
4481 return (err);
4482 }
4483 }
4484
4485 return (err);
4486 }
4487
4488 /*
4489 * quiesce(9E) entry point.
4490 * This function is called when the system is single-threaded at high
4491 * PIL with preemption disabled. Therefore, this function must not
4492 * block.
4493 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
4494 * DDI_FAILURE indicates an error condition and should almost never happen.
4495 */
4496 static int
4497 iwh_quiesce(dev_info_t *dip)
4498 {
4499 iwh_sc_t *sc;
4500
4501 sc = ddi_get_soft_state(iwh_soft_state_p, ddi_get_instance(dip));
4502 if (sc == NULL) {
4503 return (DDI_FAILURE);
4504 }
4505
4506 #ifdef DEBUG
4507 /*
4508 * bypass any debug messages while quiescing
4509 */
4510 iwh_dbg_flags = 0;
4511 #endif
4512
4513 /*
4514 * No more blocking is allowed while we are in the
4515 * quiesce(9E) entry point.
4516 */
4517 atomic_or_32(&sc->sc_flags, IWH_F_QUIESCED);
4518
4519 /*
4520 * Disable and mask all interrupts.
4521 */
4522 iwh_stop(sc);
4523
4524 return (DDI_SUCCESS);
4525 }
4526
4527 static void
4528 iwh_stop_master(iwh_sc_t *sc)
4529 {
4530 uint32_t tmp;
4531 int n;
4532
4533 tmp = IWH_READ(sc, CSR_RESET);
4534 IWH_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
4535
4536 tmp = IWH_READ(sc, CSR_GP_CNTRL);
4537 if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
4538 CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) {
4539 return;
4540 }
4541
4542 for (n = 0; n < 2000; n++) {
4543 if (IWH_READ(sc, CSR_RESET) &
4544 CSR_RESET_REG_FLAG_MASTER_DISABLED) {
4545 break;
4546 }
4547 DELAY(1000);
4548 }
4549
4550 #ifdef DEBUG
4551 if (2000 == n) {
4552 IWH_DBG((IWH_DEBUG_HW, "iwh_stop_master(): "
4553 "timeout waiting for master stop\n"));
4554 }
4555 #endif
4556 }
4557
4558 static int
4559 iwh_power_up(iwh_sc_t *sc)
4560 {
4561 uint32_t tmp;
4562
4563 iwh_mac_access_enter(sc);
4564 tmp = iwh_reg_read(sc, ALM_APMG_PS_CTL);
4565 tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
4566 tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
4567 iwh_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4568 iwh_mac_access_exit(sc);
4569
4570 DELAY(5000);
4571 return (IWH_SUCCESS);
4572 }
4573
4574 /*
4575 * hardware initialization
4576 */
4577 static int
4578 iwh_preinit(iwh_sc_t *sc)
4579 {
4580 int n;
4581 uint8_t vlink;
4582 uint16_t radio_cfg;
4583 uint32_t tmp;
4584
4585 /*
4586 * clear any pending interrupts
4587 */
4588 IWH_WRITE(sc, CSR_INT, 0xffffffff);
4589
4590 tmp = IWH_READ(sc, CSR_GIO_CHICKEN_BITS);
4591 IWH_WRITE(sc, CSR_GIO_CHICKEN_BITS,
4592 tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
4593
4594 tmp = IWH_READ(sc, CSR_ANA_PLL_CFG);
4595 IWH_WRITE(sc, CSR_ANA_PLL_CFG, tmp | IWH_CSR_ANA_PLL_CFG);
4596
4597 tmp = IWH_READ(sc, CSR_GP_CNTRL);
4598 IWH_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4599
4600 /*
4601 * wait for clock ready
4602 */
4603 for (n = 0; n < 1000; n++) {
4604 if (IWH_READ(sc, CSR_GP_CNTRL) &
4605 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
4606 break;
4607 }
4608 DELAY(10);
4609 }
4610
4611 if (1000 == n) {
4612 return (ETIMEDOUT);
4613 }
4614
4615 iwh_mac_access_enter(sc);
4616
4617 iwh_reg_write(sc, ALM_APMG_CLK_EN, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4618
4619 DELAY(20);
4620 tmp = iwh_reg_read(sc, ALM_APMG_PCIDEV_STT);
4621 iwh_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
4622 APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
4623 iwh_mac_access_exit(sc);
4624
4625 radio_cfg = IWH_READ_EEP_SHORT(sc, EEP_SP_RADIO_CONFIGURATION);
4626 if (SP_RADIO_TYPE_MSK(radio_cfg) < SP_RADIO_TYPE_MAX) {
4627 tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4628 IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4629 tmp | SP_RADIO_TYPE_MSK(radio_cfg) |
4630 SP_RADIO_STEP_MSK(radio_cfg) |
4631 SP_RADIO_DASH_MSK(radio_cfg));
4632 } else {
4633 cmn_err(CE_WARN, "iwh_preinit(): "
4634 "radio configuration information in eeprom is wrong\n");
4635 return (IWH_FAIL);
4636 }
4637
4638
4639 IWH_WRITE(sc, CSR_INT_COALESCING, 512 / 32);
4640
4641 (void) iwh_power_up(sc);
4642
4643 if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
4644 tmp = ddi_get32(sc->sc_cfg_handle,
4645 (uint32_t *)(sc->sc_cfg_base + 0xe8));
4646 ddi_put32(sc->sc_cfg_handle,
4647 (uint32_t *)(sc->sc_cfg_base + 0xe8),
4648 tmp & ~(1 << 11));
4649 }
4650
4651 vlink = ddi_get8(sc->sc_cfg_handle,
4652 (uint8_t *)(sc->sc_cfg_base + 0xf0));
4653 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
4654 vlink & ~2);
4655
4656 tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4657 tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
4658 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
4659 IWH_WRITE(sc, CSR_SW_VER, tmp);
4660
4661 /*
4662 * make sure power is supplied to each part of the hardware
4663 */
4664 iwh_mac_access_enter(sc);
4665 tmp = iwh_reg_read(sc, ALM_APMG_PS_CTL);
4666 tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4667 iwh_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4668 DELAY(5);
4669
4670 tmp = iwh_reg_read(sc, ALM_APMG_PS_CTL);
4671 tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4672 iwh_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4673 iwh_mac_access_exit(sc);
4674
4675 return (IWH_SUCCESS);
4676 }
4677
4678 /*
4679 * set up semaphore flag to own EEPROM
4680 */
4681 static int
4682 iwh_eep_sem_down(iwh_sc_t *sc)
4683 {
4684 int count1, count2;
4685 uint32_t tmp;
4686
4687 for (count1 = 0; count1 < 1000; count1++) {
4688 tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4689 IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4690 tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4691
4692 for (count2 = 0; count2 < 2; count2++) {
4693 if (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
4694 CSR_HW_IF_CONFIG_REG_EEP_SEM) {
4695 return (IWH_SUCCESS);
4696 }
4697 DELAY(10000);
4698 }
4699 }
4700
4701 return (IWH_FAIL);
4702 }
4703
4704 /*
4705 * reset semaphore flag to release EEPROM
4706 */
4707 static void
4708 iwh_eep_sem_up(iwh_sc_t *sc)
4709 {
4710 uint32_t tmp;
4711
4712 tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4713 IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4714 tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4715 }
4716
4717 /*
4718 * This function reads all information from the eeprom
4719 */
4720 static int
4721 iwh_eep_load(iwh_sc_t *sc)
4722 {
4723 int i, rr;
4724 uint32_t rv, tmp, eep_gp;
4725 uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
4726 uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
4727
4728 /*
4729 * read eeprom gp register in CSR
4730 */
4731 eep_gp = IWH_READ(sc, CSR_EEPROM_GP);
4732 if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
4733 CSR_EEPROM_GP_BAD_SIGNATURE) {
4734 IWH_DBG((IWH_DEBUG_EEPROM, "iwh_eep_load(): "
4735 "not find eeprom\n"));
4736 return (IWH_FAIL);
4737 }
4738
4739 rr = iwh_eep_sem_down(sc);
4740 if (rr != 0) {
4741 IWH_DBG((IWH_DEBUG_EEPROM, "iwh_eep_load(): "
4742 "driver failed to own EEPROM\n"));
4743 return (IWH_FAIL);
4744 }
4745
4746 for (addr = 0; addr < eep_sz; addr += 2) {
4747 IWH_WRITE(sc, CSR_EEPROM_REG, addr<<1);
4748 tmp = IWH_READ(sc, CSR_EEPROM_REG);
4749 IWH_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
4750
4751 for (i = 0; i < 10; i++) {
4752 rv = IWH_READ(sc, CSR_EEPROM_REG);
4753 if (rv & 1) {
4754 break;
4755 }
4756 DELAY(10);
4757 }
4758
4759 if (!(rv & 1)) {
4760 IWH_DBG((IWH_DEBUG_EEPROM, "iwh_eep_load(): "
4761 "time out when read eeprome\n"));
4762 iwh_eep_sem_up(sc);
4763 return (IWH_FAIL);
4764 }
4765
4766 eep_p[addr/2] = LE_16(rv >> 16);
4767 }
4768
4769 iwh_eep_sem_up(sc);
4770 return (IWH_SUCCESS);
4771 }
4772
4773 /*
4774 * initialize mac address in ieee80211com_t struct
4775 */
4776 static void
4777 iwh_get_mac_from_eep(iwh_sc_t *sc)
4778 {
4779 ieee80211com_t *ic = &sc->sc_ic;
4780
4781 IEEE80211_ADDR_COPY(ic->ic_macaddr, &sc->sc_eep_map[EEP_MAC_ADDRESS]);
4782
4783 IWH_DBG((IWH_DEBUG_EEPROM, "iwh_get_mac_from_eep(): "
4784 "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4785 ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4786 ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4787 }
4788
4789 /*
4790 * main initialization function
4791 */
4792 static int
4793 iwh_init(iwh_sc_t *sc)
4794 {
4795 int err = IWH_FAIL;
4796 clock_t clk;
4797
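/*
 * The ucode is loaded in two stages: the "init" image runs first so
 * the firmware can perform its calibrations (the results are captured
 * by iwh_save_calib_result()), then the chipset is stopped and the
 * runtime image is loaded and configured.
 */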
4798 /*
4799 * release buffer for calibration
4800 */
4801 iwh_release_calib_buffer(sc);
4802
4803 mutex_enter(&sc->sc_glock);
4804 atomic_and_32(&sc->sc_flags, ~IWH_F_FW_INIT);
4805
4806 err = iwh_init_common(sc);
4807 if (err != IWH_SUCCESS) {
4808 mutex_exit(&sc->sc_glock);
4809 return (IWH_FAIL);
4810 }
4811
4812 /*
4813 * backup ucode data part for future use.
4814 */
4815 bcopy(sc->sc_dma_fw_data.mem_va,
4816 sc->sc_dma_fw_data_bak.mem_va,
4817 sc->sc_dma_fw_data.alength);
4818
4819 /* load firmware init segment into NIC */
4820 err = iwh_load_init_firmware(sc);
4821 if (err != IWH_SUCCESS) {
4822 cmn_err(CE_WARN, "iwh_init(): "
4823 "failed to setup init firmware\n");
4824 mutex_exit(&sc->sc_glock);
4825 return (IWH_FAIL);
4826 }
4827
4828 /*
4829 * now press "execute" start running
4830 */
4831 IWH_WRITE(sc, CSR_RESET, 0);
4832
4833 clk = ddi_get_lbolt() + drv_usectohz(1000000);
4834 while (!(sc->sc_flags & IWH_F_FW_INIT)) {
4835 if (cv_timedwait(&sc->sc_ucode_cv,
4836 &sc->sc_glock, clk) < 0) {
4837 break;
4838 }
4839 }
4840
4841 if (!(sc->sc_flags & IWH_F_FW_INIT)) {
4842 cmn_err(CE_WARN, "iwh_init(): "
4843 "failed to process init alive.\n");
4844 mutex_exit(&sc->sc_glock);
4845 return (IWH_FAIL);
4846 }
4847
4848 mutex_exit(&sc->sc_glock);
4849
4850 /*
4851 * stop the chipset so that it can be initialized again
4852 */
4853 iwh_stop(sc);
4854
4855 mutex_enter(&sc->sc_glock);
4856 atomic_and_32(&sc->sc_flags, ~IWH_F_FW_INIT);
4857
4858 err = iwh_init_common(sc);
4859 if (err != IWH_SUCCESS) {
4860 mutex_exit(&sc->sc_glock);
4861 return (IWH_FAIL);
4862 }
4863
4864 /*
4865 * load firmware run segment into NIC
4866 */
4867 err = iwh_load_run_firmware(sc);
4868 if (err != IWH_SUCCESS) {
4869 cmn_err(CE_WARN, "iwh_init(): "
4870 "failed to setup run firmware\n");
4871 mutex_exit(&sc->sc_glock);
4872 return (IWH_FAIL);
4873 }
4874
4875 /*
4876 * now press "execute" start running
4877 */
4878 IWH_WRITE(sc, CSR_RESET, 0);
4879
4880 clk = ddi_get_lbolt() + drv_usectohz(1000000);
4881 while (!(sc->sc_flags & IWH_F_FW_INIT)) {
4882 if (cv_timedwait(&sc->sc_ucode_cv,
4883 &sc->sc_glock, clk) < 0) {
4884 break;
4885 }
4886 }
4887
4888 if (!(sc->sc_flags & IWH_F_FW_INIT)) {
4889 cmn_err(CE_WARN, "iwh_init(): "
4890 "failed to process runtime alive.\n");
4891 mutex_exit(&sc->sc_glock);
4892 return (IWH_FAIL);
4893 }
4894
4895 mutex_exit(&sc->sc_glock);
4896
4897 DELAY(1000);
4898
4899 mutex_enter(&sc->sc_glock);
4900 atomic_and_32(&sc->sc_flags, ~IWH_F_FW_INIT);
4901
4902 /*
4903 * at this point, the firmware is loaded OK, so configure the hardware
4904 * via the ucode API, including rxon, txpower, etc.
4905 */
4906 err = iwh_config(sc);
4907 if (err) {
4908 cmn_err(CE_WARN, "iwh_init(): "
4909 "failed to configure device\n");
4910 mutex_exit(&sc->sc_glock);
4911 return (IWH_FAIL);
4912 }
4913
4914 /*
4915 * at this point, hardware may receive beacons :)
4916 */
4917 mutex_exit(&sc->sc_glock);
4918 return (IWH_SUCCESS);
4919 }
4920
4921 /*
4922 * stop or disable NIC
4923 */
4924 static void
4925 iwh_stop(iwh_sc_t *sc)
4926 {
4927 uint32_t tmp;
4928 int i;
4929
4930 /*
4931 * bypass the lock if we are quiesced
4932 */
4933 if (!(sc->sc_flags & IWH_F_QUIESCED)) {
4934 mutex_enter(&sc->sc_glock);
4935 }
4936
4937 IWH_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4938 /*
4939 * disable interrupts
4940 */
4941 IWH_WRITE(sc, CSR_INT_MASK, 0);
4942 IWH_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
4943 IWH_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
4944
4945 /*
4946 * reset all Tx rings
4947 */
4948 for (i = 0; i < IWH_NUM_QUEUES; i++) {
4949 iwh_reset_tx_ring(sc, &sc->sc_txq[i]);
4950 }
4951
4952 /*
4953 * reset Rx ring
4954 */
4955 iwh_reset_rx_ring(sc);
4956
4957 iwh_mac_access_enter(sc);
4958 iwh_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4959 iwh_mac_access_exit(sc);
4960
4961 DELAY(5);
4962
4963 iwh_stop_master(sc);
4964
4965 mutex_enter(&sc->sc_mt_lock);
4966 sc->sc_tx_timer = 0;
4967 mutex_exit(&sc->sc_mt_lock);
4968
4969 tmp = IWH_READ(sc, CSR_RESET);
4970 IWH_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
4971
4972 /*
4973 * bypass the lock if we are quiesced
4974 */
4975 if (!(sc->sc_flags & IWH_F_QUIESCED)) {
4976 mutex_exit(&sc->sc_glock);
4977 }
4978 }
4979
4980 /*
4981 * Naive implementation of the Adaptive Multi Rate Retry algorithm:
4982 * "IEEE 802.11 Rate Adaptation: A Practical Approach"
4983 * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
4984 * INRIA Sophia - Projet Planete
4985 * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
4986 */
4987 #define is_success(amrr) \
4988 ((amrr)->retrycnt < (amrr)->txcnt / 10)
4989 #define is_failure(amrr) \
4990 ((amrr)->retrycnt > (amrr)->txcnt / 3)
4991 #define is_enough(amrr) \
4992 ((amrr)->txcnt > 200)
4993 #define not_very_few(amrr) \
4994 ((amrr)->txcnt > 40)
4995 #define is_min_rate(in) \
4996 (0 == (in)->in_txrate)
4997 #define is_max_rate(in) \
4998 ((in)->in_rates.ir_nrates - 1 == (in)->in_txrate)
4999 #define increase_rate(in) \
5000 ((in)->in_txrate++)
5001 #define decrease_rate(in) \
5002 ((in)->in_txrate--)
5003 #define reset_cnt(amrr) \
5004 { (amrr)->txcnt = (amrr)->retrycnt = 0; }
5005
5006 #define IWH_AMRR_MIN_SUCCESS_THRESHOLD 1
5007 #define IWH_AMRR_MAX_SUCCESS_THRESHOLD 15
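/*
 * Worked example of the thresholds above: once txcnt exceeds 200
 * (is_enough), a window with txcnt = 300 counts as a success when
 * retrycnt < 30 (under 10% retries) and as a failure when
 * retrycnt > 100 (over a third retries).
 */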
5008
5009 static void
5010 iwh_amrr_init(iwh_amrr_t *amrr)
5011 {
5012 amrr->success = 0;
5013 amrr->recovery = 0;
5014 amrr->txcnt = amrr->retrycnt = 0;
5015 amrr->success_threshold = IWH_AMRR_MIN_SUCCESS_THRESHOLD;
5016 amrr->ht_mcs_idx = 0; /* 6Mbps */
5017 }
5018
5019 static void
5020 iwh_amrr_timeout(iwh_sc_t *sc)
5021 {
5022 ieee80211com_t *ic = &sc->sc_ic;
5023
5024 IWH_DBG((IWH_DEBUG_RATECTL, "iwh_amrr_timeout(): "
5025 "enter\n"));
5026
5027 if (IEEE80211_M_STA == ic->ic_opmode) {
5028 iwh_amrr_ratectl(NULL, ic->ic_bss);
5029 } else {
5030 ieee80211_iterate_nodes(&ic->ic_sta, iwh_amrr_ratectl, NULL);
5031 }
5032
5033 sc->sc_clk = ddi_get_lbolt();
5034 }
5035
5036 static int
5037 iwh_is_max_rate(ieee80211_node_t *in)
5038 {
5039 int i;
5040 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5041 uint8_t r = (uint8_t)amrr->ht_mcs_idx;
5042 ieee80211com_t *ic = in->in_ic;
5043 iwh_sc_t *sc = (iwh_sc_t *)ic;
5044
5045 if (in->in_flags & IEEE80211_NODE_HT) {
5046 for (i = in->in_htrates.rs_nrates - 1; i >= 0; i--) {
5047 r = in->in_htrates.rs_rates[i] &
5048 IEEE80211_RATE_VAL;
5049 if (sc->sc_ht_conf.tx_support_mcs[r/8] &
5050 (1 << (r%8))) {
5051 break;
5052 }
5053 }
5054
5055 return (r == (uint8_t)amrr->ht_mcs_idx);
5056 } else {
5057 return (is_max_rate(in));
5058 }
5059 }
5060
5061 static int
5062 iwh_is_min_rate(ieee80211_node_t *in)
5063 {
5064 int i;
5065 uint8_t r = 0;
5066 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5067 ieee80211com_t *ic = in->in_ic;
5068 iwh_sc_t *sc = (iwh_sc_t *)ic;
5069
5070 if (in->in_flags & IEEE80211_NODE_HT) {
5071 for (i = 0; i < in->in_htrates.rs_nrates; i++) {
5072 r = in->in_htrates.rs_rates[i] &
5073 IEEE80211_RATE_VAL;
5074 if (sc->sc_ht_conf.tx_support_mcs[r/8] &
5075 (1 << (r%8))) {
5076 break;
5077 }
5078 }
5079
5080 return (r == (uint8_t)amrr->ht_mcs_idx);
5081 } else {
5082 return (is_min_rate(in));
5083 }
5084 }
5085
5086 static void
5087 iwh_increase_rate(ieee80211_node_t *in)
5088 {
5089 int i;
5090 uint8_t r;
5091 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5092 ieee80211com_t *ic = in->in_ic;
5093 iwh_sc_t *sc = (iwh_sc_t *)ic;
5094
5095 if (in->in_flags & IEEE80211_NODE_HT) {
5096 again:
5097 amrr->ht_mcs_idx++;
5098
5099 for (i = 0; i < in->in_htrates.rs_nrates; i++) {
5100 r = in->in_htrates.rs_rates[i] &
5101 IEEE80211_RATE_VAL;
5102 if ((r == (uint8_t)amrr->ht_mcs_idx) &&
5103 (sc->sc_ht_conf.tx_support_mcs[r/8] &
5104 (1 << (r%8)))) {
5105 break;
5106 }
5107 }
5108
5109 if (i >= in->in_htrates.rs_nrates) {
5110 goto again;
5111 }
5112 } else {
5113 increase_rate(in);
5114 }
5115 }
5116
5117 static void
5118 iwh_decrease_rate(ieee80211_node_t *in)
5119 {
5120 int i;
5121 uint8_t r;
5122 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5123 ieee80211com_t *ic = in->in_ic;
5124 iwh_sc_t *sc = (iwh_sc_t *)ic;
5125
5126 if (in->in_flags & IEEE80211_NODE_HT) {
5127 again:
5128 amrr->ht_mcs_idx--;
5129
5130 for (i = 0; i < in->in_htrates.rs_nrates; i++) {
5131 r = in->in_htrates.rs_rates[i] &
5132 IEEE80211_RATE_VAL;
5133 if ((r == (uint8_t)amrr->ht_mcs_idx) &&
5134 (sc->sc_ht_conf.tx_support_mcs[r/8] &
5135 (1 << (r%8)))) {
5136 break;
5137 }
5138 }
5139
5140 if (i >= in->in_htrates.rs_nrates) {
5141 goto again;
5142 }
5143 } else {
5144 decrease_rate(in);
5145 }
5146 }
5147
5148 /* ARGSUSED */
5149 static void
5150 iwh_amrr_ratectl(void *arg, ieee80211_node_t *in)
5151 {
5152 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5153 int need_change = 0;
5154
5155 if (is_success(amrr) && is_enough(amrr)) {
5156 amrr->success++;
5157 if (amrr->success >= amrr->success_threshold &&
5158 !iwh_is_max_rate(in)) {
5159 amrr->recovery = 1;
5160 amrr->success = 0;
5161 iwh_increase_rate(in);
5162 IWH_DBG((IWH_DEBUG_RATECTL, "iwh_amrr_ratectl(): "
5163 "AMRR increasing rate %d "
5164 "(txcnt=%d retrycnt=%d), mcs_idx=%d\n",
5165 in->in_txrate, amrr->txcnt,
5166 amrr->retrycnt, amrr->ht_mcs_idx));
5167 need_change = 1;
5168 } else {
5169 amrr->recovery = 0;
5170 }
5171 } else if (not_very_few(amrr) && is_failure(amrr)) {
5172 amrr->success = 0;
5173 if (!iwh_is_min_rate(in)) {
5174 if (amrr->recovery) {
5175 amrr->success_threshold++;
5176 if (amrr->success_threshold >
5177 IWH_AMRR_MAX_SUCCESS_THRESHOLD) {
5178 amrr->success_threshold =
5179 IWH_AMRR_MAX_SUCCESS_THRESHOLD;
5180 }
5181 } else {
5182 amrr->success_threshold =
5183 IWH_AMRR_MIN_SUCCESS_THRESHOLD;
5184 }
5185 iwh_decrease_rate(in);
5186 IWH_DBG((IWH_DEBUG_RATECTL, "iwh_amrr_ratectl(): "
5187 "AMRR decreasing rate %d "
5188 "(txcnt=%d retrycnt=%d), mcs_idx=%d\n",
5189 in->in_txrate, amrr->txcnt,
5190 amrr->retrycnt, amrr->ht_mcs_idx));
5191 need_change = 1;
5192 }
5193 amrr->recovery = 0; /* paper is incorrect */
5194 }
5195
5196 if (is_enough(amrr) || need_change) {
5197 reset_cnt(amrr);
5198 }
5199 }
5200
5201 /*
5202 * translate an indirect eeprom address into a direct eeprom address
5203 * and return the address of the entry whose indirect address
5204 * is indi_addr
5205 */
5206 static uint8_t *
5207 iwh_eep_addr_trans(iwh_sc_t *sc, uint32_t indi_addr)
5208 {
5209 uint32_t di_addr;
5210 uint16_t temp;
5211
5212 if (!(indi_addr & INDIRECT_ADDRESS)) {
5213 di_addr = indi_addr;
5214 return (&sc->sc_eep_map[di_addr]);
5215 }
5216
5217 switch (indi_addr & INDIRECT_TYPE_MSK) {
5218 case INDIRECT_GENERAL:
5219 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_GENERAL);
5220 break;
5221
5222 case INDIRECT_HOST:
5223 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_HOST);
5224 break;
5225
5226 case INDIRECT_REGULATORY:
5227 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_REGULATORY);
5228 break;
5229
5230 case INDIRECT_CALIBRATION:
5231 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_CALIBRATION);
5232 break;
5233
5234 case INDIRECT_PROCESS_ADJST:
5235 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_PROCESS_ADJST);
5236 break;
5237
5238 case INDIRECT_OTHERS:
5239 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_OTHERS);
5240 break;
5241
5242 default:
5243 temp = 0;
5244 cmn_err(CE_WARN, "iwh_eep_addr_trans(): "
5245 "incorrect indirect eeprom address.\n");
5246 break;
5247 }
5248
5249 di_addr = (indi_addr & ADDRESS_MSK) + (temp << 1);
5250
5251 return (&sc->sc_eep_map[di_addr]);
5252 }
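/*
 * Example of the translation above: for an address tagged
 * INDIRECT_CALIBRATION, the 16-bit link word read from
 * EEP_LINK_CALIBRATION is shifted left by one (it appears to be a
 * word offset) and added to the ADDRESS_MSK part of the indirect
 * address to form the final byte offset into sc_eep_map.
 */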
5253
5254 /*
5255 * load a section of ucode into the NIC
5256 */
5257 static int
5258 iwh_put_seg_fw(iwh_sc_t *sc, uint32_t addr_s, uint32_t addr_d, uint32_t len)
5259 {
5260
5261 iwh_mac_access_enter(sc);
5262
5263 IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(IWH_FH_SRVC_CHNL),
5264 IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
5265
5266 IWH_WRITE(sc, IWH_FH_SRVC_CHNL_SRAM_ADDR_REG(IWH_FH_SRVC_CHNL), addr_d);
5267
5268 IWH_WRITE(sc, IWH_FH_TFDIB_CTRL0_REG(IWH_FH_SRVC_CHNL),
5269 (addr_s & FH_MEM_TFDIB_DRAM_ADDR_LSB_MASK));
5270
5271 IWH_WRITE(sc, IWH_FH_TFDIB_CTRL1_REG(IWH_FH_SRVC_CHNL), len);
5272
5273 IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_BUF_STS_REG(IWH_FH_SRVC_CHNL),
5274 (1 << IWH_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
5275 (1 << IWH_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
5276 IWH_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
5277
5278 IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(IWH_FH_SRVC_CHNL),
5279 IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
5280 IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
5281 IWH_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
5282
5283 iwh_mac_access_exit(sc);
5284
5285 return (IWH_SUCCESS);
5286 }
5287
5288 /*
5289 * necessary settings during alive notification
5290 */
5291 static int
5292 iwh_alive_common(iwh_sc_t *sc)
5293 {
5294 uint32_t base;
5295 uint32_t i;
5296 iwh_wimax_coex_cmd_t w_cmd;
5297 iwh_calibration_crystal_cmd_t c_cmd;
5298 uint32_t rv = IWH_FAIL;
5299
5300 /*
5301 * initialize SCD related registers to make TX work.
5302 */
5303 iwh_mac_access_enter(sc);
5304
5305 /*
5306 * read sram address of data base.
5307 */
5308 sc->sc_scd_base = iwh_reg_read(sc, IWH_SCD_SRAM_BASE_ADDR);
5309
5310 for (base = sc->sc_scd_base + IWH_SCD_CONTEXT_DATA_OFFSET;
5311 base < sc->sc_scd_base + IWH_SCD_TX_STTS_BITMAP_OFFSET;
5312 base += 4) {
5313 iwh_mem_write(sc, base, 0);
5314 }
5315
5316 for (; base < sc->sc_scd_base + IWH_SCD_TRANSLATE_TBL_OFFSET;
5317 base += 4) {
5318 iwh_mem_write(sc, base, 0);
5319 }
5320
5321 for (i = 0; i < sizeof (uint16_t) * IWH_NUM_QUEUES; i += 4) {
5322 iwh_mem_write(sc, base + i, 0);
5323 }
5324
5325 iwh_reg_write(sc, IWH_SCD_DRAM_BASE_ADDR,
5326 sc->sc_dma_sh.cookie.dmac_address >> 10);
5327
5328 iwh_reg_write(sc, IWH_SCD_QUEUECHAIN_SEL,
5329 IWH_SCD_QUEUECHAIN_SEL_ALL(IWH_NUM_QUEUES));
5330
5331 iwh_reg_write(sc, IWH_SCD_AGGR_SEL, 0);
5332
5333 for (i = 0; i < IWH_NUM_QUEUES; i++) {
5334 iwh_reg_write(sc, IWH_SCD_QUEUE_RDPTR(i), 0);
5335 IWH_WRITE(sc, HBUS_TARG_WRPTR, 0 | (i << 8));
5336 iwh_mem_write(sc, sc->sc_scd_base +
5337 IWH_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
5338 iwh_mem_write(sc, sc->sc_scd_base +
5339 IWH_SCD_CONTEXT_QUEUE_OFFSET(i) +
5340 sizeof (uint32_t),
5341 ((SCD_WIN_SIZE << IWH_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
5342 IWH_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
5343 ((SCD_FRAME_LIMIT <<
5344 IWH_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
5345 IWH_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
5346 }
5347
5348 iwh_reg_write(sc, IWH_SCD_INTERRUPT_MASK, (1 << IWH_NUM_QUEUES) - 1);
5349
5350 iwh_reg_write(sc, (IWH_SCD_BASE + 0x10),
5351 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
5352
5353 IWH_WRITE(sc, HBUS_TARG_WRPTR, (IWH_CMD_QUEUE_NUM << 8));
5354 iwh_reg_write(sc, IWH_SCD_QUEUE_RDPTR(IWH_CMD_QUEUE_NUM), 0);
5355
5356 /*
5357 * queues 0-7 map to FIFOs 0-7 and
5358 * all queues work in FIFO mode (non-scheduler-ack)
5359 */
5360 for (i = 0; i < 4; i++) {
5361 iwh_reg_write(sc, IWH_SCD_QUEUE_STATUS_BITS(i),
5362 (1 << IWH_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
5363 ((3-i) << IWH_SCD_QUEUE_STTS_REG_POS_TXF) |
5364 (1 << IWH_SCD_QUEUE_STTS_REG_POS_WSL) |
5365 IWH_SCD_QUEUE_STTS_REG_MSK);
5366 }
5367
5368 iwh_reg_write(sc, IWH_SCD_QUEUE_STATUS_BITS(IWH_CMD_QUEUE_NUM),
5369 (1 << IWH_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
5370 (IWH_CMD_FIFO_NUM << IWH_SCD_QUEUE_STTS_REG_POS_TXF) |
5371 (1 << IWH_SCD_QUEUE_STTS_REG_POS_WSL) |
5372 IWH_SCD_QUEUE_STTS_REG_MSK);
5373
5374 for (i = 5; i < 7; i++) {
5375 iwh_reg_write(sc, IWH_SCD_QUEUE_STATUS_BITS(i),
5376 (1 << IWH_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
5377 (i << IWH_SCD_QUEUE_STTS_REG_POS_TXF) |
5378 (1 << IWH_SCD_QUEUE_STTS_REG_POS_WSL) |
5379 IWH_SCD_QUEUE_STTS_REG_MSK);
5380 }
5381
5382 iwh_mac_access_exit(sc);
5383
5384 (void) memset(&w_cmd, 0, sizeof (w_cmd));
5385
5386 rv = iwh_cmd(sc, COEX_PRIORITY_TABLE_CMD, &w_cmd, sizeof (w_cmd), 1);
5387 if (rv != IWH_SUCCESS) {
5388 cmn_err(CE_WARN, "iwh_alive_common(): "
5389 "failed to send wimax coexist command.\n");
5390 return (rv);
5391 }
5392
5393 if ((sc->sc_dev_id != 0x423c) &&
5394 (sc->sc_dev_id != 0x423d)) {
5395 (void) memset(&c_cmd, 0, sizeof (c_cmd));
5396
5397 c_cmd.opCode = PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
5398 c_cmd.data.cap_pin1 = LE_16(sc->sc_eep_calib->xtal_calib[0]);
5399 c_cmd.data.cap_pin2 = LE_16(sc->sc_eep_calib->xtal_calib[1]);
5400
5401 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
5402 &c_cmd, sizeof (c_cmd), 1);
5403 if (rv != IWH_SUCCESS) {
5404 cmn_err(CE_WARN, "iwh_alive_common(): "
5405 "failed to send crystal"
5406 "frq calibration command.\n");
5407 return (rv);
5408 }
5409
5410 /*
5411 * make sure crystal frequency calibration is ready
5412 * before the next operations.
5413 */
5414 DELAY(1000);
5415 }
5416
5417 return (IWH_SUCCESS);
5418 }
5419
5420 /*
5421 * save results of calibration from ucode
5422 */
5423 static void
5424 iwh_save_calib_result(iwh_sc_t *sc, iwh_rx_desc_t *desc)
5425 {
5426 struct iwh_calib_results *res_p = &sc->sc_calib_results;
5427 struct iwh_calib_hdr *calib_hdr = (struct iwh_calib_hdr *)(desc + 1);
5428 int len = LE_32(desc->len);
5429
5430 /*
5431 * make sure the buffer size is not too big
5432 */
5433 len = (len & FH_RSCSR_FRAME_SIZE_MASK) - 4;
5434
5435 switch (calib_hdr->op_code) {
5436 case PHY_CALIBRATE_LO_CMD:
5437 if (NULL == res_p->lo_res) {
5438 res_p->lo_res = kmem_alloc(len, KM_NOSLEEP);
5439 }
5440
5441 if (NULL == res_p->lo_res) {
5442 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5443 "failed to allocate memory.\n");
5444 return;
5445 }
5446
5447 res_p->lo_res_len = len;
5448 bcopy(calib_hdr, res_p->lo_res, len);
5449 break;
5450
5451 case PHY_CALIBRATE_TX_IQ_CMD:
5452 if (NULL == res_p->tx_iq_res) {
5453 res_p->tx_iq_res = kmem_alloc(len, KM_NOSLEEP);
5454 }
5455
5456 if (NULL == res_p->tx_iq_res) {
5457 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5458 "failed to allocate memory.\n");
5459 return;
5460 }
5461
5462 res_p->tx_iq_res_len = len;
5463 bcopy(calib_hdr, res_p->tx_iq_res, len);
5464 break;
5465
5466 case PHY_CALIBRATE_TX_IQ_PERD_CMD:
5467 if (NULL == res_p->tx_iq_perd_res) {
5468 res_p->tx_iq_perd_res = kmem_alloc(len, KM_NOSLEEP);
5469 }
5470
5471 if (NULL == res_p->tx_iq_perd_res) {
5472 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5473 "failed to allocate memory.\n");
5474 return;
5475 }
5476
5477 res_p->tx_iq_perd_res_len = len;
5478 bcopy(calib_hdr, res_p->tx_iq_perd_res, len);
5479 break;
5480
5481 case PHY_CALIBRATE_DC_CMD:
5482 if (NULL == res_p->dc_res) {
5483 res_p->dc_res = kmem_alloc(len, KM_NOSLEEP);
5484 }
5485
5486 if (NULL == res_p->dc_res) {
5487 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5488 "failed to allocate memory.\n");
5489 return;
5490 }
5491
5492 res_p->dc_res_len = len;
5493 bcopy(calib_hdr, res_p->dc_res, len);
5494 break;
5495
5496 case PHY_CALIBRATE_BASE_BAND_CMD:
5497 if (NULL == res_p->base_band_res) {
5498 res_p->base_band_res = kmem_alloc(len, KM_NOSLEEP);
5499 }
5500
5501 if (NULL == res_p->base_band_res) {
5502 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5503 "failed to allocate memory.\n");
5504 return;
5505 }
5506
5507 res_p->base_band_res_len = len;
5508 bcopy(calib_hdr, res_p->base_band_res, len);
5509 break;
5510
5511 default:
5512 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5513 "incorrect calibration type(%d).\n", calib_hdr->op_code);
5514 break;
5515 }
5516
5517 }
5518
5519 /*
5520 * configure TX power table
5521 */
5522 static int
5523 iwh_tx_power_table(iwh_sc_t *sc, int async)
5524 {
5525 iwh_tx_power_table_cmd_t txpower;
5526 int i, err = IWH_FAIL;
5527
5528 (void) memset(&txpower, 0, sizeof (txpower));
5529
5530 txpower.band = 1; /* for 2.4G */
5531 txpower.channel = (uint8_t)LE_16(sc->sc_config.chan);
5532 txpower.pa_measurements = 1;
5533 txpower.max_mcs = 23;
5534
5535 for (i = 0; i < 24; i++) {
5536 txpower.db.ht_ofdm_power[i].s.radio_tx_gain[0] = 0x16;
5537 txpower.db.ht_ofdm_power[i].s.radio_tx_gain[1] = 0x16;
5538 txpower.db.ht_ofdm_power[i].s.radio_tx_gain[2] = 0x16;
5539 txpower.db.ht_ofdm_power[i].s.dsp_predis_atten[0] = 0x6E;
5540 txpower.db.ht_ofdm_power[i].s.dsp_predis_atten[1] = 0x6E;
5541 txpower.db.ht_ofdm_power[i].s.dsp_predis_atten[2] = 0x6E;
5542 }
5543
5544 for (i = 0; i < 2; i++) {
5545 txpower.db.cck_power[i].s.radio_tx_gain[0] = 0x16;
5546 txpower.db.cck_power[i].s.radio_tx_gain[1] = 0x16;
5547 txpower.db.cck_power[i].s.radio_tx_gain[2] = 0x16;
5548 txpower.db.cck_power[i].s.dsp_predis_atten[0] = 0x6E;
5549 txpower.db.cck_power[i].s.dsp_predis_atten[1] = 0x6E;
5550 txpower.db.cck_power[i].s.dsp_predis_atten[2] = 0x6E;
5551 }
5552
5553 err = iwh_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
5554 sizeof (txpower), async);
5555 if (err != IWH_SUCCESS) {
5556 cmn_err(CE_WARN, "iwh_tx_power_table(): "
5557 "failed to set tx power table.\n");
5558 return (err);
5559 }
5560
5561 return (err);
5562 }
5563
5564 static void
5565 iwh_release_calib_buffer(iwh_sc_t *sc)
5566 {
5567 if (sc->sc_calib_results.lo_res != NULL) {
5568 kmem_free(sc->sc_calib_results.lo_res,
5569 sc->sc_calib_results.lo_res_len);
5570 sc->sc_calib_results.lo_res = NULL;
5571 }
5572
5573 if (sc->sc_calib_results.tx_iq_res != NULL) {
5574 kmem_free(sc->sc_calib_results.tx_iq_res,
5575 sc->sc_calib_results.tx_iq_res_len);
5576 sc->sc_calib_results.tx_iq_res = NULL;
5577 }
5578
5579 if (sc->sc_calib_results.tx_iq_perd_res != NULL) {
5580 kmem_free(sc->sc_calib_results.tx_iq_perd_res,
5581 sc->sc_calib_results.tx_iq_perd_res_len);
5582 sc->sc_calib_results.tx_iq_perd_res = NULL;
5583 }
5584
5585 if (sc->sc_calib_results.dc_res != NULL) {
5586 kmem_free(sc->sc_calib_results.dc_res,
5587 sc->sc_calib_results.dc_res_len);
5588 sc->sc_calib_results.dc_res = NULL;
5589 }
5590
5591 if (sc->sc_calib_results.base_band_res != NULL) {
5592 kmem_free(sc->sc_calib_results.base_band_res,
5593 sc->sc_calib_results.base_band_res_len);
5594 sc->sc_calib_results.base_band_res = NULL;
5595 }
5596 }
5597
5598 /*
5599 * common section of initialization
5600 */
5601 static int
5602 iwh_init_common(iwh_sc_t *sc)
5603 {
5604 int32_t qid;
5605 uint32_t tmp;
5606
5607 if (iwh_reset_hw(sc) != IWH_SUCCESS) {
5608 cmn_err(CE_WARN, "iwh_init_common(): "
5609 "failed to reset hardware\n");
5610 return (IWH_FAIL);
5611 }
5612
5613 (void) iwh_preinit(sc);
5614
5615 tmp = IWH_READ(sc, CSR_GP_CNTRL);
5616 if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
5617 cmn_err(CE_NOTE, "iwh_init_common(): "
5618 "radio transmitter is off\n");
5619 return (IWH_FAIL);
5620 }
5621
5622 /*
5623 * init Rx ring
5624 */
5625 iwh_mac_access_enter(sc);
5626 IWH_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
5627
5628 IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
5629 IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
5630 sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);
5631
5632 IWH_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
5633 ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
5634 offsetof(struct iwh_shared, val0)) >> 4));
5635
5636 IWH_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
5637 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
5638 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
5639 IWH_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K |
5640 (RX_QUEUE_SIZE_LOG <<
5641 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
5642 iwh_mac_access_exit(sc);
5643 IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
5644 (RX_QUEUE_SIZE - 1) & ~0x7);
5645
5646 /*
5647 * init Tx rings
5648 */
5649 iwh_mac_access_enter(sc);
5650 iwh_reg_write(sc, IWH_SCD_TXFACT, 0);
5651
5652 /*
5653 * keep warm page
5654 */
5655 IWH_WRITE(sc, IWH_FH_KW_MEM_ADDR_REG,
5656 sc->sc_dma_kw.cookie.dmac_address >> 4);
5657
5658 for (qid = 0; qid < IWH_NUM_QUEUES; qid++) {
5659 IWH_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
5660 sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
5661 IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
5662 IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
5663 IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
5664 }
5665
5666 iwh_mac_access_exit(sc);
5667
5668 /*
5669 * clear "radio off" and "disable command" bits
5670 */
5671 IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5672 IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
5673 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
5674
5675 /*
5676 * clear any pending interrupts
5677 */
5678 IWH_WRITE(sc, CSR_INT, 0xffffffff);
5679
5680 /*
5681 * enable interrupts
5682 */
5683 IWH_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
5684
5685 IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5686 IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5687
5688 return (IWH_SUCCESS);
5689 }
5690
5691 static int
5692 iwh_fast_recover(iwh_sc_t *sc)
5693 {
5694 ieee80211com_t *ic = &sc->sc_ic;
5695 int err = IWH_FAIL;
5696
5697 mutex_enter(&sc->sc_glock);
5698
5699 /*
5700 * restore runtime configuration
5701 */
5702 bcopy(&sc->sc_config_save, &sc->sc_config,
5703 sizeof (sc->sc_config));
5704
5705 sc->sc_config.assoc_id = 0;
5706 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
5707
5708 if ((err = iwh_hw_set_before_auth(sc)) != IWH_SUCCESS) {
5709 cmn_err(CE_WARN, "iwh_fast_recover(): "
5710 "could not setup authentication\n");
5711 mutex_exit(&sc->sc_glock);
5712 return (err);
5713 }
5714
5715 bcopy(&sc->sc_config_save, &sc->sc_config,
5716 sizeof (sc->sc_config));
5717
5718 /*
5719 * update adapter's configuration
5720 */
5721 err = iwh_run_state_config(sc);
5722 if (err != IWH_SUCCESS) {
5723 cmn_err(CE_WARN, "iwh_fast_recover(): "
5724 "failed to setup association\n");
5725 mutex_exit(&sc->sc_glock);
5726 return (err);
5727 }
5728
5729 /*
5730 * set LED on
5731 */
5732 iwh_set_led(sc, 2, 0, 1);
5733
5734 mutex_exit(&sc->sc_glock);
5735
5736 atomic_and_32(&sc->sc_flags, ~IWH_F_HW_ERR_RECOVER);
5737
5738 /*
5739 * start queue
5740 */
5741 IWH_DBG((IWH_DEBUG_FW, "iwh_fast_recover(): "
5742 "resume xmit\n"));
5743 mac_tx_update(ic->ic_mach);
5744
5745 return (IWH_SUCCESS);
5746 }
5747
5748 static int
5749 iwh_run_state_config(iwh_sc_t *sc)
5750 {
5751 struct ieee80211com *ic = &sc->sc_ic;
5752 ieee80211_node_t *in = ic->ic_bss;
5753 uint32_t ht_protec = (uint32_t)(-1);
5754 int err = IWH_FAIL;
5755
5756 /*
5757 * update adapter's configuration
5758 */
5759 sc->sc_config.assoc_id = in->in_associd & 0x3fff;
5760
5761 /*
5762 * short preamble/slot time are
5763 * negotiated when associating
5764 */
5765 sc->sc_config.flags &=
5766 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
5767 RXON_FLG_SHORT_SLOT_MSK);
5768
5769 if (ic->ic_flags & IEEE80211_F_SHSLOT) {
5770 sc->sc_config.flags |=
5771 LE_32(RXON_FLG_SHORT_SLOT_MSK);
5772 }
5773
5774 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
5775 sc->sc_config.flags |=
5776 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
5777 }
5778
5779 if (in->in_flags & IEEE80211_NODE_HT) {
5780 ht_protec = in->in_htopmode;
5781 if (ht_protec > 3) {
5782 cmn_err(CE_WARN, "iwh_run_state_config(): "
5783 "HT protection mode is not correct.\n");
5784 return (IWH_FAIL);
5785 } else if (NO_HT_PROT == ht_protec) {
5786 ht_protec = sc->sc_ht_conf.ht_protection;
5787 }
5788
5789 sc->sc_config.flags |=
5790 LE_32(ht_protec << RXON_FLG_HT_OPERATING_MODE_POS);
5791 }
5792
5793 /*
5794 * set RX chains/antennas.
5795 */
5796 iwh_config_rxon_chain(sc);
5797
5798 sc->sc_config.filter_flags |=
5799 LE_32(RXON_FILTER_ASSOC_MSK);
5800
5801 if (ic->ic_opmode != IEEE80211_M_STA) {
5802 sc->sc_config.filter_flags |=
5803 LE_32(RXON_FILTER_BCON_AWARE_MSK);
5804 }
5805
5806 IWH_DBG((IWH_DEBUG_80211, "iwh_run_state_config(): "
5807 "config chan %d flags %x"
5808 " filter_flags %x\n",
5809 sc->sc_config.chan, sc->sc_config.flags,
5810 sc->sc_config.filter_flags));
5811
5812 err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
5813 sizeof (iwh_rxon_cmd_t), 1);
5814 if (err != IWH_SUCCESS) {
5815 cmn_err(CE_WARN, "iwh_run_state_config(): "
5816 "could not update configuration\n");
5817 return (err);
5818 }
5819
5820 if ((sc->sc_dev_id != 0x423c) &&
5821 (sc->sc_dev_id != 0x423d)) {
5822 /*
5823 * send tx power table command
5824 */
5825 err = iwh_tx_power_table(sc, 1);
5826 if (err != IWH_SUCCESS) {
5827 return (err);
5828 }
5829 }
5830
5831 /*
5832 * No need to update the retry rate table for the AP node
5833 */
5834 err = iwh_qosparam_to_hw(sc, 1);
5835 if (err != IWH_SUCCESS) {
5836 return (err);
5837 }
5838
5839 return (err);
5840 }
5841
5842 /*
5843 * This function exists only for compatibility with the Net80211 module.
5844 * iwh_qosparam_to_hw() is the function that actually updates the EDCA
5845 * parameters in hardware.
5846 */
5847 /* ARGSUSED */
5848 static int
5849 iwh_wme_update(ieee80211com_t *ic)
5850 {
5851 return (0);
5852 }
5853
5854 static int
5855 iwh_wme_to_qos_ac(int wme_ac)
5856 {
5857 int qos_ac = QOS_AC_INVALID;
5858
5859 if (wme_ac < WME_AC_BE || wme_ac > WME_AC_VO) {
5860 cmn_err(CE_WARN, "iwh_wme_to_qos_ac(): "
5861 "WME AC index is not in suitable range.\n");
5862 return (qos_ac);
5863 }
5864
5865 switch (wme_ac) {
5866 case WME_AC_BE:
5867 qos_ac = QOS_AC_BK;
5868 break;
5869 case WME_AC_BK:
5870 qos_ac = QOS_AC_BE;
5871 break;
5872 case WME_AC_VI:
5873 qos_ac = QOS_AC_VI;
5874 break;
5875 case WME_AC_VO:
5876 qos_ac = QOS_AC_VO;
5877 break;
5878 }
5879
5880 return (qos_ac);
5881 }
5882
5883 static uint16_t
5884 iwh_cw_e_to_cw(uint8_t cw_e)
5885 {
5886 uint16_t cw = 1;
5887
5888 while (cw_e > 0) {
5889 cw <<= 1;
5890 cw_e--;
5891 }
5892
5893 cw -= 1;
5894 return (cw);
5895 }
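/*
 * The loop above is equivalent to
 *
 *	cw = (uint16_t)((1 << cw_e) - 1);
 *
 * e.g. a cwmin exponent of 4 yields a contention window of 15 and an
 * exponent of 10 yields 1023.
 */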
5896
5897 static int
5898 iwh_wmeparam_check(struct wmeParams *wmeparam)
5899 {
5900 int i;
5901
5902 for (i = 0; i < WME_NUM_AC; i++) {
5903
5904 if ((wmeparam[i].wmep_logcwmax > QOS_CW_RANGE_MAX) ||
5905 (wmeparam[i].wmep_logcwmin >= wmeparam[i].wmep_logcwmax)) {
5906 cmn_err(CE_WARN, "iwh_wmeparam_check(): "
5907 "Contention window is not in suitable range.\n");
5908 return (IWH_FAIL);
5909 }
5910
5911 if ((wmeparam[i].wmep_aifsn < QOS_AIFSN_MIN) ||
5912 (wmeparam[i].wmep_aifsn > QOS_AIFSN_MAX)) {
5913 cmn_err(CE_WARN, "iwh_wmeparam_check(): "
5914 "Arbitration interframe space number"
5915 "is not in suitable range.\n");
5916 return (IWH_FAIL);
5917 }
5918 }
5919
5920 return (IWH_SUCCESS);
5921 }
5922
5923 /*
5924 * This function updates the EDCA parameters in hardware.
5925 * FIFO0-background, FIFO1-best effort, FIFO2-video, FIFO3-voice.
5926 */
5927 static int
5928 iwh_qosparam_to_hw(iwh_sc_t *sc, int async)
5929 {
5930 ieee80211com_t *ic = &sc->sc_ic;
5931 ieee80211_node_t *in = ic->ic_bss;
5932 struct wmeParams *wmeparam;
5933 iwh_qos_param_cmd_t qosparam_cmd;
5934 int i, j;
5935 int err = IWH_FAIL;
5936
5937 if ((in->in_flags & IEEE80211_NODE_QOS) &&
5938 (IEEE80211_M_STA == ic->ic_opmode)) {
5939 wmeparam = ic->ic_wme.wme_chanParams.cap_wmeParams;
5940 } else {
5941 return (IWH_SUCCESS);
5942 }
5943
5944 (void) memset(&qosparam_cmd, 0, sizeof (qosparam_cmd));
5945
5946 err = iwh_wmeparam_check(wmeparam);
5947 if (err != IWH_SUCCESS) {
5948 return (err);
5949 }
5950
5951 if (in->in_flags & IEEE80211_NODE_QOS) {
5952 qosparam_cmd.flags |= QOS_PARAM_FLG_UPDATE_EDCA;
5953 }
5954
5955 if (in->in_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)) {
5956 qosparam_cmd.flags |= QOS_PARAM_FLG_TGN;
5957 }
5958
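/*
 * Convert each WME AC into the firmware's QoS AC layout: the cwmin and
 * cwmax exponents are expanded into real contention windows, and
 * wmep_txopLimit, which 802.11e carries in units of 32 microseconds,
 * is multiplied by 32 (presumably to hand the firmware microseconds).
 */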
5959 for (i = 0; i < WME_NUM_AC; i++) {
5960
5961 j = iwh_wme_to_qos_ac(i);
5962 if (j < QOS_AC_BK || j > QOS_AC_VO) {
5963 return (IWH_FAIL);
5964 }
5965
5966 qosparam_cmd.ac[j].cw_min =
5967 iwh_cw_e_to_cw(wmeparam[i].wmep_logcwmin);
5968 qosparam_cmd.ac[j].cw_max =
5969 iwh_cw_e_to_cw(wmeparam[i].wmep_logcwmax);
5970 qosparam_cmd.ac[j].aifsn =
5971 wmeparam[i].wmep_aifsn;
5972 qosparam_cmd.ac[j].txop =
5973 (uint16_t)(wmeparam[i].wmep_txopLimit * 32);
5974 }
5975
5976 err = iwh_cmd(sc, REPLY_QOS_PARAM, &qosparam_cmd,
5977 sizeof (qosparam_cmd), async);
5978 if (err != IWH_SUCCESS) {
5979 cmn_err(CE_WARN, "iwh_qosparam_to_hw(): "
5980 "failed to update QoS parameters into hardware.\n");
5981 return (err);
5982 }
5983
5984 #ifdef DEBUG
5985 IWH_DBG((IWH_DEBUG_QOS, "iwh_qosparam_to_hw(): "
5986 "EDCA parameters are as follows:\n"));
5987
5988 IWH_DBG((IWH_DEBUG_QOS, "BK parameters are: "
5989 "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
5990 qosparam_cmd.ac[0].cw_min, qosparam_cmd.ac[0].cw_max,
5991 qosparam_cmd.ac[0].aifsn, qosparam_cmd.ac[0].txop));
5992
5993 IWH_DBG((IWH_DEBUG_QOS, "BE parameters are: "
5994 "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
5995 qosparam_cmd.ac[1].cw_min, qosparam_cmd.ac[1].cw_max,
5996 qosparam_cmd.ac[1].aifsn, qosparam_cmd.ac[1].txop));
5997
5998 IWH_DBG((IWH_DEBUG_QOS, "VI parameters are: "
5999 "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
6000 qosparam_cmd.ac[2].cw_min, qosparam_cmd.ac[2].cw_max,
6001 qosparam_cmd.ac[2].aifsn, qosparam_cmd.ac[2].txop));
6002
6003 IWH_DBG((IWH_DEBUG_QOS, "VO parameters are: "
6004 "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
6005 qosparam_cmd.ac[3].cw_min, qosparam_cmd.ac[3].cw_max,
6006 qosparam_cmd.ac[3].aifsn, qosparam_cmd.ac[3].txop));
6007 #endif
6008 return (err);
6009 }
6010
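/*
 * Map an 802.11e TID (user priority) to an access category using the
 * standard UP-to-AC mapping: TIDs 1 and 2 go to background, 0 and 3 to
 * best effort, 4 and 5 to video, and 6 and 7 to voice.
 */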
6011 static inline int
6012 iwh_wme_tid_qos_ac(int tid)
6013 {
6014 switch (tid) {
6015 case 1:
6016 case 2:
6017 return (QOS_AC_BK);
6018 case 0:
6019 case 3:
6020 return (QOS_AC_BE);
6021 case 4:
6022 case 5:
6023 return (QOS_AC_VI);
6024 case 6:
6025 case 7:
6026 return (QOS_AC_VO);
6027 }
6028
6029 return (QOS_AC_BE);
6030 }
6031
6032 static inline int
6033 iwh_qos_ac_to_txq(int qos_ac)
6034 {
6035 switch (qos_ac) {
6036 case QOS_AC_BK:
6037 return (QOS_AC_BK_TO_TXQ);
6038 case QOS_AC_BE:
6039 return (QOS_AC_BE_TO_TXQ);
6040 case QOS_AC_VI:
6041 return (QOS_AC_VI_TO_TXQ);
6042 case QOS_AC_VO:
6043 return (QOS_AC_VO_TO_TXQ);
6044 }
6045
6046 return (QOS_AC_BE_TO_TXQ);
6047 }
6048
6049 static int
6050 iwh_wme_tid_to_txq(int tid)
6051 {
6052 int queue_n = TXQ_FOR_AC_INVALID;
6053 int qos_ac;
6054
6055 if (tid < WME_TID_MIN ||
6056 tid > WME_TID_MAX) {
6057 cmn_err(CE_WARN, "wme_tid_to_txq(): "
6058 "TID is not in suitable range.\n");
6059 return (queue_n);
6060 }
6061
6062 qos_ac = iwh_wme_tid_qos_ac(tid);
6063 queue_n = iwh_qos_ac_to_txq(qos_ac);
6064
6065 return (queue_n);
6066 }
6067
6068 /*
6069 * This function is used for initializing HT-relevant configurations.
6070 */
6071 static void
6072 iwh_init_ht_conf(iwh_sc_t *sc)
6073 {
6074 (void) memset(&sc->sc_ht_conf, 0, sizeof (iwh_ht_conf_t));
6075
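/*
 * Device IDs 0x4235, 0x4236 and 0x423a are assumed to be the
 * three-antenna parts that can transmit two spatial streams
 * (MCS 0-15); everything else is treated as a single TX stream,
 * dual RX stream device.
 */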
6076 if ((0x4235 == sc->sc_dev_id) ||
6077 (0x4236 == sc->sc_dev_id) ||
6078 (0x423a == sc->sc_dev_id)) {
6079 sc->sc_ht_conf.ht_support = 1;
6080
6081 sc->sc_ht_conf.valid_chains = 3;
6082 sc->sc_ht_conf.tx_stream_count = 2;
6083 sc->sc_ht_conf.rx_stream_count = 2;
6084
6085 sc->sc_ht_conf.tx_support_mcs[0] = 0xff;
6086 sc->sc_ht_conf.tx_support_mcs[1] = 0xff;
6087 sc->sc_ht_conf.rx_support_mcs[0] = 0xff;
6088 sc->sc_ht_conf.rx_support_mcs[1] = 0xff;
6089 } else {
6090 sc->sc_ht_conf.ht_support = 1;
6091
6092 sc->sc_ht_conf.valid_chains = 2;
6093 sc->sc_ht_conf.tx_stream_count = 1;
6094 sc->sc_ht_conf.rx_stream_count = 2;
6095
6096 sc->sc_ht_conf.tx_support_mcs[0] = 0xff;
6097 sc->sc_ht_conf.rx_support_mcs[0] = 0xff;
6098 sc->sc_ht_conf.rx_support_mcs[1] = 0xff;
6099 }
6100
6101 if (sc->sc_ht_conf.ht_support) {
6102 sc->sc_ht_conf.cap |= HT_CAP_GRN_FLD;
6103 sc->sc_ht_conf.cap |= HT_CAP_SGI_20;
6104 sc->sc_ht_conf.cap |= HT_CAP_MAX_AMSDU;
6105 /* should disable MIMO */
6106 sc->sc_ht_conf.cap |= HT_CAP_MIMO_PS;
6107
6108 sc->sc_ht_conf.ampdu_p.factor = HT_RX_AMPDU_FACTOR;
6109 sc->sc_ht_conf.ampdu_p.density = HT_MPDU_DENSITY;
6110
6111 sc->sc_ht_conf.ht_protection = HT_PROT_CHAN_NON_HT;
6112 }
6113 }
6114
6115 /*
6116 * This function overwrites the default ieee80211_rateset_11n structure.
6117 */
6118 static void
6119 iwh_overwrite_11n_rateset(iwh_sc_t *sc)
6120 {
6121 uint8_t *ht_rs = sc->sc_ht_conf.rx_support_mcs;
6122 int mcs_idx, mcs_count = 0;
6123 int i, j;
6124
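/*
 * rx_support_mcs is a bitmap with one bit per MCS index: bit j of
 * byte i stands for MCS (i * 8 + j), and every supported index is
 * copied verbatim into ieee80211_rateset_11n.rs_rates[].
 */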
6125 for (i = 0; i < HT_RATESET_NUM; i++) {
6126 for (j = 0; j < 8; j++) {
6127 if (ht_rs[i] & (1 << j)) {
6128 mcs_idx = i * 8 + j;
6129 if (mcs_idx >= IEEE80211_HTRATE_MAXSIZE) {
6130 break;
6131 }
6132
6133 ieee80211_rateset_11n.rs_rates[mcs_idx] =
6134 (uint8_t)mcs_idx;
6135 mcs_count++;
6136 }
6137 }
6138 }
6139
6140 ieee80211_rateset_11n.rs_nrates = (uint8_t)mcs_count;
6141
6142 #ifdef DEBUG
6143 IWH_DBG((IWH_DEBUG_HTRATE, "iwh_overwrite_11n_rateset(): "
6144 "HT rates supported by this station is as follows:\n"));
6145
6146 for (i = 0; i < ieee80211_rateset_11n.rs_nrates; i++) {
6147 IWH_DBG((IWH_DEBUG_HTRATE, "Rate %d is %d\n",
6148 i, ieee80211_rateset_11n.rs_rates[i]));
6149 }
6150 #endif
6151 }
6152
6153 /*
6154 * This function overwrites the default configuration of the
6155 * ieee80211com structure in the Net80211 module.
6156 */
6157 static void
6158 iwh_overwrite_ic_default(iwh_sc_t *sc)
6159 {
6160 ieee80211com_t *ic = &sc->sc_ic;
6161
6162 sc->sc_newstate = ic->ic_newstate;
6163 ic->ic_newstate = iwh_newstate;
6164 ic->ic_node_alloc = iwh_node_alloc;
6165 ic->ic_node_free = iwh_node_free;
6166
6167 if (sc->sc_ht_conf.ht_support) {
6168 sc->sc_recv_action = ic->ic_recv_action;
6169 ic->ic_recv_action = iwh_recv_action;
6170 sc->sc_send_action = ic->ic_send_action;
6171 ic->ic_send_action = iwh_send_action;
6172
6173 ic->ic_ampdu_rxmax = sc->sc_ht_conf.ampdu_p.factor;
6174 ic->ic_ampdu_density = sc->sc_ht_conf.ampdu_p.density;
6175 ic->ic_ampdu_limit = ic->ic_ampdu_rxmax;
6176 }
6177 }
6178
6179 /*
6180 * This function sets "RX chain selection" feild
6181 * in RXON command during plumb driver.
6182 */
6183 static void
6184 iwh_config_rxon_chain(iwh_sc_t *sc)
6185 {
6186 ieee80211com_t *ic = &sc->sc_ic;
6187 ieee80211_node_t *in = ic->ic_bss;
6188
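/*
 * rx_chain packs several bitfields: which chains are valid, which are
 * forced on for legacy and for MIMO reception, and (further below) how
 * many chains to use once the peer is known to be an HT node.
 */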
6189 if (3 == sc->sc_ht_conf.valid_chains) {
6190 sc->sc_config.rx_chain = LE_16((RXON_RX_CHAIN_A_MSK |
6191 RXON_RX_CHAIN_B_MSK | RXON_RX_CHAIN_C_MSK) <<
6192 RXON_RX_CHAIN_VALID_POS);
6193
6194 sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6195 RXON_RX_CHAIN_B_MSK | RXON_RX_CHAIN_C_MSK) <<
6196 RXON_RX_CHAIN_FORCE_SEL_POS);
6197
6198 sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6199 RXON_RX_CHAIN_B_MSK | RXON_RX_CHAIN_C_MSK) <<
6200 RXON_RX_CHAIN_FORCE_MIMO_SEL_POS);
6201 } else {
6202 sc->sc_config.rx_chain = LE_16((RXON_RX_CHAIN_A_MSK |
6203 RXON_RX_CHAIN_B_MSK) << RXON_RX_CHAIN_VALID_POS);
6204
6205 sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6206 RXON_RX_CHAIN_B_MSK) << RXON_RX_CHAIN_FORCE_SEL_POS);
6207
6208 sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6209 RXON_RX_CHAIN_B_MSK) <<
6210 RXON_RX_CHAIN_FORCE_MIMO_SEL_POS);
6211 }
6212
6213 sc->sc_config.rx_chain |= LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK);
6214
6215 if ((in != NULL) &&
6216 (in->in_flags & IEEE80211_NODE_HT) &&
6217 sc->sc_ht_conf.ht_support) {
6218 if (3 == sc->sc_ht_conf.valid_chains) {
6219 sc->sc_config.rx_chain |= LE_16(3 <<
6220 RXON_RX_CHAIN_CNT_POS);
6221 sc->sc_config.rx_chain |= LE_16(3 <<
6222 RXON_RX_CHAIN_MIMO_CNT_POS);
6223 } else {
6224 sc->sc_config.rx_chain |= LE_16(2 <<
6225 RXON_RX_CHAIN_CNT_POS);
6226 sc->sc_config.rx_chain |= LE_16(2 <<
6227 RXON_RX_CHAIN_MIMO_CNT_POS);
6228 }
6229
6230 sc->sc_config.rx_chain |= LE_16(1 <<
6231 RXON_RX_CHAIN_MIMO_FORCE_POS);
6232 }
6233
6234 IWH_DBG((IWH_DEBUG_RXON, "iwh_config_rxon_chain(): "
6235 "rxon->rx_chain = %x\n", sc->sc_config.rx_chain));
6236 }
6237
6238 /*
6239 * This function adds AP station into hardware.
6240 */
6241 static int
6242 iwh_add_ap_sta(iwh_sc_t *sc)
6243 {
6244 ieee80211com_t *ic = &sc->sc_ic;
6245 ieee80211_node_t *in = ic->ic_bss;
6246 iwh_add_sta_t node;
6247 uint32_t ampdu_factor, ampdu_density;
6248 int err = IWH_FAIL;
6249
6250 /*
6251 * Add AP node into hardware.
6252 */
6253 (void) memset(&node, 0, sizeof (node));
6254 IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
6255 node.mode = STA_MODE_ADD_MSK;
6256 node.sta.sta_id = IWH_AP_ID;
6257
6258 if (sc->sc_ht_conf.ht_support &&
6259 (in->in_htcap_ie != NULL) &&
6260 (in->in_htcap != 0) &&
6261 (in->in_htparam != 0)) {
6262
6263 if (((in->in_htcap & HT_CAP_MIMO_PS) >> 2)
6264 == HT_CAP_MIMO_PS_DYNAMIC) {
6265 node.station_flags |= LE_32(STA_FLG_RTS_MIMO_PROT);
6266 }
6267
6268 ampdu_factor = in->in_htparam & HT_RX_AMPDU_FACTOR_MSK;
6269 node.station_flags |=
6270 LE_32(ampdu_factor << STA_FLG_MAX_AMPDU_POS);
6271
6272 ampdu_density = (in->in_htparam & HT_MPDU_DENSITY_MSK) >>
6273 HT_MPDU_DENSITY_POS;
6274 node.station_flags |=
6275 LE_32(ampdu_density << STA_FLG_AMPDU_DENSITY_POS);
6276
6277 if (in->in_htcap & LE_16(HT_CAP_SUP_WIDTH)) {
6278 node.station_flags |=
6279 LE_32(STA_FLG_FAT_EN);
6280 }
6281 }
6282
6283 err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
6284 if (err != IWH_SUCCESS) {
6285 cmn_err(CE_WARN, "iwh_add_ap_lq(): "
6286 "failed to add AP node\n");
6287 return (err);
6288 }
6289
6290 return (err);
6291 }
6292
6293 /*
6294 * Each station in Shirley Peak's internal station table has
6295 * its own table of 16 TX rates and modulation modes for retrying
6296 * TX when an ACK is not received. This function replaces the entire
6297 * table for one station. The station must already be in Shirley
6298 * Peak's station table.
6299 */
6300 static int
6301 iwh_ap_lq(iwh_sc_t *sc)
6302 {
6303 ieee80211com_t *ic = &sc->sc_ic;
6304 ieee80211_node_t *in = ic->ic_bss;
6305 iwh_link_quality_cmd_t link_quality;
6306 const struct ieee80211_rateset *rs_sup = NULL;
6307 uint32_t masks = 0, rate;
6308 int i, err = IWH_FAIL;
6309
6310 /*
6311 * TX_LINK_QUALITY cmd
6312 */
6313 (void) memset(&link_quality, 0, sizeof (link_quality));
6314 if (in->in_chan == IEEE80211_CHAN_ANYC) /* skip null node */
6315 return (err);
6316 rs_sup = ieee80211_get_suprates(ic, in->in_chan);
6317
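/*
 * Build the 16-entry retry table from the highest supported rate
 * downward; once the supported rates run out the remaining entries
 * fall back to 1 Mbps (rate code 2).  CCK rates (1, 2, 5.5 and
 * 11 Mbps, i.e. codes 2, 4, 11 and 22) also need the CCK flag.
 */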
6318 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6319 if (i < rs_sup->ir_nrates) {
6320 rate = rs_sup->ir_rates[rs_sup->ir_nrates - i] &
6321 IEEE80211_RATE_VAL;
6322 } else {
6323 rate = 2;
6324 }

                if (2 == rate || 4 == rate ||
                    11 == rate || 22 == rate) {
                        masks |= LE_32(RATE_MCS_CCK_MSK);
                }

                masks |= LE_32(RATE_MCS_ANT_B_MSK);

                link_quality.rate_n_flags[i] =
                    LE_32(iwh_rate_to_plcp(rate) | masks);
        }

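        /*
         * Antenna B carries single-stream transmissions and the full
         * antenna mask is used for dual-stream ones.  The aggregation
         * parameters are handed to the firmware unchanged; the time
         * limit of 4000 is presumably in microseconds, matching Intel's
         * usual default.
         */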
        link_quality.general_params.single_stream_ant_msk = LINK_QUAL_ANT_B_MSK;
        link_quality.general_params.dual_stream_ant_msk = LINK_QUAL_ANT_MSK;
        link_quality.agg_params.agg_dis_start_th = 3;
        link_quality.agg_params.agg_time_limit = LE_16(4000);
        link_quality.sta_id = IWH_AP_ID;
        err = iwh_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
            sizeof (link_quality), 1);
        if (err != IWH_SUCCESS) {
                cmn_err(CE_WARN, "iwh_ap_lq(): "
                    "failed to config link quality table\n");
                return (err);
        }

#ifdef DEBUG
        IWH_DBG((IWH_DEBUG_HWRATE, "iwh_ap_lq(): "
            "Rates in HW are as follows:\n"));

        for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
                IWH_DBG((IWH_DEBUG_HWRATE,
                    "Rate %d in HW is %x\n", i, link_quality.rate_n_flags[i]));
        }
#endif

        return (err);
}

/*
 * When a block ACK agreement has been set up between the station and
 * the AP, the Net80211 module calls this function to pass the
 * parameters of the BA agreement to the hardware.
 * When the AP wants to delete a BA agreement that it originated, the
 * Net80211 module calls this function to clean up the relevant state
 * in the hardware.
 */
static void
iwh_recv_action(struct ieee80211_node *in,
    const uint8_t *frm, const uint8_t *efrm)
{
        struct ieee80211com *ic;
        iwh_sc_t *sc;
        const struct ieee80211_action *ia;
        uint16_t baparamset, baseqctl;
        uint32_t tid, ssn;
        iwh_add_sta_t node;
        int err = IWH_FAIL;

        if ((NULL == in) || (NULL == frm)) {
                return;
        }

        ic = in->in_ic;
        if (NULL == ic) {
                return;
        }

        sc = (iwh_sc_t *)ic;

        sc->sc_recv_action(in, frm, efrm);

        ia = (const struct ieee80211_action *)frm;
        if (ia->ia_category != IEEE80211_ACTION_CAT_BA) {
                return;
        }

        switch (ia->ia_action) {
        case IEEE80211_ACTION_BA_ADDBA_REQUEST:
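                /*
                 * ADDBA request layout after the category and action
                 * fields: dialog token at frm[2], the Block Ack
                 * Parameter Set at frm + 3 and the starting sequence
                 * control at frm + 7; the TID and starting sequence
                 * number are extracted from those two fields.
                 */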
                baparamset = *(uint16_t *)(frm + 3);
                baseqctl = *(uint16_t *)(frm + 7);

                tid = MS(baparamset, IEEE80211_BAPS_TID);
                ssn = MS(baseqctl, IEEE80211_BASEQ_START);

                (void) memset(&node, 0, sizeof (node));
                IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
                node.mode = STA_MODE_MODIFY_MSK;
                node.sta.sta_id = IWH_AP_ID;

                node.station_flags_msk = 0;
                node.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
                node.add_immediate_ba_tid = (uint8_t)tid;
                node.add_immediate_ba_ssn = LE_16(ssn);

                mutex_enter(&sc->sc_glock);
                err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
                if (err != IWH_SUCCESS) {
                        cmn_err(CE_WARN, "iwh_recv_action(): "
                            "failed to setup RX block ACK\n");
                        mutex_exit(&sc->sc_glock);
                        return;
                }
                mutex_exit(&sc->sc_glock);

                IWH_DBG((IWH_DEBUG_BA, "iwh_recv_action(): "
                    "RX block ACK "
                    "was set up on TID %d and SSN is %d.\n", tid, ssn));

                return;

        case IEEE80211_ACTION_BA_DELBA:
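                /*
                 * The DELBA Parameter Set sits at frm + 2 and carries
                 * the TID and the initiator bit; only DELBA frames sent
                 * by the originator of the agreement (the AP) require
                 * tearing down RX aggregation state here.
                 */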
                baparamset = *(uint16_t *)(frm + 2);

                if ((baparamset & IEEE80211_DELBAPS_INIT) == 0) {
                        return;
                }

                tid = MS(baparamset, IEEE80211_DELBAPS_TID);

                (void) memset(&node, 0, sizeof (node));
                IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
                node.mode = STA_MODE_MODIFY_MSK;
                node.sta.sta_id = IWH_AP_ID;

                node.station_flags_msk = 0;
                node.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
                node.add_immediate_ba_tid = (uint8_t)tid;

                mutex_enter(&sc->sc_glock);
                err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
                if (err != IWH_SUCCESS) {
                        cmn_err(CE_WARN, "iwh_recv_action(): "
                            "failed to delete RX block ACK\n");
                        mutex_exit(&sc->sc_glock);
                        return;
                }
                mutex_exit(&sc->sc_glock);

                IWH_DBG((IWH_DEBUG_BA, "iwh_recv_action(): "
                    "RX block ACK "
                    "was deleted on TID %d.\n", tid));

                return;
        }
}

/*
 * When the local station wants to delete a BA agreement that was
 * originated by the AP, the Net80211 module calls this function to
 * clean up the relevant state in the hardware.
 */
static int
iwh_send_action(struct ieee80211_node *in,
    int category, int action, uint16_t args[4])
{
        struct ieee80211com *ic;
        iwh_sc_t *sc;
        uint32_t tid;
        iwh_add_sta_t node;
        int ret = EIO;
        int err = IWH_FAIL;

        if (NULL == in) {
                return (ret);
        }

        ic = in->in_ic;
        if (NULL == ic) {
                return (ret);
        }

        sc = (iwh_sc_t *)ic;

        ret = sc->sc_send_action(in, category, action, args);

        if (category != IEEE80211_ACTION_CAT_BA) {
                return (ret);
        }

        switch (action) {
        case IEEE80211_ACTION_BA_DELBA:
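                /*
                 * Hardware RX aggregation state exists only for BA
                 * agreements originated by the peer; if the local
                 * station is the initiator of the agreement being torn
                 * down, there is nothing to remove here.
                 */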
                if (IEEE80211_DELBAPS_INIT == args[1]) {
                        return (ret);
                }

                tid = args[0];

                (void) memset(&node, 0, sizeof (node));
                IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
                node.mode = STA_MODE_MODIFY_MSK;
                node.sta.sta_id = IWH_AP_ID;

                node.station_flags_msk = 0;
                node.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
                node.add_immediate_ba_tid = (uint8_t)tid;

                mutex_enter(&sc->sc_glock);
                err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
                if (err != IWH_SUCCESS) {
                        cmn_err(CE_WARN, "iwh_send_action(): "
                            "failed to delete RX block ACK\n");
                        mutex_exit(&sc->sc_glock);
                        return (EIO);
                }
                mutex_exit(&sc->sc_glock);

                IWH_DBG((IWH_DEBUG_BA, "iwh_send_action(): "
                    "RX block ACK "
                    "was deleted on TID %d.\n", tid));

                break;
        }

        return (ret);
}

static int
iwh_reset_hw(iwh_sc_t *sc)
{
        uint32_t tmp;
        int n;

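        /*
         * Bring the NIC out of reset: assert NIC_READY and poll for the
         * ready bit.  If the device does not become ready, assert the
         * PREPARE bit, wait for PREPARE_DONE to clear, then request
         * NIC_READY again and poll once more.
         */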
        tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
        IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
            tmp | CSR_HW_IF_CONFIG_REG_BITS_NIC_READY);

        /*
         * wait for HW ready
         */
        for (n = 0; n < 5; n++) {
                if (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
                    CSR_HW_IF_CONFIG_REG_BITS_NIC_READY) {
                        break;
                }
                DELAY(10);
        }

        if (n != 5) {
                return (IWH_SUCCESS);
        }

        tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
        IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
            tmp | CSR_HW_IF_CONFIG_REG_BITS_PREPARE);

        for (n = 0; n < 15000; n++) {
                if (0 == (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
                    CSR_HW_IF_CONFIG_REG_BITS_NIC_PREPARE_DONE)) {
                        break;
                }
                DELAY(10);
        }

        if (15000 == n) {
                return (ETIMEDOUT);
        }

        tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
        IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
            tmp | CSR_HW_IF_CONFIG_REG_BITS_NIC_READY);

        /*
         * wait for HW ready
         */
        for (n = 0; n < 5; n++) {
                if (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
                    CSR_HW_IF_CONFIG_REG_BITS_NIC_READY) {
                        break;
                }
                DELAY(10);
        }

        if (n != 5) {
                return (IWH_SUCCESS);
        } else {
                return (ETIMEDOUT);
        }
}
