1 /*
2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 * Use is subject to license terms.
4 */
5
6 /*
7 * Copyright (c) 2007, Intel Corporation
8 * All rights reserved.
9 */
10
11 /*
12 * Copyright (c) 2006
13 * Copyright (c) 2007
14 * Damien Bergamini <damien.bergamini@free.fr>
15 *
16 * Permission to use, copy, modify, and distribute this software for any
17 * purpose with or without fee is hereby granted, provided that the above
18 * copyright notice and this permission notice appear in all copies.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27 */
28
29 /*
30  * Driver for Intel PRO/Wireless 4965AGN (Kedron) 802.11 network adapters.
31 */
32
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac_provider.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/varargs.h>
56 #include <sys/policy.h>
57 #include <sys/pci.h>
58
59 #include "iwk_calibration.h"
60 #include "iwk_hw.h"
61 #include "iwk_eeprom.h"
62 #include "iwk2_var.h"
63 #include <inet/wifi_ioctl.h>
64
65 #ifdef DEBUG
66 #define IWK_DEBUG_80211 (1 << 0)
67 #define IWK_DEBUG_CMD (1 << 1)
68 #define IWK_DEBUG_DMA (1 << 2)
69 #define IWK_DEBUG_EEPROM (1 << 3)
70 #define IWK_DEBUG_FW (1 << 4)
71 #define IWK_DEBUG_HW (1 << 5)
72 #define IWK_DEBUG_INTR (1 << 6)
73 #define IWK_DEBUG_MRR (1 << 7)
74 #define IWK_DEBUG_PIO (1 << 8)
75 #define IWK_DEBUG_RX (1 << 9)
76 #define IWK_DEBUG_SCAN (1 << 10)
77 #define IWK_DEBUG_TX (1 << 11)
78 #define IWK_DEBUG_RATECTL (1 << 12)
79 #define IWK_DEBUG_RADIO (1 << 13)
80 #define IWK_DEBUG_RESUME (1 << 14)
81 #define IWK_DEBUG_CALIBRATION (1 << 15)
82 uint32_t iwk_dbg_flags = 0;
83 #define IWK_DBG(x) \
84 iwk_dbg x
85 #else
86 #define IWK_DBG(x)
87 #endif
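/*
 * Note: IWK_DBG() takes a doubly-parenthesized argument list, e.g.
 * IWK_DBG((IWK_DEBUG_DMA, "...", ...)), so that the whole call expands
 * to nothing when DEBUG is not defined.
 */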
88
89 static void *iwk_soft_state_p = NULL;
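/*
 * The 4965 microcode is stored as a C byte-array initializer
 * (fw-iw/iw4965.ucode.hex) and linked directly into the driver;
 * sc_hdr is later pointed at the firmware header at the start of
 * this image.
 */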
90 static uint8_t iwk_fw_bin [] = {
91 #include "fw-iw/iw4965.ucode.hex"
92 };
93
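/*
 * The DMA attribute templates below differ mainly in their alignment
 * requirements: 4KB for the shared and keep-warm pages, 256 bytes for
 * ring descriptors and rx buffers, 4 bytes for commands and tx
 * buffers, and 16 bytes for the firmware sections. Each one limits a
 * binding to a single cookie (one segment).
 */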
94 /* DMA attributes for a shared page */
95 static ddi_dma_attr_t sh_dma_attr = {
96 DMA_ATTR_V0, /* version of this structure */
97 0, /* lowest usable address */
98 0xffffffffU, /* highest usable address */
99 0xffffffffU, /* maximum DMAable byte count */
100 0x1000, /* alignment in bytes */
101 0x1000, /* burst sizes (any?) */
102 1, /* minimum transfer */
103 0xffffffffU, /* maximum transfer */
104 0xffffffffU, /* maximum segment length */
105 1, /* maximum number of segments */
106 1, /* granularity */
107 0, /* flags (reserved) */
108 };
109
110 /* DMA attributes for a keep warm DRAM descriptor */
111 static ddi_dma_attr_t kw_dma_attr = {
112 DMA_ATTR_V0, /* version of this structure */
113 0, /* lowest usable address */
114 0xffffffffU, /* highest usable address */
115 0xffffffffU, /* maximum DMAable byte count */
116 0x1000, /* alignment in bytes */
117 0x1000, /* burst sizes (any?) */
118 1, /* minimum transfer */
119 0xffffffffU, /* maximum transfer */
120 0xffffffffU, /* maximum segment length */
121 1, /* maximum number of segments */
122 1, /* granularity */
123 0, /* flags (reserved) */
124 };
125
126 /* DMA attributes for a ring descriptor */
127 static ddi_dma_attr_t ring_desc_dma_attr = {
128 DMA_ATTR_V0, /* version of this structure */
129 0, /* lowest usable address */
130 0xffffffffU, /* highest usable address */
131 0xffffffffU, /* maximum DMAable byte count */
132 0x100, /* alignment in bytes */
133 0x100, /* burst sizes (any?) */
134 1, /* minimum transfer */
135 0xffffffffU, /* maximum transfer */
136 0xffffffffU, /* maximum segment length */
137 1, /* maximum number of segments */
138 1, /* granularity */
139 0, /* flags (reserved) */
140 };
141
142 /* DMA attributes for a cmd */
143 static ddi_dma_attr_t cmd_dma_attr = {
144 DMA_ATTR_V0, /* version of this structure */
145 0, /* lowest usable address */
146 0xffffffffU, /* highest usable address */
147 0xffffffffU, /* maximum DMAable byte count */
148 4, /* alignment in bytes */
149 0x100, /* burst sizes (any?) */
150 1, /* minimum transfer */
151 0xffffffffU, /* maximum transfer */
152 0xffffffffU, /* maximum segment length */
153 1, /* maximum number of segments */
154 1, /* granularity */
155 0, /* flags (reserved) */
156 };
157
158 /* DMA attributes for a rx buffer */
159 static ddi_dma_attr_t rx_buffer_dma_attr = {
160 DMA_ATTR_V0, /* version of this structure */
161 0, /* lowest usable address */
162 0xffffffffU, /* highest usable address */
163 0xffffffffU, /* maximum DMAable byte count */
164 0x100, /* alignment in bytes */
165 0x100, /* burst sizes (any?) */
166 1, /* minimum transfer */
167 0xffffffffU, /* maximum transfer */
168 0xffffffffU, /* maximum segment length */
169 1, /* maximum number of segments */
170 1, /* granularity */
171 0, /* flags (reserved) */
172 };
173
174 /*
175 * DMA attributes for a tx buffer.
176  * The hardware supports up to 4 segments per descriptor.
177  * Since all the wifi drivers currently put the whole frame in a
178  * single descriptor, we set the maximum number of segments to 1,
179  * the same as for the rx buffer. We may leverage the hardware's
180  * multi-segment ability in the future, which is why rx and tx
181  * buffer_dma_attr are kept as separate definitions.
182 */
183 static ddi_dma_attr_t tx_buffer_dma_attr = {
184 DMA_ATTR_V0, /* version of this structure */
185 0, /* lowest usable address */
186 0xffffffffU, /* highest usable address */
187 0xffffffffU, /* maximum DMAable byte count */
188 4, /* alignment in bytes */
189 0x100, /* burst sizes (any?) */
190 1, /* minimum transfer */
191 0xffffffffU, /* maximum transfer */
192 0xffffffffU, /* maximum segment length */
193 1, /* maximum number of segments */
194 1, /* granularity */
195 0, /* flags (reserved) */
196 };
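/*
 * A possible future change (as noted above) would raise the
 * maximum-segments field to 4 and bind each message block fragment
 * separately in the tx path; this is only a sketch of the idea, not
 * something the driver does today.
 */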
197
198 /* DMA attributes for text and data part in the firmware */
199 static ddi_dma_attr_t fw_dma_attr = {
200 DMA_ATTR_V0, /* version of this structure */
201 0, /* lowest usable address */
202 0xffffffffU, /* highest usable address */
203 0x7fffffff, /* maximum DMAable byte count */
204 0x10, /* alignment in bytes */
205 0x100, /* burst sizes (any?) */
206 1, /* minimum transfer */
207 0xffffffffU, /* maximum transfer */
208 0xffffffffU, /* maximum segment length */
209 1, /* maximum number of segments */
210 1, /* granularity */
211 0, /* flags (reserved) */
212 };
213
214
215 /* regs access attributes */
216 static ddi_device_acc_attr_t iwk_reg_accattr = {
217 DDI_DEVICE_ATTR_V0,
218 DDI_STRUCTURE_LE_ACC,
219 DDI_STRICTORDER_ACC,
220 DDI_DEFAULT_ACC
221 };
222
223 /* DMA access attributes for Descriptor */
224 static ddi_device_acc_attr_t iwk_dma_descattr = {
225 DDI_DEVICE_ATTR_V0,
226 DDI_STRUCTURE_LE_ACC,
227 DDI_STRICTORDER_ACC,
228 DDI_DEFAULT_ACC
229 };
230
231 /* DMA access attributes */
232 static ddi_device_acc_attr_t iwk_dma_accattr = {
233 DDI_DEVICE_ATTR_V0,
234 DDI_NEVERSWAP_ACC,
235 DDI_STRICTORDER_ACC,
236 DDI_DEFAULT_ACC
237 };
238
239 static int iwk_ring_init(iwk_sc_t *);
240 static void iwk_ring_free(iwk_sc_t *);
241 static int iwk_alloc_shared(iwk_sc_t *);
242 static void iwk_free_shared(iwk_sc_t *);
243 static int iwk_alloc_kw(iwk_sc_t *);
244 static void iwk_free_kw(iwk_sc_t *);
245 static int iwk_alloc_fw_dma(iwk_sc_t *);
246 static void iwk_free_fw_dma(iwk_sc_t *);
247 static int iwk_alloc_rx_ring(iwk_sc_t *);
248 static void iwk_reset_rx_ring(iwk_sc_t *);
249 static void iwk_free_rx_ring(iwk_sc_t *);
250 static int iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *,
251 int, int);
252 static void iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
253 static void iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
254
255 static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *);
256 static void iwk_node_free(ieee80211_node_t *);
257 static int iwk_newstate(ieee80211com_t *, enum ieee80211_state, int);
258 static int iwk_key_set(ieee80211com_t *, const struct ieee80211_key *,
259 const uint8_t mac[IEEE80211_ADDR_LEN]);
260 static void iwk_mac_access_enter(iwk_sc_t *);
261 static void iwk_mac_access_exit(iwk_sc_t *);
262 static uint32_t iwk_reg_read(iwk_sc_t *, uint32_t);
263 static void iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t);
264 static void iwk_reg_write_region_4(iwk_sc_t *, uint32_t,
265 uint32_t *, int);
266 static int iwk_load_firmware(iwk_sc_t *);
267 static void iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *,
268 iwk_rx_data_t *);
269 static void iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *,
270 iwk_rx_data_t *);
271 static void iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *);
272 static uint_t iwk_intr(caddr_t, caddr_t);
273 static int iwk_eep_load(iwk_sc_t *sc);
274 static void iwk_get_mac_from_eep(iwk_sc_t *sc);
275 static int iwk_eep_sem_down(iwk_sc_t *sc);
276 static void iwk_eep_sem_up(iwk_sc_t *sc);
277 static uint_t iwk_rx_softintr(caddr_t, caddr_t);
278 static uint8_t iwk_rate_to_plcp(int);
279 static int iwk_cmd(iwk_sc_t *, int, const void *, int, int);
280 static void iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t);
281 static int iwk_hw_set_before_auth(iwk_sc_t *);
282 static int iwk_scan(iwk_sc_t *);
283 static int iwk_config(iwk_sc_t *);
284 static void iwk_stop_master(iwk_sc_t *);
285 static int iwk_power_up(iwk_sc_t *);
286 static int iwk_preinit(iwk_sc_t *);
287 static int iwk_init(iwk_sc_t *);
288 static void iwk_stop(iwk_sc_t *);
289 static void iwk_amrr_init(iwk_amrr_t *);
290 static void iwk_amrr_timeout(iwk_sc_t *);
291 static void iwk_amrr_ratectl(void *, ieee80211_node_t *);
292 static int32_t iwk_curr_tempera(iwk_sc_t *sc);
293 static int iwk_tx_power_calibration(iwk_sc_t *sc);
294 static inline int iwk_is_24G_band(iwk_sc_t *sc);
295 static inline int iwk_is_fat_channel(iwk_sc_t *sc);
296 static int iwk_txpower_grp(uint16_t channel);
297 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
298 uint16_t channel,
299 int is_24G, int is_fat, int is_hi_chan);
300 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel);
301 static int iwk_division(int32_t num, int32_t denom, int32_t *res);
302 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
303 int32_t x2, int32_t y2);
304 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
305 struct iwk_eep_calib_channel_info *chan_info);
306 static int32_t iwk_voltage_compensation(int32_t eep_voltage,
307 int32_t curr_voltage);
308 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G);
309 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc,
310 struct iwk_tx_power_db *tp_db);
311 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc);
312 static int iwk_is_associated(iwk_sc_t *sc);
313 static int iwk_rxgain_diff_init(iwk_sc_t *sc);
314 static int iwk_rxgain_diff(iwk_sc_t *sc);
315 static int iwk_rx_sens_init(iwk_sc_t *sc);
316 static int iwk_rx_sens(iwk_sc_t *sc);
317 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
318 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
319 static void iwk_recv_mgmt(struct ieee80211com *ic, mblk_t *mp,
320 struct ieee80211_node *in, int subtype, int rssi, uint32_t rstamp);
321
322 static void iwk_write_event_log(iwk_sc_t *);
323 static void iwk_write_error_log(iwk_sc_t *);
324
325 static int iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
326 static int iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
327 static int iwk_quiesce(dev_info_t *dip);
328
329 /*
330 * GLD specific operations
331 */
332 static int iwk_m_stat(void *arg, uint_t stat, uint64_t *val);
333 static int iwk_m_start(void *arg);
334 static void iwk_m_stop(void *arg);
335 static int iwk_m_unicst(void *arg, const uint8_t *macaddr);
336 static int iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m);
337 static int iwk_m_promisc(void *arg, boolean_t on);
338 static mblk_t *iwk_m_tx(void *arg, mblk_t *mp);
339 static void iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
340 static int iwk_m_setprop(void *arg, const char *pr_name,
341 mac_prop_id_t wldp_pr_name, uint_t wldp_length, const void *wldp_buf);
342 static int iwk_m_getprop(void *arg, const char *pr_name,
343 mac_prop_id_t wldp_pr_name, uint_t wldp_length, void *wldp_buf);
344 static void iwk_m_propinfo(void *arg, const char *pr_name,
345 mac_prop_id_t wldp_pr_num, mac_prop_info_handle_t mph);
346 static void iwk_destroy_locks(iwk_sc_t *sc);
347 static int iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type);
348 static void iwk_thread(iwk_sc_t *sc);
349 static void iwk_watchdog(void *arg);
350 static int iwk_run_state_config_ibss(ieee80211com_t *ic);
351 static int iwk_run_state_config_sta(ieee80211com_t *ic);
352 static int iwk_fast_recover(iwk_sc_t *sc);
353 static int iwk_start_tx_beacon(ieee80211com_t *ic);
354 static int iwk_clean_add_node_ibss(struct ieee80211com *ic,
355 uint8_t addr[IEEE80211_ADDR_LEN], uint8_t *index2);
356
357 /*
358 * Supported rates for 802.11b/g modes (in 500Kbps unit).
359 * 11a and 11n support will be added later.
360 */
361 static const struct ieee80211_rateset iwk_rateset_11b =
362 { 4, { 2, 4, 11, 22 } };
363
364 static const struct ieee80211_rateset iwk_rateset_11g =
365 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
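/* in 500Kbps units: 2 => 1Mbps, 11 => 5.5Mbps, 22 => 11Mbps, 108 => 54Mbps */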
366
367 /*
368 * For mfthread only
369 */
370 extern pri_t minclsyspri;
371
372 #define DRV_NAME_4965 "iwk"
373
374 /*
375 * Module Loading Data & Entry Points
376 */
377 DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach,
378 iwk_detach, nodev, NULL, D_MP, NULL, iwk_quiesce);
379
380 static struct modldrv iwk_modldrv = {
381 &mod_driverops,
382 "Intel(R) 4965AGN driver(N)",
383 &iwk_devops
384 };
385
386 static struct modlinkage iwk_modlinkage = {
387 MODREV_1,
388 &iwk_modldrv,
389 NULL
390 };
391
392 int
393 _init(void)
394 {
395 int status;
396
397 status = ddi_soft_state_init(&iwk_soft_state_p,
398 sizeof (iwk_sc_t), 1);
399 if (status != DDI_SUCCESS)
400 return (status);
401
402 mac_init_ops(&iwk_devops, DRV_NAME_4965);
403 status = mod_install(&iwk_modlinkage);
404 if (status != DDI_SUCCESS) {
405 mac_fini_ops(&iwk_devops);
406 ddi_soft_state_fini(&iwk_soft_state_p);
407 }
408
409 return (status);
410 }
411
412 int
413 _fini(void)
414 {
415 int status;
416
417 status = mod_remove(&iwk_modlinkage);
418 if (status == DDI_SUCCESS) {
419 mac_fini_ops(&iwk_devops);
420 ddi_soft_state_fini(&iwk_soft_state_p);
421 }
422
423 return (status);
424 }
425
426 int
427 _info(struct modinfo *mip)
428 {
429 return (mod_info(&iwk_modlinkage, mip));
430 }
431
432 /*
433 * Mac Call Back entries
434 */
435 mac_callbacks_t iwk_m_callbacks = {
436 MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
437 iwk_m_stat,
438 iwk_m_start,
439 iwk_m_stop,
440 iwk_m_promisc,
441 iwk_m_multicst,
442 iwk_m_unicst,
443 iwk_m_tx,
444 NULL,
445 iwk_m_ioctl,
446 NULL,
447 NULL,
448 NULL,
449 iwk_m_setprop,
450 iwk_m_getprop,
451 iwk_m_propinfo
452 };
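/*
 * The first member is a mask of the optional callbacks actually
 * provided (ioctl plus the property routines); the NULL entries are
 * optional GLDv3 entry points this driver does not implement.
 */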
453
454 #ifdef DEBUG
455 void
456 iwk_dbg(uint32_t flags, const char *fmt, ...)
457 {
458 va_list ap;
459
460 if (flags & iwk_dbg_flags) {
461 va_start(ap, fmt);
462 vcmn_err(CE_NOTE, fmt, ap);
463 va_end(ap);
464 }
465 }
466 #endif
467
468 /*
469 * device operations
470 */
471 int
472 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
473 {
474 iwk_sc_t *sc;
475 ieee80211com_t *ic;
476 int instance, err, i;
477 char strbuf[32];
478 wifi_data_t wd = { 0 };
479 mac_register_t *macp;
480
481 int intr_type;
482 int intr_count;
483 int intr_actual;
484
485 switch (cmd) {
486 case DDI_ATTACH:
487 break;
488 case DDI_RESUME:
489 sc = ddi_get_soft_state(iwk_soft_state_p,
490 ddi_get_instance(dip));
491 ASSERT(sc != NULL);
492
493 mutex_enter(&sc->sc_glock);
494 sc->sc_flags &= ~IWK_F_SUSPEND;
495 mutex_exit(&sc->sc_glock);
496
497 if (sc->sc_flags & IWK_F_RUNNING)
498 (void) iwk_init(sc);
499
500 mutex_enter(&sc->sc_glock);
501 sc->sc_flags |= IWK_F_LAZY_RESUME;
502 mutex_exit(&sc->sc_glock);
503
504 IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n"));
505 return (DDI_SUCCESS);
506 default:
507 err = DDI_FAILURE;
508 goto attach_fail1;
509 }
510
511 instance = ddi_get_instance(dip);
512 err = ddi_soft_state_zalloc(iwk_soft_state_p, instance);
513 if (err != DDI_SUCCESS) {
514 cmn_err(CE_WARN,
515 "iwk_attach(): failed to allocate soft state\n");
516 goto attach_fail1;
517 }
518 sc = ddi_get_soft_state(iwk_soft_state_p, instance);
519 sc->sc_dip = dip;
520
521 err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
522 &iwk_reg_accattr, &sc->sc_cfg_handle);
523 if (err != DDI_SUCCESS) {
524 cmn_err(CE_WARN,
525 "iwk_attach(): failed to map config spaces regs\n");
526 goto attach_fail2;
527 }
528 sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
529 (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
530 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0);
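	/*
	 * The write above clears PCI config offset 0x41, presumably the
	 * retry timeout register, as other Intel wireless drivers do;
	 * the offset is not symbolically named here.
	 */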
531 sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
532 (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
533 if (!sc->sc_clsz)
534 sc->sc_clsz = 16;
535 sc->sc_clsz = (sc->sc_clsz << 2);
536 sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
537 IEEE80211_MTU + IEEE80211_CRC_LEN +
538 (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
539 IEEE80211_WEP_CRCLEN), sc->sc_clsz);
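	/*
	 * The PCI cache line size register is in units of 32-bit words,
	 * hence the << 2 conversion to bytes above; sc_dmabuf_sz then
	 * rounds one maximal 802.11 frame (plus 0x1000 bytes of headroom
	 * and WEP/CRC overhead) up to a multiple of that cache line size.
	 */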
540 /*
541 * Map operating registers
542 */
543 err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
544 0, 0, &iwk_reg_accattr, &sc->sc_handle);
545 if (err != DDI_SUCCESS) {
546 cmn_err(CE_WARN,
547 "iwk_attach(): failed to map device regs\n");
548 goto attach_fail2a;
549 }
550
551 err = ddi_intr_get_supported_types(dip, &intr_type);
552 if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
553 cmn_err(CE_WARN, "iwk_attach(): "
554 "Fixed type interrupt is not supported\n");
555 goto attach_fail_intr_a;
556 }
557
558 err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
559 if ((err != DDI_SUCCESS) || (intr_count != 1)) {
560 cmn_err(CE_WARN, "iwk_attach(): "
561 "No fixed interrupts\n");
562 goto attach_fail_intr_a;
563 }
564
565 sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
566
567 err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
568 intr_count, &intr_actual, 0);
569 if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
570 cmn_err(CE_WARN, "iwk_attach(): "
571 "ddi_intr_alloc() failed 0x%x\n", err);
572 goto attach_fail_intr_b;
573 }
574
575 err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
576 if (err != DDI_SUCCESS) {
577 cmn_err(CE_WARN, "iwk_attach(): "
578 "ddi_intr_get_pri() failed 0x%x\n", err);
579 goto attach_fail_intr_c;
580 }
581
582 mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
583 DDI_INTR_PRI(sc->sc_intr_pri));
584 mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
585 DDI_INTR_PRI(sc->sc_intr_pri));
586 mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
587 DDI_INTR_PRI(sc->sc_intr_pri));
588 mutex_init(&sc->sc_ibss.node_tb_lock, NULL, MUTEX_DRIVER,
589 DDI_INTR_PRI(sc->sc_intr_pri));
590
591 cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL);
592 cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
593 cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL);
594 /*
595 * initialize the mfthread
596 */
597 cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
598 sc->sc_mf_thread = NULL;
599 sc->sc_mf_thread_switch = 0;
600
601 /*
602 * Allocate shared page.
603 */
604 err = iwk_alloc_shared(sc);
605 if (err != DDI_SUCCESS) {
606 cmn_err(CE_WARN, "iwk_attach(): "
607 "failed to allocate shared page\n");
608 goto attach_fail3;
609 }
610
611 /*
612 * Allocate keep warm page.
613 */
614 err = iwk_alloc_kw(sc);
615 if (err != DDI_SUCCESS) {
616 cmn_err(CE_WARN, "iwk_attach(): "
617 "failed to allocate keep warm page\n");
618 goto attach_fail3a;
619 }
620
621 /*
622 * Do some necessary hardware initializations.
623 */
624 err = iwk_preinit(sc);
625 if (err != DDI_SUCCESS) {
626 cmn_err(CE_WARN, "iwk_attach(): "
627 "failed to init hardware\n");
628 goto attach_fail4;
629 }
630
631 /* initialize EEPROM */
632 err = iwk_eep_load(sc); /* get hardware configurations from eeprom */
633 if (err != 0) {
634 cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n");
635 goto attach_fail4;
636 }
637
638 if (LE_16(sc->sc_eep_map.calib_version) < EEP_TX_POWER_VERSION_NEW) {
639 cmn_err(CE_WARN, "older EEPROM detected\n");
640 goto attach_fail4;
641 }
642
643 iwk_get_mac_from_eep(sc);
644
645 err = iwk_ring_init(sc);
646 if (err != DDI_SUCCESS) {
647 cmn_err(CE_WARN, "iwk_attach(): "
648 "failed to allocate and initialize ring\n");
649 goto attach_fail4;
650 }
651
652 sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin;
653
654 err = iwk_alloc_fw_dma(sc);
655 if (err != DDI_SUCCESS) {
656 cmn_err(CE_WARN, "iwk_attach(): "
657 "failed to allocate firmware dma\n");
658 goto attach_fail5;
659 }
660
661 /*
662 * Initialize the wifi part, which will be used by
663 * generic layer
664 */
665 ic = &sc->sc_ic;
666 ic->ic_phytype = IEEE80211_T_OFDM;
667 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
668 ic->ic_state = IEEE80211_S_INIT;
669 ic->ic_maxrssi = 100; /* experimental number */
670 ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
671 IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
672 /*
673 * use software WEP and TKIP, hardware CCMP;
674 */
675 ic->ic_caps |= IEEE80211_C_AES_CCM;
676 /*
677 * Support WPA/WPA2
678 */
679 ic->ic_caps |= IEEE80211_C_WPA;
680 /*
681 * support Adhoc mode
682 */
683 ic->ic_caps |= IEEE80211_C_IBSS;
684
685 /* set supported .11b and .11g rates */
686 ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b;
687 ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g;
688
689 /* set supported .11b and .11g channels (1 through 11) */
690 for (i = 1; i <= 11; i++) {
691 ic->ic_sup_channels[i].ich_freq =
692 ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
693 ic->ic_sup_channels[i].ich_flags =
694 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
695 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
696 IEEE80211_CHAN_PASSIVE;
697 }
698 ic->ic_ibss_chan = &ic->ic_sup_channels[0];
699
700 ic->ic_xmit = iwk_send;
701 /*
702 * init Wifi layer
703 */
704 ieee80211_attach(ic);
705
706 /*
707  * each instance has its own WPA door
708 */
709 (void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
710 ddi_driver_name(dip),
711 ddi_get_instance(dip));
712
713 /*
714 * Override 80211 default routines
715 */
716 sc->sc_newstate = ic->ic_newstate;
717 ic->ic_newstate = iwk_newstate;
718 ic->ic_watchdog = iwk_watchdog;
719 sc->sc_recv_mgmt = ic->ic_recv_mgmt;
720 ic->ic_recv_mgmt = iwk_recv_mgmt;
721 ic->ic_node_alloc = iwk_node_alloc;
722 ic->ic_node_free = iwk_node_free;
723 ic->ic_crypto.cs_key_set = iwk_key_set;
724 ieee80211_media_init(ic);
725 /*
726 * initialize default tx key
727 */
728 ic->ic_def_txkey = 0;
729 err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
730 iwk_rx_softintr, (caddr_t)sc);
731 if (err != DDI_SUCCESS) {
732 cmn_err(CE_WARN, "iwk_attach(): "
733 "add soft interrupt failed\n");
734 goto attach_fail7;
735 }
736
737 /*
738 * Add the interrupt handler
739 */
740 err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwk_intr,
741 (caddr_t)sc, NULL);
742 if (err != DDI_SUCCESS) {
743 cmn_err(CE_WARN, "iwk_attach(): "
744 	    "ddi_intr_add_handler() failed\n");
745 goto attach_fail8;
746 }
747
748 err = ddi_intr_enable(sc->sc_intr_htable[0]);
749 if (err != DDI_SUCCESS) {
750 cmn_err(CE_WARN, "iwk_attach(): "
751 "ddi_intr_enable() failed\n");
752 goto attach_fail_intr_d;
753 }
754
755 /*
756 * Initialize pointer to device specific functions
757 */
758 wd.wd_secalloc = WIFI_SEC_NONE;
759 wd.wd_opmode = ic->ic_opmode;
760 IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
761
762 macp = mac_alloc(MAC_VERSION);
763 if (macp == NULL) {
764 cmn_err(CE_WARN,
765 "iwk_attach(): failed to do mac_alloc()\n");
766 goto attach_fail9;
767 }
768
769 macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
770 macp->m_driver = sc;
771 macp->m_dip = dip;
772 macp->m_src_addr = ic->ic_macaddr;
773 macp->m_callbacks = &iwk_m_callbacks;
774 macp->m_min_sdu = 0;
775 macp->m_max_sdu = IEEE80211_MTU;
776 macp->m_pdata = &wd;
777 macp->m_pdata_size = sizeof (wd);
778
779 /*
780 * Register the macp to mac
781 */
782 err = mac_register(macp, &ic->ic_mach);
783 mac_free(macp);
784 if (err != DDI_SUCCESS) {
785 cmn_err(CE_WARN,
786 "iwk_attach(): failed to do mac_register()\n");
787 goto attach_fail9;
788 }
789
790 /*
791 * Create minor node of type DDI_NT_NET_WIFI
792 */
793 (void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
794 err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
795 instance + 1, DDI_NT_NET_WIFI, 0);
796 if (err != DDI_SUCCESS)
797 cmn_err(CE_WARN,
798 "iwk_attach(): failed to do ddi_create_minor_node()\n");
799
800 /*
801 * Notify link is down now
802 */
803 mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
804
805 /*
806 * create the mf thread to handle the link status,
807  * recovery from fatal errors, etc.
808 */
809 sc->sc_mf_thread_switch = 1;
810 if (sc->sc_mf_thread == NULL)
811 sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
812 iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri);
813
814 sc->sc_flags |= IWK_F_ATTACHED;
815
816 return (DDI_SUCCESS);
817 attach_fail9:
818 (void) ddi_intr_disable(sc->sc_intr_htable[0]);
819 attach_fail_intr_d:
820 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
821
822 attach_fail8:
823 (void) ddi_intr_remove_softint(sc->sc_soft_hdl);
824 sc->sc_soft_hdl = NULL;
825 attach_fail7:
826 ieee80211_detach(ic);
827 attach_fail6:
828 iwk_free_fw_dma(sc);
829 attach_fail5:
830 iwk_ring_free(sc);
831 attach_fail4:
832 iwk_free_kw(sc);
833 attach_fail3a:
834 iwk_free_shared(sc);
835 attach_fail3:
836 iwk_destroy_locks(sc);
837 attach_fail_intr_c:
838 (void) ddi_intr_free(sc->sc_intr_htable[0]);
839 attach_fail_intr_b:
840 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
841 attach_fail_intr_a:
842 ddi_regs_map_free(&sc->sc_handle);
843 attach_fail2a:
844 ddi_regs_map_free(&sc->sc_cfg_handle);
845 attach_fail2:
846 ddi_soft_state_free(iwk_soft_state_p, instance);
847 attach_fail1:
848 return (err);
849 }
850
851 int
852 iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
853 {
854 iwk_sc_t *sc;
855 int err;
856
857 sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
858 ASSERT(sc != NULL);
859
860 switch (cmd) {
861 case DDI_DETACH:
862 break;
863 case DDI_SUSPEND:
864 mutex_enter(&sc->sc_glock);
865 sc->sc_flags |= IWK_F_SUSPEND;
866 mutex_exit(&sc->sc_glock);
867 if (sc->sc_flags & IWK_F_RUNNING) {
868 iwk_stop(sc);
869 }
870
871 IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n"));
872 return (DDI_SUCCESS);
873 default:
874 return (DDI_FAILURE);
875 }
876
877 if (!(sc->sc_flags & IWK_F_ATTACHED))
878 return (DDI_FAILURE);
879
880 err = mac_disable(sc->sc_ic.ic_mach);
881 if (err != DDI_SUCCESS)
882 return (err);
883
884 /*
885 * Destroy the mf_thread
886 */
887 mutex_enter(&sc->sc_mt_lock);
888 sc->sc_mf_thread_switch = 0;
889 while (sc->sc_mf_thread != NULL) {
890 if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0)
891 break;
892 }
893 mutex_exit(&sc->sc_mt_lock);
894
895 iwk_stop(sc);
896 DELAY(500000);
897
898 /*
899  * Unregister from the MAC layer subsystem
900 */
901 (void) mac_unregister(sc->sc_ic.ic_mach);
902
903 mutex_enter(&sc->sc_glock);
904 iwk_free_fw_dma(sc);
905 iwk_ring_free(sc);
906 iwk_free_kw(sc);
907 iwk_free_shared(sc);
908 mutex_exit(&sc->sc_glock);
909
910 (void) ddi_intr_disable(sc->sc_intr_htable[0]);
911 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
912 (void) ddi_intr_free(sc->sc_intr_htable[0]);
913 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
914
915 (void) ddi_intr_remove_softint(sc->sc_soft_hdl);
916 sc->sc_soft_hdl = NULL;
917
918 /*
919 * detach ieee80211
920 */
921 ieee80211_detach(&sc->sc_ic);
922
923 iwk_destroy_locks(sc);
924
925 ddi_regs_map_free(&sc->sc_handle);
926 ddi_regs_map_free(&sc->sc_cfg_handle);
927 ddi_remove_minor_node(dip, NULL);
928 ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip));
929
930 return (DDI_SUCCESS);
931 }
932
933 /*
934 * quiesce(9E) entry point.
935 *
936 * This function is called when the system is single-threaded at high
937  * PIL with preemption disabled. Therefore, this function must not
938  * block.
939 *
940 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
941 * DDI_FAILURE indicates an error condition and should almost never happen.
942 */
943 int
944 iwk_quiesce(dev_info_t *dip)
945 {
946 iwk_sc_t *sc;
947
948 sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
949 ASSERT(sc != NULL);
950
951 	/* no message prints and no lock acquisition */
952 #ifdef DEBUG
953 iwk_dbg_flags = 0;
954 #endif
955 sc->sc_flags |= IWK_F_QUIESCED;
956
957 iwk_stop(sc);
958
959 return (DDI_SUCCESS);
960 }
961
962 static void
963 iwk_destroy_locks(iwk_sc_t *sc)
964 {
965 cv_destroy(&sc->sc_mt_cv);
966 mutex_destroy(&sc->sc_mt_lock);
967 cv_destroy(&sc->sc_tx_cv);
968 cv_destroy(&sc->sc_cmd_cv);
969 cv_destroy(&sc->sc_fw_cv);
970 mutex_destroy(&sc->sc_tx_lock);
971 mutex_destroy(&sc->sc_glock);
972 }
973
974 /*
975 * Allocate an area of memory and a DMA handle for accessing it
976 */
977 static int
978 iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize,
979 ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
980 uint_t dma_flags, iwk_dma_t *dma_p)
981 {
982 caddr_t vaddr;
983 int err;
984
985 /*
986 * Allocate handle
987 */
988 err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
989 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
990 if (err != DDI_SUCCESS) {
991 dma_p->dma_hdl = NULL;
992 return (DDI_FAILURE);
993 }
994
995 /*
996 * Allocate memory
997 */
998 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
999 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
1000 DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
1001 if (err != DDI_SUCCESS) {
1002 ddi_dma_free_handle(&dma_p->dma_hdl);
1003 dma_p->dma_hdl = NULL;
1004 dma_p->acc_hdl = NULL;
1005 return (DDI_FAILURE);
1006 }
1007
1008 /*
1009 * Bind the two together
1010 */
1011 dma_p->mem_va = vaddr;
1012 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
1013 vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
1014 &dma_p->cookie, &dma_p->ncookies);
1015 if (err != DDI_DMA_MAPPED) {
1016 ddi_dma_mem_free(&dma_p->acc_hdl);
1017 ddi_dma_free_handle(&dma_p->dma_hdl);
1018 dma_p->acc_hdl = NULL;
1019 dma_p->dma_hdl = NULL;
1020 return (DDI_FAILURE);
1021 }
1022
1023 dma_p->nslots = ~0U;
1024 dma_p->size = ~0U;
1025 dma_p->token = ~0U;
1026 dma_p->offset = 0;
1027 return (DDI_SUCCESS);
1028 }
1029
1030 /*
1031 * Free one allocated area of DMAable memory
1032 */
1033 static void
1034 iwk_free_dma_mem(iwk_dma_t *dma_p)
1035 {
1036 if (dma_p->dma_hdl != NULL) {
1037 if (dma_p->ncookies) {
1038 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1039 dma_p->ncookies = 0;
1040 }
1041 ddi_dma_free_handle(&dma_p->dma_hdl);
1042 dma_p->dma_hdl = NULL;
1043 }
1044
1045 if (dma_p->acc_hdl != NULL) {
1046 ddi_dma_mem_free(&dma_p->acc_hdl);
1047 dma_p->acc_hdl = NULL;
1048 }
1049 }
1050
1051 /*
1052  * Allocate DMA memory for loading the firmware images.
1053 */
1054 static int
1055 iwk_alloc_fw_dma(iwk_sc_t *sc)
1056 {
1057 int err = DDI_SUCCESS;
1058 iwk_dma_t *dma_p;
1059 char *t;
1060
1061 /*
1062 * firmware image layout:
1063 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
1064 */
1065 t = (char *)(sc->sc_hdr + 1);
1066 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1067 &fw_dma_attr, &iwk_dma_accattr,
1068 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1069 &sc->sc_dma_fw_text);
1070 dma_p = &sc->sc_dma_fw_text;
1071 IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n",
1072 dma_p->ncookies, dma_p->cookie.dmac_address,
1073 dma_p->cookie.dmac_size));
1074 if (err != DDI_SUCCESS) {
1075 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1076 " text dma memory");
1077 goto fail;
1078 }
1079 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
1080
1081 t += LE_32(sc->sc_hdr->textsz);
1082 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1083 &fw_dma_attr, &iwk_dma_accattr,
1084 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1085 &sc->sc_dma_fw_data);
1086 dma_p = &sc->sc_dma_fw_data;
1087 IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
1088 dma_p->ncookies, dma_p->cookie.dmac_address,
1089 dma_p->cookie.dmac_size));
1090 if (err != DDI_SUCCESS) {
1091 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1092 " data dma memory");
1093 goto fail;
1094 }
1095 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1096
1097 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1098 &fw_dma_attr, &iwk_dma_accattr,
1099 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1100 &sc->sc_dma_fw_data_bak);
1101 dma_p = &sc->sc_dma_fw_data_bak;
1102 IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
1103 "size:%lx]\n",
1104 dma_p->ncookies, dma_p->cookie.dmac_address,
1105 dma_p->cookie.dmac_size));
1106 if (err != DDI_SUCCESS) {
1107 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1108 		    " data backup dma memory");
1109 goto fail;
1110 }
1111 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1112
1113 t += LE_32(sc->sc_hdr->datasz);
1114 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1115 &fw_dma_attr, &iwk_dma_accattr,
1116 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1117 &sc->sc_dma_fw_init_text);
1118 dma_p = &sc->sc_dma_fw_init_text;
1119 IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
1120 "size:%lx]\n",
1121 dma_p->ncookies, dma_p->cookie.dmac_address,
1122 dma_p->cookie.dmac_size));
1123 if (err != DDI_SUCCESS) {
1124 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1125 		    " init text dma memory");
1126 goto fail;
1127 }
1128 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
1129
1130 t += LE_32(sc->sc_hdr->init_textsz);
1131 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1132 &fw_dma_attr, &iwk_dma_accattr,
1133 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1134 &sc->sc_dma_fw_init_data);
1135 dma_p = &sc->sc_dma_fw_init_data;
1136 IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
1137 "size:%lx]\n",
1138 dma_p->ncookies, dma_p->cookie.dmac_address,
1139 dma_p->cookie.dmac_size));
1140 if (err != DDI_SUCCESS) {
1141 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1142 		    " init data dma memory");
1143 goto fail;
1144 }
1145 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
1146
1147 sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1148 fail:
1149 return (err);
1150 }
1151
1152 static void
1153 iwk_free_fw_dma(iwk_sc_t *sc)
1154 {
1155 iwk_free_dma_mem(&sc->sc_dma_fw_text);
1156 iwk_free_dma_mem(&sc->sc_dma_fw_data);
1157 iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
1158 iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
1159 iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
1160 }
1161
1162 /*
1163 * Allocate a shared page between host and NIC.
1164 */
1165 static int
1166 iwk_alloc_shared(iwk_sc_t *sc)
1167 {
1168 iwk_dma_t *dma_p;
1169 int err = DDI_SUCCESS;
1170
1171 /* must be aligned on a 4K-page boundary */
1172 err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
1173 &sh_dma_attr, &iwk_dma_descattr,
1174 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1175 &sc->sc_dma_sh);
1176 if (err != DDI_SUCCESS)
1177 goto fail;
1178 sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;
1179
1180 dma_p = &sc->sc_dma_sh;
1181 IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
1182 dma_p->ncookies, dma_p->cookie.dmac_address,
1183 dma_p->cookie.dmac_size));
1184
1185 return (err);
1186 fail:
1187 iwk_free_shared(sc);
1188 return (err);
1189 }
1190
1191 static void
1192 iwk_free_shared(iwk_sc_t *sc)
1193 {
1194 iwk_free_dma_mem(&sc->sc_dma_sh);
1195 }
1196
1197 /*
1198 * Allocate a keep warm page.
1199 */
1200 static int
1201 iwk_alloc_kw(iwk_sc_t *sc)
1202 {
1203 iwk_dma_t *dma_p;
1204 int err = DDI_SUCCESS;
1205
1206 /* must be aligned on a 4K-page boundary */
1207 err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE,
1208 &kw_dma_attr, &iwk_dma_accattr,
1209 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1210 &sc->sc_dma_kw);
1211 if (err != DDI_SUCCESS)
1212 goto fail;
1213
1214 dma_p = &sc->sc_dma_kw;
1215 IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n",
1216 dma_p->ncookies, dma_p->cookie.dmac_address,
1217 dma_p->cookie.dmac_size));
1218
1219 return (err);
1220 fail:
1221 iwk_free_kw(sc);
1222 return (err);
1223 }
1224
1225 static void
1226 iwk_free_kw(iwk_sc_t *sc)
1227 {
1228 iwk_free_dma_mem(&sc->sc_dma_kw);
1229 }
1230
1231 static int
1232 iwk_alloc_rx_ring(iwk_sc_t *sc)
1233 {
1234 iwk_rx_ring_t *ring;
1235 iwk_rx_data_t *data;
1236 iwk_dma_t *dma_p;
1237 int i, err = DDI_SUCCESS;
1238
1239 ring = &sc->sc_rxq;
1240 ring->cur = 0;
1241
1242 err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
1243 &ring_desc_dma_attr, &iwk_dma_descattr,
1244 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1245 &ring->dma_desc);
1246 if (err != DDI_SUCCESS) {
1247 cmn_err(CE_WARN, "dma alloc rx ring desc failed\n");
1248 goto fail;
1249 }
1250 ring->desc = (uint32_t *)ring->dma_desc.mem_va;
1251 dma_p = &ring->dma_desc;
1252 IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n",
1253 dma_p->ncookies, dma_p->cookie.dmac_address,
1254 dma_p->cookie.dmac_size));
1255
1256 /*
1257 * Allocate Rx buffers.
1258 */
1259 for (i = 0; i < RX_QUEUE_SIZE; i++) {
1260 data = &ring->data[i];
1261 err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1262 &rx_buffer_dma_attr, &iwk_dma_accattr,
1263 DDI_DMA_READ | DDI_DMA_STREAMING,
1264 &data->dma_data);
1265 if (err != DDI_SUCCESS) {
1266 cmn_err(CE_WARN, "dma alloc rx ring buf[%d] "
1267 "failed\n", i);
1268 goto fail;
1269 }
1270 /*
1271 		 * the physical address bits [8-35] are used,
1272 		 * instead of bits [0-31] as in the 3945.
1273 */
1274 ring->desc[i] = (uint32_t)
1275 (data->dma_data.cookie.dmac_address >> 8);
1276 }
1277 dma_p = &ring->data[0].dma_data;
1278 IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx "
1279 "size:%lx]\n",
1280 dma_p->ncookies, dma_p->cookie.dmac_address,
1281 dma_p->cookie.dmac_size));
1282
1283 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1284
1285 return (err);
1286
1287 fail:
1288 iwk_free_rx_ring(sc);
1289 return (err);
1290 }
1291
1292 static void
1293 iwk_reset_rx_ring(iwk_sc_t *sc)
1294 {
1295 int n;
1296
1297 iwk_mac_access_enter(sc);
1298 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1299 for (n = 0; n < 2000; n++) {
1300 if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24))
1301 break;
1302 DELAY(1000);
1303 }
1304
1305 if (n == 2000)
1306 IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n"));
1307
1308 iwk_mac_access_exit(sc);
1309
1310 sc->sc_rxq.cur = 0;
1311 }
1312
1313 static void
1314 iwk_free_rx_ring(iwk_sc_t *sc)
1315 {
1316 int i;
1317
1318 for (i = 0; i < RX_QUEUE_SIZE; i++) {
1319 if (sc->sc_rxq.data[i].dma_data.dma_hdl)
1320 IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1321 DDI_DMA_SYNC_FORCPU);
1322 iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1323 }
1324
1325 if (sc->sc_rxq.dma_desc.dma_hdl)
1326 IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1327 iwk_free_dma_mem(&sc->sc_rxq.dma_desc);
1328 }
1329
1330 static int
1331 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring,
1332 int slots, int qid)
1333 {
1334 iwk_tx_data_t *data;
1335 iwk_tx_desc_t *desc_h;
1336 uint32_t paddr_desc_h;
1337 iwk_cmd_t *cmd_h;
1338 uint32_t paddr_cmd_h;
1339 iwk_dma_t *dma_p;
1340 int i, err = DDI_SUCCESS;
1341
1342 ring->qid = qid;
1343 ring->count = TFD_QUEUE_SIZE_MAX;
1344 ring->window = slots;
1345 ring->queued = 0;
1346 ring->cur = 0;
1347
1348 err = iwk_alloc_dma_mem(sc,
1349 TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t),
1350 &ring_desc_dma_attr, &iwk_dma_descattr,
1351 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1352 &ring->dma_desc);
1353 if (err != DDI_SUCCESS) {
1354 cmn_err(CE_WARN, "dma alloc tx ring desc[%d] "
1355 "failed\n", qid);
1356 goto fail;
1357 }
1358 dma_p = &ring->dma_desc;
1359 IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1360 dma_p->ncookies, dma_p->cookie.dmac_address,
1361 dma_p->cookie.dmac_size));
1362
1363 desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va;
1364 paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1365
1366 err = iwk_alloc_dma_mem(sc,
1367 TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t),
1368 &cmd_dma_attr, &iwk_dma_accattr,
1369 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1370 &ring->dma_cmd);
1371 if (err != DDI_SUCCESS) {
1372 cmn_err(CE_WARN, "dma alloc tx ring cmd[%d] "
1373 "failed\n", qid);
1374 goto fail;
1375 }
1376 dma_p = &ring->dma_cmd;
1377 IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1378 dma_p->ncookies, dma_p->cookie.dmac_address,
1379 dma_p->cookie.dmac_size));
1380
1381 cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va;
1382 paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1383
1384 /*
1385 * Allocate Tx buffers.
1386 */
1387 ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1388 KM_NOSLEEP);
1389 if (ring->data == NULL) {
1390 cmn_err(CE_WARN, "could not allocate tx data slots\n");
1391 goto fail;
1392 }
1393
1394 for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1395 data = &ring->data[i];
1396 err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1397 &tx_buffer_dma_attr, &iwk_dma_accattr,
1398 DDI_DMA_WRITE | DDI_DMA_STREAMING,
1399 &data->dma_data);
1400 if (err != DDI_SUCCESS) {
1401 cmn_err(CE_WARN, "dma alloc tx ring "
1402 "buf[%d] failed\n", i);
1403 goto fail;
1404 }
1405
1406 data->desc = desc_h + i;
1407 data->paddr_desc = paddr_desc_h +
1408 _PTRDIFF(data->desc, desc_h);
1409 data->cmd = cmd_h + i; /* (i % slots); */
1410 /* ((i % slots) * sizeof (iwk_cmd_t)); */
1411 data->paddr_cmd = paddr_cmd_h +
1412 _PTRDIFF(data->cmd, cmd_h);
1413 }
1414 dma_p = &ring->data[0].dma_data;
1415 IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx "
1416 "size:%lx]\n",
1417 dma_p->ncookies, dma_p->cookie.dmac_address,
1418 dma_p->cookie.dmac_size));
1419
1420 return (err);
1421
1422 fail:
1423 if (ring->data)
1424 kmem_free(ring->data,
1425 sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX);
1426 iwk_free_tx_ring(sc, ring);
1427 return (err);
1428 }
1429
1430 static void
1431 iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1432 {
1433 iwk_tx_data_t *data;
1434 int i, n;
1435
1436 iwk_mac_access_enter(sc);
1437
1438 IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1439 for (n = 0; n < 200; n++) {
1440 if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) &
1441 IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid))
1442 break;
1443 DELAY(10);
1444 }
1445 if (n == 200) {
1446 IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n",
1447 ring->qid));
1448 }
1449 iwk_mac_access_exit(sc);
1450
1451 for (i = 0; i < ring->count; i++) {
1452 data = &ring->data[i];
1453 IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1454 }
1455
1456 ring->queued = 0;
1457 ring->cur = 0;
1458 }
1459
1460 /*ARGSUSED*/
1461 static void
1462 iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1463 {
1464 int i;
1465
1466 if (ring->dma_desc.dma_hdl != NULL)
1467 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1468 iwk_free_dma_mem(&ring->dma_desc);
1469
1470 if (ring->dma_cmd.dma_hdl != NULL)
1471 IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1472 iwk_free_dma_mem(&ring->dma_cmd);
1473
1474 if (ring->data != NULL) {
1475 for (i = 0; i < ring->count; i++) {
1476 if (ring->data[i].dma_data.dma_hdl)
1477 IWK_DMA_SYNC(ring->data[i].dma_data,
1478 DDI_DMA_SYNC_FORDEV);
1479 iwk_free_dma_mem(&ring->data[i].dma_data);
1480 }
1481 kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t));
1482 }
1483 }
1484
1485 static int
1486 iwk_ring_init(iwk_sc_t *sc)
1487 {
1488 int i, err = DDI_SUCCESS;
1489
1490 for (i = 0; i < IWK_NUM_QUEUES; i++) {
1491 if (i == IWK_CMD_QUEUE_NUM)
1492 continue;
1493 err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1494 i);
1495 if (err != DDI_SUCCESS)
1496 goto fail;
1497 }
1498 err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM],
1499 TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM);
1500 if (err != DDI_SUCCESS)
1501 goto fail;
1502 err = iwk_alloc_rx_ring(sc);
1503 if (err != DDI_SUCCESS)
1504 goto fail;
1505 return (err);
1506
1507 fail:
1508 return (err);
1509 }
1510
1511 static void
1512 iwk_ring_free(iwk_sc_t *sc)
1513 {
1514 int i = IWK_NUM_QUEUES;
1515
1516 iwk_free_rx_ring(sc);
1517 while (--i >= 0) {
1518 iwk_free_tx_ring(sc, &sc->sc_txq[i]);
1519 }
1520 }
1521
1522 /* ARGSUSED */
1523 static ieee80211_node_t *
1524 iwk_node_alloc(ieee80211com_t *ic)
1525 {
1526 iwk_amrr_t *amrr;
1527
1528 amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP);
1529 if (amrr != NULL)
1530 iwk_amrr_init(amrr);
1531 return (&amrr->in);
1532 }
1533
1534 static void
1535 iwk_node_free(ieee80211_node_t *in)
1536 {
1537 ieee80211com_t *ic = in->in_ic;
1538
1539 ic->ic_node_cleanup(in);
1540 if (in->in_wpa_ie != NULL)
1541 ieee80211_free(in->in_wpa_ie);
1542 kmem_free(in, sizeof (iwk_amrr_t));
1543 }
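/*
 * iwk_amrr_t embeds the generic ieee80211_node_t as its first member
 * (amrr->in), which is why iwk_node_alloc() can hand back &amrr->in
 * and iwk_node_free() can free the node pointer with the iwk_amrr_t
 * size.
 */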
1544
1545 /*ARGSUSED*/
1546 static int
1547 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1548 {
1549 iwk_sc_t *sc = (iwk_sc_t *)ic;
1550 ieee80211_node_t *in = ic->ic_bss;
1551 enum ieee80211_state ostate = ic->ic_state;
1552 int i, err = IWK_SUCCESS;
1553
1554 mutex_enter(&sc->sc_glock);
1555 switch (nstate) {
1556 case IEEE80211_S_SCAN:
1557 switch (ostate) {
1558 case IEEE80211_S_INIT:
1559 {
1560 iwk_add_sta_t node;
1561
1562 sc->sc_flags |= IWK_F_SCANNING;
1563 sc->sc_scan_pending = 0;
1564 iwk_set_led(sc, 2, 10, 2);
1565
1566 /*
1567 * clear association to receive beacons from
1568 * all BSS'es
1569 */
1570 sc->sc_config.assoc_id = 0;
1571 sc->sc_config.filter_flags &=
1572 ~LE_32(RXON_FILTER_ASSOC_MSK);
1573
1574 IWK_DBG((IWK_DEBUG_80211, "config chan %d "
1575 "flags %x filter_flags %x\n", sc->sc_config.chan,
1576 sc->sc_config.flags, sc->sc_config.filter_flags));
1577
1578 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
1579 sizeof (iwk_rxon_cmd_t), 1);
1580 if (err != IWK_SUCCESS) {
1581 cmn_err(CE_WARN,
1582 "could not clear association\n");
1583 sc->sc_flags &= ~IWK_F_SCANNING;
1584 mutex_exit(&sc->sc_glock);
1585 return (err);
1586 }
1587
1588 /* add broadcast node to send probe request */
1589 (void) memset(&node, 0, sizeof (node));
1590 (void) memset(&node.bssid, 0xff, IEEE80211_ADDR_LEN);
1591 node.id = IWK_BROADCAST_ID;
1592 err = iwk_cmd(sc, REPLY_ADD_STA, &node,
1593 sizeof (node), 1);
1594 if (err != IWK_SUCCESS) {
1595 cmn_err(CE_WARN, "could not add "
1596 "broadcast node\n");
1597 sc->sc_flags &= ~IWK_F_SCANNING;
1598 mutex_exit(&sc->sc_glock);
1599 return (err);
1600 }
1601 break;
1602 }
1603
1604 case IEEE80211_S_AUTH:
1605 case IEEE80211_S_ASSOC:
1606 case IEEE80211_S_RUN:
1607 sc->sc_flags |= IWK_F_SCANNING;
1608 sc->sc_scan_pending = 0;
1609
1610 iwk_set_led(sc, 2, 10, 2);
1611 /* FALLTHRU */
1612 case IEEE80211_S_SCAN:
1613 mutex_exit(&sc->sc_glock);
1614 /* step to next channel before actual FW scan */
1615 err = sc->sc_newstate(ic, nstate, arg);
1616 mutex_enter(&sc->sc_glock);
1617 if ((err != 0) || ((err = iwk_scan(sc)) != 0)) {
1618 cmn_err(CE_WARN,
1619 "could not initiate scan\n");
1620 sc->sc_flags &= ~IWK_F_SCANNING;
1621 ieee80211_cancel_scan(ic);
1622 }
1623 mutex_exit(&sc->sc_glock);
1624 return (err);
1625 default:
1626 break;
1627
1628 }
1629 sc->sc_clk = 0;
1630 break;
1631
1632 case IEEE80211_S_AUTH:
1633 if (ostate == IEEE80211_S_SCAN) {
1634 sc->sc_flags &= ~IWK_F_SCANNING;
1635 }
1636
1637 /* reset state to handle reassociations correctly */
1638 sc->sc_config.assoc_id = 0;
1639 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1640
1641 /*
1642 		 * before sending authentication and association request frames,
1643 		 * we need to prepare the hardware, such as setting the
1644 		 * channel to that of the target AP.
1645 */
1646 if ((err = iwk_hw_set_before_auth(sc)) != 0) {
1647 cmn_err(CE_WARN, "could not setup firmware for "
1648 "authentication\n");
1649 mutex_exit(&sc->sc_glock);
1650 return (err);
1651 }
1652 break;
1653
1654 case IEEE80211_S_RUN:
1655 if (ostate == IEEE80211_S_SCAN) {
1656 sc->sc_flags &= ~IWK_F_SCANNING;
1657 }
1658
1659 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
1660 /* let LED blink when monitoring */
1661 iwk_set_led(sc, 2, 10, 10);
1662 break;
1663 }
1664 IWK_DBG((IWK_DEBUG_80211, "iwk: associated."));
1665
1666 /* IBSS mode */
1667 if (ic->ic_opmode == IEEE80211_M_IBSS) {
1668 /*
1669 * clean all nodes in ibss node table
1670 * in order to be consistent with hardware
1671 */
1672 err = iwk_run_state_config_ibss(ic);
1673 if (err != IWK_SUCCESS) {
1674 cmn_err(CE_WARN, "iwk_newstate(): "
1675 "failed to update configuration "
1676 "in IBSS mode\n");
1677 mutex_exit(&sc->sc_glock);
1678 return (err);
1679 }
1680 }
1681
1682 		/* non-IBSS mode */
1683 if (ic->ic_opmode != IEEE80211_M_IBSS) {
1684 /* update adapter's configuration */
1685 err = iwk_run_state_config_sta(ic);
1686 if (err != IWK_SUCCESS) {
1687 cmn_err(CE_WARN, "iwk_newstate(): "
1688 "failed to update configuration "
1689 				    "in non-IBSS mode\n");
1690 mutex_exit(&sc->sc_glock);
1691 return (err);
1692 }
1693 }
1694
1695 /* obtain current temperature of chipset */
1696 sc->sc_tempera = iwk_curr_tempera(sc);
1697
1698 /*
1699 		 * perform Tx power calibration to determine
1700 		 * the DSP and radio gains
1701 */
1702 err = iwk_tx_power_calibration(sc);
1703 if (err) {
1704 cmn_err(CE_WARN, "iwk_newstate(): "
1705 "failed to set tx power table\n");
1706 mutex_exit(&sc->sc_glock);
1707 return (err);
1708 }
1709
1710 if (ic->ic_opmode == IEEE80211_M_IBSS) {
1711
1712 /*
1713 * allocate and transmit beacon frames
1714 */
1715 err = iwk_start_tx_beacon(ic);
1716 if (err != IWK_SUCCESS) {
1717 cmn_err(CE_WARN, "iwk_newstate(): "
1718 "can't transmit beacon frames\n");
1719 mutex_exit(&sc->sc_glock);
1720 return (err);
1721 }
1722 }
1723
1724 /* start automatic rate control */
1725 mutex_enter(&sc->sc_mt_lock);
1726 if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1727 sc->sc_flags |= IWK_F_RATE_AUTO_CTL;
1728 /* set rate to some reasonable initial value */
1729 i = in->in_rates.ir_nrates - 1;
1730 while (i > 0 && IEEE80211_RATE(i) > 72)
1731 i--;
1732 in->in_txrate = i;
1733 } else {
1734 sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
1735 }
1736 mutex_exit(&sc->sc_mt_lock);
1737
1738 /* set LED on after associated */
1739 iwk_set_led(sc, 2, 0, 1);
1740 break;
1741
1742 case IEEE80211_S_INIT:
1743 if (ostate == IEEE80211_S_SCAN) {
1744 sc->sc_flags &= ~IWK_F_SCANNING;
1745 }
1746
1747 /* set LED off after init */
1748 iwk_set_led(sc, 2, 1, 0);
1749 break;
1750 case IEEE80211_S_ASSOC:
1751 if (ostate == IEEE80211_S_SCAN) {
1752 sc->sc_flags &= ~IWK_F_SCANNING;
1753 }
1754
1755 break;
1756 }
1757
1758 mutex_exit(&sc->sc_glock);
1759
1760 err = sc->sc_newstate(ic, nstate, arg);
1761
1762 if (nstate == IEEE80211_S_RUN) {
1763
1764 mutex_enter(&sc->sc_glock);
1765
1766 /*
1767 		 * initialize the receiver
1768 		 * sensitivity calibration
1769 */
1770 err = iwk_rx_sens_init(sc);
1771 if (err) {
1772 cmn_err(CE_WARN, "iwk_newstate(): "
1773 "failed to init RX sensitivity\n");
1774 mutex_exit(&sc->sc_glock);
1775 return (err);
1776 }
1777
1778 		/* initialize the receiver gain balance calibration */
1779 err = iwk_rxgain_diff_init(sc);
1780 if (err) {
1781 cmn_err(CE_WARN, "iwk_newstate(): "
1782 "failed to init phy calibration\n");
1783 mutex_exit(&sc->sc_glock);
1784 return (err);
1785 }
1786
1787 mutex_exit(&sc->sc_glock);
1788
1789 }
1790
1791 return (err);
1792 }
1793
1794 static void
1795 iwk_watchdog(void *arg)
1796 {
1797 iwk_sc_t *sc = arg;
1798 struct ieee80211com *ic = &sc->sc_ic;
1799 #ifdef DEBUG
1800 timeout_id_t timeout_id = ic->ic_watchdog_timer;
1801 #endif
1802
1803 ieee80211_stop_watchdog(ic);
1804
1805 if ((ic->ic_state != IEEE80211_S_AUTH) &&
1806 (ic->ic_state != IEEE80211_S_ASSOC))
1807 return;
1808
1809 if (ic->ic_bss->in_fails > 0) {
1810 IWK_DBG((IWK_DEBUG_80211, "watchdog (0x%x) reset: "
1811 "node (0x%x)\n", timeout_id, &ic->ic_bss));
1812 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1813 } else {
1814 IWK_DBG((IWK_DEBUG_80211, "watchdog (0x%x) timeout: "
1815 "node (0x%x), retry (%d)\n",
1816 timeout_id, &ic->ic_bss, ic->ic_bss->in_fails + 1));
1817 ieee80211_watchdog(ic);
1818 }
1819 }
1820
1821 /*ARGSUSED*/
1822 static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
1823 const uint8_t mac[IEEE80211_ADDR_LEN])
1824 {
1825 iwk_sc_t *sc = (iwk_sc_t *)ic;
1826 iwk_add_sta_t node;
1827 int err;
1828 uint8_t index1;
1829
1830 switch (k->wk_cipher->ic_cipher) {
1831 case IEEE80211_CIPHER_WEP:
1832 case IEEE80211_CIPHER_TKIP:
1833 		return (1); /* software does it. */
1834 case IEEE80211_CIPHER_AES_CCM:
1835 break;
1836 default:
1837 return (0);
1838 }
1839 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_DIS_DECRYPT_MSK |
1840 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
1841
1842 mutex_enter(&sc->sc_glock);
1843
1844 /* update ap/multicast node */
1845 (void) memset(&node, 0, sizeof (node));
1846 if (IEEE80211_IS_MULTICAST(mac)) {
1847 (void) memset(node.bssid, 0xff, 6);
1848 node.id = IWK_BROADCAST_ID;
1849 } else if (ic->ic_opmode == IEEE80211_M_IBSS) {
1850 mutex_exit(&sc->sc_glock);
1851 mutex_enter(&sc->sc_ibss.node_tb_lock);
1852
1853 /*
1854 * search for node in ibss node table
1855 */
1856 for (index1 = IWK_STA_ID; index1 < IWK_STATION_COUNT;
1857 index1++) {
1858 if (sc->sc_ibss.ibss_node_tb[index1].used &&
1859 IEEE80211_ADDR_EQ(sc->sc_ibss.
1860 ibss_node_tb[index1].node.bssid,
1861 mac)) {
1862 break;
1863 }
1864 }
1865 if (index1 >= IWK_BROADCAST_ID) {
1866 cmn_err(CE_WARN, "iwk_key_set(): "
1867 			    "no such node in hardware node table\n");
1868 mutex_exit(&sc->sc_ibss.node_tb_lock);
1869 return (0);
1870 } else {
1871 /*
1872 * configure key for given node in hardware
1873 */
1874 if (k->wk_flags & IEEE80211_KEY_XMIT) {
1875 sc->sc_ibss.ibss_node_tb[index1].
1876 node.key_flags = 0;
1877 sc->sc_ibss.ibss_node_tb[index1].
1878 node.keyp = k->wk_keyix;
1879 } else {
1880 sc->sc_ibss.ibss_node_tb[index1].
1881 node.key_flags = (1 << 14);
1882 sc->sc_ibss.ibss_node_tb[index1].
1883 node.keyp = k->wk_keyix + 4;
1884 }
1885
1886 (void) memcpy(sc->sc_ibss.ibss_node_tb[index1].node.key,
1887 k->wk_key, k->wk_keylen);
1888 sc->sc_ibss.ibss_node_tb[index1].node.key_flags |=
1889 (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1890 sc->sc_ibss.ibss_node_tb[index1].node.key_flags =
1891 LE_16(sc->sc_ibss.ibss_node_tb[index1].
1892 node.key_flags);
1893 sc->sc_ibss.ibss_node_tb[index1].node.sta_mask =
1894 STA_MODIFY_KEY_MASK;
1895 sc->sc_ibss.ibss_node_tb[index1].node.control = 1;
1896
1897 mutex_enter(&sc->sc_glock);
1898 err = iwk_cmd(sc, REPLY_ADD_STA,
1899 &sc->sc_ibss.ibss_node_tb[index1].node,
1900 sizeof (iwk_add_sta_t), 1);
1901 if (err != IWK_SUCCESS) {
1902 cmn_err(CE_WARN, "iwk_key_set(): "
1903 "failed to update IBSS node in hardware\n");
1904 mutex_exit(&sc->sc_glock);
1905 mutex_exit(&sc->sc_ibss.node_tb_lock);
1906 return (0);
1907 }
1908 mutex_exit(&sc->sc_glock);
1909 }
1910 mutex_exit(&sc->sc_ibss.node_tb_lock);
1911 return (1);
1912 } else {
1913 IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid);
1914 node.id = IWK_AP_ID;
1915 }
1916 if (k->wk_flags & IEEE80211_KEY_XMIT) {
1917 node.key_flags = 0;
1918 node.keyp = k->wk_keyix;
1919 } else {
1920 node.key_flags = (1 << 14);
1921 node.keyp = k->wk_keyix + 4;
1922 }
1923 (void) memcpy(node.key, k->wk_key, k->wk_keylen);
1924 node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1925 node.key_flags = LE_16(node.key_flags);
1926 node.sta_mask = STA_MODIFY_KEY_MASK;
1927 node.control = 1;
1928 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
1929 if (err != IWK_SUCCESS) {
1930 cmn_err(CE_WARN, "iwk_key_set():"
1931 "failed to update ap node\n");
1932 mutex_exit(&sc->sc_glock);
1933 return (0);
1934 }
1935 mutex_exit(&sc->sc_glock);
1936 return (1);
1937 }
1938
1939 /*
1940 * begin exclusive access to the mac.
1941 */
1942 static void
1943 iwk_mac_access_enter(iwk_sc_t *sc)
1944 {
1945 uint32_t tmp;
1946 int n;
1947
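/*
 * Request MAC access by setting MAC_ACCESS_REQ in CSR_GP_CNTRL, then
 * poll (up to 1000 * 10us, about 10ms) until the MAC clock is ready
 * and the NIC is not about to go to sleep.
 */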
1948 tmp = IWK_READ(sc, CSR_GP_CNTRL);
1949 IWK_WRITE(sc, CSR_GP_CNTRL,
1950 tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1951
1952 /* wait until we succeed */
1953 for (n = 0; n < 1000; n++) {
1954 if ((IWK_READ(sc, CSR_GP_CNTRL) &
1955 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1956 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1957 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN)
1958 break;
1959 DELAY(10);
1960 }
1961 if (n == 1000)
1962 IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n"));
1963 }
1964
1965 /*
1966 * end exclusive access to the mac.
1967 */
1968 static void
1969 iwk_mac_access_exit(iwk_sc_t *sc)
1970 {
1971 uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
1972 IWK_WRITE(sc, CSR_GP_CNTRL,
1973 tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1974 }
1975
1976 static uint32_t
1977 iwk_mem_read(iwk_sc_t *sc, uint32_t addr)
1978 {
1979 IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
1980 return (IWK_READ(sc, HBUS_TARG_MEM_RDAT));
1981 }
1982
1983 static void
1984 iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1985 {
1986 IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
1987 IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
1988 }
1989
1990 static uint32_t
1991 iwk_reg_read(iwk_sc_t *sc, uint32_t addr)
1992 {
1993 IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
1994 return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT));
1995 }
1996
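/*
 * Periphery registers are reached indirectly through the
 * HBUS_TARG_PRPH_* address/data pairs; the (3 << 24) OR-ed into the
 * address presumably selects a full 4-byte access (an assumption --
 * the driver uses the same value for every periphery access).
 */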
1997 static void
1998 iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1999 {
2000 IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
2001 IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
2002 }
2003
2004 static void
2005 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr,
2006 uint32_t *data, int wlen)
2007 {
2008 for (; wlen > 0; wlen--, data++, addr += 4)
2009 iwk_reg_write(sc, addr, LE_32(*data));
2010 }
2011
2012
2013 /*
2014 * ucode load/initialization steps:
2015 * 1) load Bootstrap State Machine (BSM) with "bootstrap" uCode image.
2016 * BSM contains a small memory that *always* stays powered up, so it can
2017 * retain the bootstrap program even when the card is in a power-saving
2018 * power-down state. The BSM loads the small program into ARC processor's
2019 * instruction memory when triggered by power-up.
2020 * 2) load Initialize image via bootstrap program.
2021 * The Initialize image sets up regulatory and calibration data for the
2022 * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed.
2023 * The 4965 reply contains calibration data for temperature, voltage and tx gain
2024 * correction.
2025 */
2026 static int
2027 iwk_load_firmware(iwk_sc_t *sc)
2028 {
2029 uint32_t *boot_fw = (uint32_t *)sc->sc_boot;
2030 uint32_t size = LE_32(sc->sc_hdr->bootsz);
2031 int n, err = IWK_SUCCESS;
2032
2033 /*
2034 * The BSM is given the physical address bits [4-35] of the initialize
2035 * uCode here. In the initialize alive notify interrupt the physical
2036 * address of the runtime ucode will be set for loading.
2037 */
2038 iwk_mac_access_enter(sc);
2039
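/*
 * The BSM_DRAM_*_PTR registers take the DRAM physical address shifted
 * right by 4 bits, i.e. address bits [35:4] as noted in the comment
 * above.
 */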
2040 iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
2041 sc->sc_dma_fw_init_text.cookie.dmac_address >> 4);
2042 iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
2043 sc->sc_dma_fw_init_data.cookie.dmac_address >> 4);
2044 iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
2045 sc->sc_dma_fw_init_text.cookie.dmac_size);
2046 iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
2047 sc->sc_dma_fw_init_data.cookie.dmac_size);
2048
2049 /* load bootstrap code into BSM memory */
2050 iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw,
2051 size / sizeof (uint32_t));
2052
2053 iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0);
2054 iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
2055 iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t));
2056
2057 /*
2058 * prepare to load initialize uCode
2059 */
2060 iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
2061
2062 /* wait while the adapter is busy loading the firmware */
2063 for (n = 0; n < 1000; n++) {
2064 if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) &
2065 BSM_WR_CTRL_REG_BIT_START))
2066 break;
2067 DELAY(10);
2068 }
2069 if (n == 1000) {
2070 cmn_err(CE_WARN, "timeout transferring firmware\n");
2071 err = ETIMEDOUT;
2072 return (err);
2073 }
2074
2075 /* for future power-save mode use */
2076 iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
2077
2078 iwk_mac_access_exit(sc);
2079
2080 return (err);
2081 }
2082
2083 /*ARGSUSED*/
2084 static void
2085 iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
2086 {
2087 ieee80211com_t *ic = &sc->sc_ic;
2088 iwk_rx_ring_t *ring = &sc->sc_rxq;
2089 iwk_rx_phy_res_t *stat;
2090 ieee80211_node_t *in;
2091 uint32_t *tail;
2092 struct ieee80211_frame *wh;
2093 mblk_t *mp;
2094 uint16_t len, rssi, mrssi, agc;
2095 int16_t t;
2096 uint32_t ants, i;
2097 struct iwk_rx_non_cfg_phy *phyinfo;
2098 uint32_t crc;
2099
2100 /* assuming not 11n here. cope with 11n in phase-II */
2101 stat = (iwk_rx_phy_res_t *)(desc + 1);
2102 if (stat->cfg_phy_cnt > 20) {
2103 return;
2104 }
2105
2106 for (i = 0; i < RX_RES_PHY_CNT; i++)
2107 stat->non_cfg_phy[i] = LE_16(stat->non_cfg_phy[i]);
2108
2109 phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy;
2110 agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS;
2111 mrssi = 0;
2112 ants = (LE_16(stat->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK) >>
2113 RX_PHY_FLAGS_ANTENNAE_OFFSET;
2114 for (i = 0; i < 3; i++) {
2115 if (ants & (1 << i))
2116 mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]);
2117 }
2118 t = mrssi - agc - 44; /* t is the dBm value */
2119 /*
2120 * convert the dBm value to an approximate percentage
2121 */
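/*
 * Worked example (illustrative, not from the original source): at
 * t = -20 dBm the quotient below is exactly 100; at t = -60 dBm it is
 * (562500 - 40 * (1125 + 62 * 40)) / 5625 = 418300 / 5625 = 74. The
 * result is then clamped to the range [1, 100].
 */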
2122 rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t))) /
2123 (75 * 75);
2124 if (rssi > 100)
2125 rssi = 100;
2126 if (rssi < 1)
2127 rssi = 1;
2128 len = LE_16(stat->byte_count);
2129 tail = (uint32_t *)((caddr_t)(stat + 1) + stat->cfg_phy_cnt + len);
2130 bcopy(tail, &crc, 4);
2131
2132 IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d "
2133 "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
2134 "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
2135 len, stat->rate.r.s.rate, LE_16(stat->channel),
2136 LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
2137 stat->cfg_phy_cnt, LE_32(crc)));
2138
2139 if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
2140 IWK_DBG((IWK_DEBUG_RX, "rx frame size out of range\n"));
2141 return;
2142 }
2143
2144 /*
2145 * discard Rx frames with bad CRC
2146 */
2147 if ((LE_32(crc) &
2148 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
2149 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2150 IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n",
2151 LE_32(crc)));
2152 sc->sc_rx_err++;
2153 return;
2154 }
2155
2156 wh = (struct ieee80211_frame *)
2157 ((uint8_t *)(stat + 1)+ stat->cfg_phy_cnt);
2158 if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) {
2159 sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
2160 IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n",
2161 sc->sc_assoc_id));
2162 }
2163 #ifdef DEBUG
2164 if (iwk_dbg_flags & IWK_DEBUG_RX)
2165 ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
2166 #endif
2167 in = ieee80211_find_rxnode(ic, wh);
2168 mp = allocb(len, BPRI_MED);
2169 if (mp) {
2170 (void) memcpy(mp->b_wptr, wh, len);
2171 mp->b_wptr += len;
2172
2173 /* send the frame to the 802.11 layer */
2174 (void) ieee80211_input(ic, mp, in, rssi, 0);
2175 } else {
2176 sc->sc_rx_nobuf++;
2177 IWK_DBG((IWK_DEBUG_RX,
2178 "iwk_rx_intr(): alloc rx buf failed\n"));
2179 }
2180 /* release node reference */
2181 ieee80211_free_node(in);
2182 }
2183
2184 /*ARGSUSED*/
2185 static void
2186 iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
2187 {
2188 ieee80211com_t *ic = &sc->sc_ic;
2189 iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2190 iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1);
2191 iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss;
2192
2193 IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d"
2194 " retries=%d frame_count=%x nkill=%d "
2195 "rate=%x duration=%d status=%x\n",
2196 desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count,
2197 stat->bt_kill_count, stat->rate.r.s.rate,
2198 LE_16(stat->duration), LE_32(stat->status)));
2199
2200 amrr->txcnt++;
2201 IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt));
2202 if (stat->ntries > 0) {
2203 amrr->retrycnt++;
2204 sc->sc_tx_retries++;
2205 IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n",
2206 sc->sc_tx_retries));
2207 }
2208
2209 sc->sc_tx_timer = 0;
2210
2211 mutex_enter(&sc->sc_tx_lock);
2212 ring->queued--;
2213 if (ring->queued < 0)
2214 ring->queued = 0;
2215 if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) {
2216 sc->sc_need_reschedule = 0;
2217 mutex_exit(&sc->sc_tx_lock);
2218 mac_tx_update(ic->ic_mach);
2219 mutex_enter(&sc->sc_tx_lock);
2220 }
2221 mutex_exit(&sc->sc_tx_lock);
2222 }
2223
2224 static void
2225 iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2226 {
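/*
 * Only responses arriving on the command queue (qid 4, presumably
 * IWK_CMD_QUEUE_NUM) wake up a thread blocked in iwk_cmd().
 */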
2227 if ((desc->hdr.qid & 7) != 4) {
2228 return;
2229 }
2230 mutex_enter(&sc->sc_glock);
2231 sc->sc_flags |= IWK_F_CMD_DONE;
2232 cv_signal(&sc->sc_cmd_cv);
2233 mutex_exit(&sc->sc_glock);
2234 IWK_DBG((IWK_DEBUG_CMD, "rx cmd: "
2235 "qid=%x idx=%d flags=%x type=0x%x\n",
2236 desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2237 desc->hdr.type));
2238 }
2239
2240 static void
2241 iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2242 {
2243 uint32_t base, i;
2244 struct iwk_alive_resp *ar =
2245 (struct iwk_alive_resp *)(desc + 1);
2246
2247 /* the microcontroller is ready */
2248 IWK_DBG((IWK_DEBUG_FW,
2249 "microcode alive notification minor: %x major: %x type:"
2250 " %x subtype: %x\n",
2251 ar->ucode_minor, ar->ucode_major, ar->ver_type, ar->ver_subtype));
2252
2253 if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2254 IWK_DBG((IWK_DEBUG_FW,
2255 "microcontroller initialization failed\n"));
2256 }
2257 if (ar->ver_subtype == INITIALIZE_SUBTYPE) {
2258 IWK_DBG((IWK_DEBUG_FW,
2259 "initialization alive received.\n"));
2260 (void) memcpy(&sc->sc_card_alive_init, ar,
2261 sizeof (struct iwk_init_alive_resp));
2262 /* XXX get temperature */
2263 iwk_mac_access_enter(sc);
2264 iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
2265 sc->sc_dma_fw_text.cookie.dmac_address >> 4);
2266 iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
2267 sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4);
2268 iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
2269 sc->sc_dma_fw_data.cookie.dmac_size);
2270 iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
2271 sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000);
2272 iwk_mac_access_exit(sc);
2273 } else {
2274 IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n"));
2275 (void) memcpy(&sc->sc_card_alive_run, ar,
2276 sizeof (struct iwk_alive_resp));
2277
2278 /*
2279 * Init SCD related registers to make Tx work. XXX
2280 */
2281 iwk_mac_access_enter(sc);
2282
2283 /* read sram address of data base */
2284 sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR);
2285
2286 /* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */
2287 for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0;
2288 i < 128; i += 4)
2289 iwk_mem_write(sc, base + i, 0);
2290
2291 /* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */
2292 for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET, i = 0;
2293 i < 256; i += 4)
2294 iwk_mem_write(sc, base + i, 0);
2295
2296 /* clear and init SCD_TRANSLATE_TBL_OFFSET area. 32 bytes */
2297 for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET, i = 0;
2298 i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
2299 iwk_mem_write(sc, base + i, 0);
2300
2301 iwk_reg_write(sc, SCD_DRAM_BASE_ADDR,
2302 sc->sc_dma_sh.cookie.dmac_address >> 10);
2303 iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0);
2304
2305 /* initiate the tx queues */
2306 for (i = 0; i < IWK_NUM_QUEUES; i++) {
2307 iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0);
2308 IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8));
2309 iwk_mem_write(sc, sc->sc_scd_base +
2310 SCD_CONTEXT_QUEUE_OFFSET(i),
2311 (SCD_WIN_SIZE & 0x7f));
2312 iwk_mem_write(sc, sc->sc_scd_base +
2313 SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t),
2314 (SCD_FRAME_LIMIT & 0x7f) << 16);
2315 }
2316 /* enable interrupts for queues 0-7 */
2317 iwk_reg_write(sc, SCD_INTERRUPT_MASK,
2318 (1 << IWK_NUM_QUEUES) - 1);
2319 /* enable each channel 0-7 */
2320 iwk_reg_write(sc, SCD_TXFACT,
2321 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
2322 /*
2323 * queues 0-7 map to FIFOs 0-7 and
2324 * all queues work in FIFO mode (non-scheduler-ack)
2325 */
2326 for (i = 0; i < 7; i++) {
2327 iwk_reg_write(sc,
2328 SCD_QUEUE_STATUS_BITS(i),
2329 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
2330 (i << SCD_QUEUE_STTS_REG_POS_TXF)|
2331 SCD_QUEUE_STTS_REG_MSK);
2332 }
2333 iwk_mac_access_exit(sc);
2334
2335 sc->sc_flags |= IWK_F_FW_INIT;
2336 cv_signal(&sc->sc_fw_cv);
2337 }
2338
2339 }
2340
2341 static uint_t
2342 /* LINTED: argument unused in function: unused */
2343 iwk_rx_softintr(caddr_t arg, caddr_t unused)
2344 {
2345 iwk_sc_t *sc = (iwk_sc_t *)arg;
2346 ieee80211com_t *ic = &sc->sc_ic;
2347 iwk_rx_desc_t *desc;
2348 iwk_rx_data_t *data;
2349 uint32_t index;
2350
2351 mutex_enter(&sc->sc_glock);
2352 if (sc->sc_rx_softint_pending != 1) {
2353 mutex_exit(&sc->sc_glock);
2354 return (DDI_INTR_UNCLAIMED);
2355 }
2356 /* disable interrupts */
2357 IWK_WRITE(sc, CSR_INT_MASK, 0);
2358 mutex_exit(&sc->sc_glock);
2359
2360 /*
2361 * the firmware has advanced the index of the rx queue; read it and
2362 * process every entry up to that point.
2363 */
2364 index = sc->sc_shared->val0 & 0xfff;
2365
2366 while (sc->sc_rxq.cur != index) {
2367 data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2368 desc = (iwk_rx_desc_t *)data->dma_data.mem_va;
2369
2370 IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d"
2371 " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2372 index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2373 desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2374
2375 /* a command other than a tx needs to be acknowledged to its issuer */
2376 if (!(desc->hdr.qid & 0x80) &&
2377 (desc->hdr.type != REPLY_RX_PHY_CMD) &&
2378 (desc->hdr.type != REPLY_TX) &&
2379 (desc->hdr.type != REPLY_TX_PWR_TABLE_CMD) &&
2380 (desc->hdr.type != REPLY_PHY_CALIBRATION_CMD) &&
2381 (desc->hdr.type != SENSITIVITY_CMD))
2382 iwk_cmd_intr(sc, desc);
2383
2384 switch (desc->hdr.type) {
2385 case REPLY_4965_RX:
2386 iwk_rx_intr(sc, desc, data);
2387 break;
2388
2389 case REPLY_TX:
2390 iwk_tx_intr(sc, desc, data);
2391 break;
2392
2393 case REPLY_ALIVE:
2394 iwk_ucode_alive(sc, desc);
2395 break;
2396
2397 case CARD_STATE_NOTIFICATION:
2398 {
2399 uint32_t *status = (uint32_t *)(desc + 1);
2400
2401 IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n",
2402 LE_32(*status)));
2403
2404 if (LE_32(*status) & 1) {
2405 /*
2406 * the radio switch has been turned off. This is
2407 * treated as a hw error; the
2408 * iwk_thread() tries to recover once the
2409 * switch is turned back on.
2410 */
2411 cmn_err(CE_NOTE,
2412 "iwk_rx_softintr(): "
2413 "Radio transmitter is off\n");
2414 sc->sc_ostate = sc->sc_ic.ic_state;
2415 ieee80211_new_state(&sc->sc_ic,
2416 IEEE80211_S_INIT, -1);
2417 sc->sc_flags |=
2418 (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF);
2419 }
2420 break;
2421 }
2422 case SCAN_START_NOTIFICATION:
2423 {
2424 iwk_start_scan_t *scan =
2425 (iwk_start_scan_t *)(desc + 1);
2426
2427 IWK_DBG((IWK_DEBUG_SCAN,
2428 "scanning channel %d status %x\n",
2429 scan->chan, LE_32(scan->status)));
2430
2431 ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2432 break;
2433 }
2434 case SCAN_COMPLETE_NOTIFICATION:
2435 {
2436 iwk_stop_scan_t *scan =
2437 (iwk_stop_scan_t *)(desc + 1);
2438
2439 IWK_DBG((IWK_DEBUG_SCAN,
2440 "completed channel %d (burst of %d) status %02x\n",
2441 scan->chan, scan->nchan, scan->status));
2442
2443 sc->sc_scan_pending++;
2444 break;
2445 }
2446 case STATISTICS_NOTIFICATION:
2447 /* handle statistics notification */
2448 iwk_statistics_notify(sc, desc);
2449 break;
2450 }
2451
2452 sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2453 }
2454
2455 /*
2456 * the driver has dealt with what was received in the rx queue; tell
2457 * the firmware how far it has processed.
2458 */
2459 index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
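/*
 * The write pointer handed back to the firmware is rounded down to a
 * multiple of 8 (index & ~7); the hardware appears to require 8-entry
 * alignment for FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
 */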
2460 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2461
2462 mutex_enter(&sc->sc_glock);
2463 /* re-enable interrupts */
2464 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2465 sc->sc_rx_softint_pending = 0;
2466 mutex_exit(&sc->sc_glock);
2467
2468 return (DDI_INTR_CLAIMED);
2469 }
2470
2471 static uint_t
2472 /* LINTED: argument unused in function: unused */
2473 iwk_intr(caddr_t arg, caddr_t unused)
2474 {
2475 iwk_sc_t *sc = (iwk_sc_t *)arg;
2476 uint32_t r, rfh;
2477
2478 mutex_enter(&sc->sc_glock);
2479
2480 if (sc->sc_flags & IWK_F_SUSPEND) {
2481 mutex_exit(&sc->sc_glock);
2482 return (DDI_INTR_UNCLAIMED);
2483 }
2484
2485 r = IWK_READ(sc, CSR_INT);
2486 if (r == 0 || r == 0xffffffff) {
2487 mutex_exit(&sc->sc_glock);
2488 return (DDI_INTR_UNCLAIMED);
2489 }
2490
2491 IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));
2492
2493 rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
2494 IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
2495 /* disable interrupts */
2496 IWK_WRITE(sc, CSR_INT_MASK, 0);
2497 /* ack interrupts */
2498 IWK_WRITE(sc, CSR_INT, r);
2499 IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2500
2501 if (sc->sc_soft_hdl == NULL) {
2502 mutex_exit(&sc->sc_glock);
2503 return (DDI_INTR_CLAIMED);
2504 }
2505 if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2506 cmn_err(CE_WARN, "fatal firmware error\n");
2507 mutex_exit(&sc->sc_glock);
2508 #ifdef DEBUG
2509 /* dump event and error logs to dmesg */
2510 iwk_write_error_log(sc);
2511 iwk_write_event_log(sc);
2512 #endif /* DEBUG */
2513 iwk_stop(sc);
2514 sc->sc_ostate = sc->sc_ic.ic_state;
2515
2516 /* not capable of fast recovery */
2517 if (!IWK_CHK_FAST_RECOVER(sc))
2518 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2519
2520 sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2521 return (DDI_INTR_CLAIMED);
2522 }
2523
2524 if (r & BIT_INT_RF_KILL) {
2525 uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
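/*
 * Bit 27 of CSR_GP_CNTRL reflects the hardware RF-kill switch
 * position (assumed to be the same bit iwk_thread() checks via
 * CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW).
 */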
2526 if (tmp & (1 << 27))
2527 cmn_err(CE_NOTE, "RF switch: radio on\n");
2528 }
2529
2530 if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2531 (rfh & FH_INT_RX_MASK)) {
2532 sc->sc_rx_softint_pending = 1;
2533 (void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2534 }
2535
2536 if (r & BIT_INT_ALIVE) {
2537 IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
2538 }
2539
2540 /* re-enable interrupts */
2541 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2542 mutex_exit(&sc->sc_glock);
2543
2544 return (DDI_INTR_CLAIMED);
2545 }
2546
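/*
 * Map an 802.11 rate in 0.5 Mb/s units (2 = 1 Mb/s ... 108 = 54 Mb/s)
 * to the 4965 PLCP rate code used in tx commands; unknown rates map
 * to 0.
 */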
2547 static uint8_t
2548 iwk_rate_to_plcp(int rate)
2549 {
2550 uint8_t ret;
2551
2552 switch (rate) {
2553 /* CCK rates */
2554 case 2:
2555 ret = 0xa;
2556 break;
2557 case 4:
2558 ret = 0x14;
2559 break;
2560 case 11:
2561 ret = 0x37;
2562 break;
2563 case 22:
2564 ret = 0x6e;
2565 break;
2566 /* OFDM rates */
2567 case 12:
2568 ret = 0xd;
2569 break;
2570 case 18:
2571 ret = 0xf;
2572 break;
2573 case 24:
2574 ret = 0x5;
2575 break;
2576 case 36:
2577 ret = 0x7;
2578 break;
2579 case 48:
2580 ret = 0x9;
2581 break;
2582 case 72:
2583 ret = 0xb;
2584 break;
2585 case 96:
2586 ret = 0x1;
2587 break;
2588 case 108:
2589 ret = 0x3;
2590 break;
2591 default:
2592 ret = 0;
2593 break;
2594 }
2595 return (ret);
2596 }
2597
2598 static mblk_t *
2599 iwk_m_tx(void *arg, mblk_t *mp)
2600 {
2601 iwk_sc_t *sc = (iwk_sc_t *)arg;
2602 ieee80211com_t *ic = &sc->sc_ic;
2603 mblk_t *next;
2604
2605 if (sc->sc_flags & IWK_F_SUSPEND) {
2606 freemsgchain(mp);
2607 return (NULL);
2608 }
2609
2610 if (ic->ic_state != IEEE80211_S_RUN) {
2611 freemsgchain(mp);
2612 return (NULL);
2613 }
2614
2615 if ((sc->sc_flags & IWK_F_HW_ERR_RECOVER) &&
2616 IWK_CHK_FAST_RECOVER(sc)) {
2617 IWK_DBG((IWK_DEBUG_FW, "iwk_m_tx(): hold queue\n"));
2618 return (mp);
2619 }
2620
2621 while (mp != NULL) {
2622 next = mp->b_next;
2623 mp->b_next = NULL;
2624 if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2625 mp->b_next = next;
2626 break;
2627 }
2628 mp = next;
2629 }
2630 return (mp);
2631 }
2632
2633 /* ARGSUSED */
2634 static int
2635 iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2636 {
2637 iwk_sc_t *sc = (iwk_sc_t *)ic;
2638 iwk_tx_ring_t *ring;
2639 iwk_tx_desc_t *desc;
2640 iwk_tx_data_t *data;
2641 iwk_cmd_t *cmd;
2642 iwk_tx_cmd_t *tx;
2643 ieee80211_node_t *in;
2644 struct ieee80211_frame *wh;
2645 struct ieee80211_key *k = NULL;
2646 mblk_t *m, *m0;
2647 int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS;
2648 uint16_t masks = 0;
2649 uint8_t index, index1, index2;
2650
2651 ring = &sc->sc_txq[0];
2652 data = &ring->data[ring->cur];
2653 desc = data->desc;
2654 cmd = data->cmd;
2655 bzero(desc, sizeof (*desc));
2656 bzero(cmd, sizeof (*cmd));
2657
2658 mutex_enter(&sc->sc_tx_lock);
2659 if (sc->sc_flags & IWK_F_SUSPEND) {
2660 mutex_exit(&sc->sc_tx_lock);
2661 if ((type & IEEE80211_FC0_TYPE_MASK) !=
2662 IEEE80211_FC0_TYPE_DATA) {
2663 freemsg(mp);
2664 }
2665 err = IWK_FAIL;
2666 goto exit;
2667 }
2668
2669 if (ring->queued > ring->count - 64) {
2670 IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n"));
2671 sc->sc_need_reschedule = 1;
2672 mutex_exit(&sc->sc_tx_lock);
2673 if ((type & IEEE80211_FC0_TYPE_MASK) !=
2674 IEEE80211_FC0_TYPE_DATA) {
2675 freemsg(mp);
2676 }
2677 sc->sc_tx_nobuf++;
2678 err = IWK_FAIL;
2679 goto exit;
2680 }
2681 mutex_exit(&sc->sc_tx_lock);
2682
2683 hdrlen = sizeof (struct ieee80211_frame);
2684
2685 m = allocb(msgdsize(mp) + 32, BPRI_MED);
2686 if (m == NULL) { /* cannot alloc buf, drop this packet */
2687 cmn_err(CE_WARN,
2688 "iwk_send(): failed to allocate msgbuf\n");
2689 freemsg(mp);
2690 err = IWK_SUCCESS;
2691 goto exit;
2692 }
2693 for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
2694 mblen = MBLKL(m0);
2695 (void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
2696 off += mblen;
2697 }
2698 m->b_wptr += off;
2699 freemsg(mp);
2700
2701 wh = (struct ieee80211_frame *)m->b_rptr;
2702
2703 if (ic->ic_opmode == IEEE80211_M_IBSS &&
2704 (!(IEEE80211_IS_MULTICAST(wh->i_addr1)))) {
2705 mutex_enter(&sc->sc_glock);
2706 mutex_enter(&sc->sc_ibss.node_tb_lock);
2707
2708 /*
2709 * search for node in ibss node table
2710 */
2711 for (index1 = IWK_STA_ID;
2712 index1 < IWK_STATION_COUNT; index1++) {
2713 if (sc->sc_ibss.ibss_node_tb[index1].used &&
2714 IEEE80211_ADDR_EQ(sc->sc_ibss.
2715 ibss_node_tb[index1].node.bssid,
2716 wh->i_addr1)) {
2717 break;
2718 }
2719 }
2720
2721 /*
2722 * if the node was not found in the ibss node table
2723 */
2724 if (index1 >= IWK_BROADCAST_ID) {
2725 err = iwk_clean_add_node_ibss(ic,
2726 wh->i_addr1, &index2);
2727 if (err != IWK_SUCCESS) {
2728 cmn_err(CE_WARN, "iwk_send(): "
2729 "failed to clean all nodes "
2730 "and add one node\n");
2731 mutex_exit(&sc->sc_ibss.node_tb_lock);
2732 mutex_exit(&sc->sc_glock);
2733 freemsg(m);
2734 sc->sc_tx_err++;
2735 err = IWK_SUCCESS;
2736 goto exit;
2737 }
2738 index = index2;
2739 } else {
2740 index = index1;
2741 }
2742 mutex_exit(&sc->sc_ibss.node_tb_lock);
2743 mutex_exit(&sc->sc_glock);
2744 }
2745
2746 in = ieee80211_find_txnode(ic, wh->i_addr1);
2747 if (in == NULL) {
2748 cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n");
2749 freemsg(m);
2750 sc->sc_tx_err++;
2751 err = IWK_SUCCESS;
2752 goto exit;
2753 }
2754 (void) ieee80211_encap(ic, m, in);
2755
2756 cmd->hdr.type = REPLY_TX;
2757 cmd->hdr.flags = 0;
2758 cmd->hdr.qid = ring->qid;
2759 cmd->hdr.idx = ring->cur;
2760
2761 tx = (iwk_tx_cmd_t *)cmd->data;
2762 tx->tx_flags = 0;
2763
2764 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2765 tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
2766 } else {
2767 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2768 }
2769
2770 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2771 k = ieee80211_crypto_encap(ic, m);
2772 if (k == NULL) {
2773 freemsg(m);
2774 sc->sc_tx_err++;
2775 err = IWK_SUCCESS;
2776 goto exit;
2777 }
2778
2779 if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
2780 tx->sec_ctl = 2; /* for CCMP */
2781 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2782 (void) memcpy(&tx->key, k->wk_key, k->wk_keylen);
2783 }
2784
2785 /* packet header may have moved, reset our local pointer */
2786 wh = (struct ieee80211_frame *)m->b_rptr;
2787 }
2788
2789 len = msgdsize(m);
2790
2791 #ifdef DEBUG
2792 if (iwk_dbg_flags & IWK_DEBUG_TX)
2793 ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
2794 #endif
2795
2796 /* pickup a rate */
2797 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2798 IEEE80211_FC0_TYPE_MGT) {
2799 /* mgmt frames are sent at 1M */
2800 rate = in->in_rates.ir_rates[0];
2801 } else {
2802 /*
2803 * do it here for the software way rate control.
2804 * later for rate scaling in hardware.
2805 * maybe like the following, for management frame:
2806 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1;
2807 * for data frame:
2808 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK));
2809 * rate = in->in_rates.ir_rates[in->in_txrate];
2810 * tx->initial_rate_index = 1;
2811 *
2812 * now the txrate is determined in tx cmd flags, set to the
2813 * max value 54M for 11g and 11M for 11b.
2814 */
2815
2816 if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
2817 rate = ic->ic_fixed_rate;
2818 } else {
2819 rate = in->in_rates.ir_rates[in->in_txrate];
2820 }
2821 }
2822 rate &= IEEE80211_RATE_VAL;
2823 IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x",
2824 in->in_txrate, in->in_rates.ir_nrates, rate));
2825
2826 tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK));
2827
2828 len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4);
2829 if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen))
2830 tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK);
2831
2832 /* retrieve destination node's id */
2833 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2834 tx->sta_id = IWK_BROADCAST_ID;
2835 } else {
2836 if (ic->ic_opmode == IEEE80211_M_IBSS)
2837 tx->sta_id = index;
2838 else
2839 tx->sta_id = IWK_AP_ID;
2840 }
2841
2842 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2843 IEEE80211_FC0_TYPE_MGT) {
2844 /* tell h/w to set timestamp in probe responses */
2845 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2846 IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2847 tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
2848
2849 if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2850 IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
2851 ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2852 IEEE80211_FC0_SUBTYPE_REASSOC_REQ))
2853 tx->timeout.pm_frame_timeout = LE_16(3);
2854 else
2855 tx->timeout.pm_frame_timeout = LE_16(2);
2856 } else
2857 tx->timeout.pm_frame_timeout = 0;
2858 if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
2859 masks |= RATE_MCS_CCK_MSK;
2860
2861 masks |= RATE_MCS_ANT_B_MSK;
2862 tx->rate.r.rate_n_flags = LE_32(iwk_rate_to_plcp(rate) | masks);
2863
2864 IWK_DBG((IWK_DEBUG_TX, "tx flag = %x",
2865 LE_32(tx->tx_flags)));
2866
2867 tx->rts_retry_limit = 60;
2868 tx->data_retry_limit = 15;
2869
2870 tx->stop_time.life_time = LE_32(0xffffffff);
2871
2872 tx->len = LE_16(len);
2873
2874 tx->dram_lsb_ptr =
2875 LE_32(data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch));
2876 tx->dram_msb_ptr = 0;
2877 tx->driver_txop = 0;
2878 tx->next_frame_len = 0;
2879
2880 (void) memcpy(tx + 1, m->b_rptr, hdrlen);
2881 m->b_rptr += hdrlen;
2882 (void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
2883
2884 IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d",
2885 ring->qid, ring->cur, len));
2886
2887 /*
2888 * first segment includes the tx cmd plus the 802.11 header,
2889 * the second includes the remaining of the 802.11 frame.
2890 */
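/*
 * val0 carries the TB (transfer buffer) count in its upper bits:
 * iwk_cmd() uses 1 << 24 for a single segment, 2 << 24 here for two.
 * This layout is inferred from usage in this driver, not documented
 * here.
 */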
2891 desc->val0 = 2 << 24;
2892 desc->pa[0].tb1_addr = data->paddr_cmd;
2893 desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
2894 ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
2895 desc->pa[0].val2 =
2896 ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
2897 ((len - hdrlen) << 20);
2898 IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x "
2899 "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
2900 data->paddr_cmd, data->dma_data.cookie.dmac_address,
2901 len0, len - hdrlen, LE_32(desc->pa[0].val1),
2902 LE_32(desc->pa[0].val2)));
2903
2904 mutex_enter(&sc->sc_tx_lock);
2905 ring->queued++;
2906 mutex_exit(&sc->sc_tx_lock);
2907
2908 /* kick ring */
2909 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2910 tfd_offset[ring->cur].val = 8 + len;
2911 if (ring->cur < IWK_MAX_WIN_SIZE) {
2912 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2913 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len;
2914 }
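/*
 * The byte count entry is duplicated at IWK_QUEUE_SIZE + cur for the
 * first IWK_MAX_WIN_SIZE slots, apparently so the scheduler can read
 * a wrapped copy of the table; iwk_cmd() does the same for commands.
 */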
2915
2916 IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
2917 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
2918
2919 ring->cur = (ring->cur + 1) % ring->count;
2920 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2921 freemsg(m);
2922 /* release node reference */
2923 ieee80211_free_node(in);
2924
2925 ic->ic_stats.is_tx_bytes += len;
2926 ic->ic_stats.is_tx_frags++;
2927
2928 if (sc->sc_tx_timer == 0)
2929 sc->sc_tx_timer = 4;
2930
2931 exit:
2932 return (err);
2933 }
2934
2935 static void
2936 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
2937 {
2938 iwk_sc_t *sc = (iwk_sc_t *)arg;
2939 ieee80211com_t *ic = &sc->sc_ic;
2940
2941 enum ieee80211_opmode oldmod;
2942 iwk_tx_power_table_cmd_t txpower;
2943 iwk_add_sta_t node;
2944 iwk_link_quality_cmd_t link_quality;
2945 uint16_t masks = 0;
2946 int i, err, err1;
2947
2948 oldmod = ic->ic_opmode;
2949
2950 err = ieee80211_ioctl(ic, wq, mp);
2951
2952 /*
2953 * return to STA mode
2954 */
2955 if ((0 == err || ENETRESET == err) && (oldmod != ic->ic_opmode) &&
2956 (ic->ic_opmode == IEEE80211_M_STA)) {
2957 /* configure rxon */
2958 (void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
2959 IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
2960 IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
2961 sc->sc_config.chan =
2962 LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
2963 sc->sc_config.flags = LE_32(RXON_FLG_TSF2HOST_MSK |
2964 RXON_FLG_AUTO_DETECT_MSK |
2965 RXON_FLG_BAND_24G_MSK);
2966 sc->sc_config.flags &= LE_32(~RXON_FLG_CCK_MSK);
2967 switch (ic->ic_opmode) {
2968 case IEEE80211_M_STA:
2969 sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
2970 sc->sc_config.filter_flags |=
2971 LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2972 RXON_FILTER_DIS_DECRYPT_MSK |
2973 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
2974 break;
2975 case IEEE80211_M_IBSS:
2976 case IEEE80211_M_AHDEMO:
2977 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
2978 sc->sc_config.flags |=
2979 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
2980 sc->sc_config.filter_flags =
2981 LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2982 RXON_FILTER_DIS_DECRYPT_MSK |
2983 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
2984 break;
2985 case IEEE80211_M_HOSTAP:
2986 sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
2987 break;
2988 case IEEE80211_M_MONITOR:
2989 sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
2990 sc->sc_config.filter_flags |=
2991 LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2992 RXON_FILTER_CTL2HOST_MSK |
2993 RXON_FILTER_PROMISC_MSK);
2994 break;
2995 }
2996 sc->sc_config.cck_basic_rates = 0x0f;
2997 sc->sc_config.ofdm_basic_rates = 0xff;
2998 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
2999 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
3000 /* set antenna */
3001 mutex_enter(&sc->sc_glock);
3002 sc->sc_config.rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3003 (0x7 << RXON_RX_CHAIN_VALID_POS) |
3004 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3005 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3006 err1 = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3007 sizeof (iwk_rxon_cmd_t), 1);
3008 if (err1 != IWK_SUCCESS) {
3009 cmn_err(CE_WARN, "iwk_m_ioctl(): "
3010 "failed to set configure command"
3011 " please run (ifconfig unplumb and"
3012 " ifconfig plumb)\n");
3013 }
3014 /*
3015 * set Tx power for 2.4GHz channels
3016 * (need further investigation. fix tx power at present)
3017 */
3018 (void) memset(&txpower, 0, sizeof (txpower));
3019 txpower.band = 1; /* for 2.4G */
3020 txpower.channel = sc->sc_config.chan;
3021 txpower.channel_normal_width = 0;
3022 for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) {
3023 txpower.tx_power.ht_ofdm_power[i].
3024 s.ramon_tx_gain = LE_16(0x3f3f);
3025 txpower.tx_power.ht_ofdm_power[i].
3026 s.dsp_predis_atten = LE_16(110 | (110 << 8));
3027 }
3028 txpower.tx_power.legacy_cck_power.s.
3029 ramon_tx_gain = LE_16(0x3f3f);
3030 txpower.tx_power.legacy_cck_power.s.
3031 dsp_predis_atten = LE_16(110 | (110 << 8));
3032 err1 = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
3033 sizeof (txpower), 1);
3034 if (err1 != IWK_SUCCESS) {
3035 cmn_err(CE_WARN, "iwk_m_ioctl(): failed to set txpower"
3036 " please run (ifconfig unplumb "
3037 "and ifconfig plumb)\n");
3038 }
3039 /* add broadcast node so that we can send broadcast frame */
3040 (void) memset(&node, 0, sizeof (node));
3041 (void) memset(node.bssid, 0xff, 6);
3042 node.id = IWK_BROADCAST_ID;
3043 err1 = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
3044 if (err1 != IWK_SUCCESS) {
3045 cmn_err(CE_WARN, "iwk_m_ioctl(): "
3046 "failed to add broadcast node\n");
3047 }
3048
3049 /* TX_LINK_QUALITY cmd */
3050 (void) memset(&link_quality, 0, sizeof (link_quality));
3051 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3052 masks |= RATE_MCS_CCK_MSK;
3053 masks |= RATE_MCS_ANT_B_MSK;
3054 masks &= ~RATE_MCS_ANT_A_MSK;
3055 link_quality.rate_n_flags[i] =
3056 LE_32(iwk_rate_to_plcp(2) | masks);
3057 }
3058 link_quality.general_params.single_stream_ant_msk = 2;
3059 link_quality.general_params.dual_stream_ant_msk = 3;
3060 link_quality.agg_params.agg_dis_start_th = 3;
3061 link_quality.agg_params.agg_time_limit = LE_16(4000);
3062 link_quality.sta_id = IWK_BROADCAST_ID;
3063 err1 = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3064 sizeof (link_quality), 1);
3065 if (err1 != IWK_SUCCESS) {
3066 cmn_err(CE_WARN, "iwk_m_ioctl(): "
3067 "failed to config link quality table\n");
3068 }
3069 mutex_exit(&sc->sc_glock);
3070 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3071 }
3072
3073 if (err == ENETRESET) {
3074 /*
3075 * This is special for the hidden AP connection.
3076 * In any case, we should make sure there is only one
3077 * 'scan' in the driver for a 'connect' CLI command. So
3078 * when connecting to a hidden AP, the scan is only
3079 * sent out to the air once we know the desired
3080 * essid of the AP we want to connect to.
3081 */
3082 if (ic->ic_des_esslen) {
3083 if (sc->sc_flags & IWK_F_RUNNING) {
3084 iwk_m_stop(sc);
3085 (void) iwk_m_start(sc);
3086 (void) ieee80211_new_state(ic,
3087 IEEE80211_S_SCAN, -1);
3088 }
3089 }
3090 }
3091 }
3092
3093 /*
3094 * callback functions for set/get properties
3095 */
3096
3097 static int
3098 iwk_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3099 uint_t wldp_length, void *wldp_buf)
3100 {
3101 int err = 0;
3102 iwk_sc_t *sc = (iwk_sc_t *)arg;
3103
3104 err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3105 wldp_length, wldp_buf);
3106
3107 return (err);
3108 }
3109
3110 static int
3111 iwk_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3112 uint_t wldp_length, const void *wldp_buf)
3113 {
3114 int err;
3115 iwk_sc_t *sc = (iwk_sc_t *)arg;
3116 ieee80211com_t *ic = &sc->sc_ic;
3117
3118 err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3119 wldp_buf);
3120
3121 if (err == ENETRESET) {
3122 if (ic->ic_des_esslen) {
3123 if (sc->sc_flags & IWK_F_RUNNING) {
3124 iwk_m_stop(sc);
3125 (void) iwk_m_start(sc);
3126 (void) ieee80211_new_state(ic,
3127 IEEE80211_S_SCAN, -1);
3128 }
3129 }
3130 err = 0;
3131 }
3132
3133 return (err);
3134 }
3135
3136 static void
3137 iwk_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3138 mac_prop_info_handle_t mph)
3139 {
3140 iwk_sc_t *sc = (iwk_sc_t *)arg;
3141 ieee80211com_t *ic = &sc->sc_ic;
3142
3143 ieee80211_propinfo(ic, pr_name, wldp_pr_num, mph);
3144 }
3145
3146 /*ARGSUSED*/
3147 static int
3148 iwk_m_stat(void *arg, uint_t stat, uint64_t *val)
3149 {
3150 iwk_sc_t *sc = (iwk_sc_t *)arg;
3151 ieee80211com_t *ic = &sc->sc_ic;
3152 ieee80211_node_t *in;
3153
3154 mutex_enter(&sc->sc_glock);
3155 switch (stat) {
3156 case MAC_STAT_IFSPEED:
3157 in = ic->ic_bss;
3158 *val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ?
3159 IEEE80211_RATE(in->in_txrate) :
3160 ic->ic_fixed_rate) / 2 * 1000000;
3161 break;
3162 case MAC_STAT_NOXMTBUF:
3163 *val = sc->sc_tx_nobuf;
3164 break;
3165 case MAC_STAT_NORCVBUF:
3166 *val = sc->sc_rx_nobuf;
3167 break;
3168 case MAC_STAT_IERRORS:
3169 *val = sc->sc_rx_err;
3170 break;
3171 case MAC_STAT_RBYTES:
3172 *val = ic->ic_stats.is_rx_bytes;
3173 break;
3174 case MAC_STAT_IPACKETS:
3175 *val = ic->ic_stats.is_rx_frags;
3176 break;
3177 case MAC_STAT_OBYTES:
3178 *val = ic->ic_stats.is_tx_bytes;
3179 break;
3180 case MAC_STAT_OPACKETS:
3181 *val = ic->ic_stats.is_tx_frags;
3182 break;
3183 case MAC_STAT_OERRORS:
3184 case WIFI_STAT_TX_FAILED:
3185 *val = sc->sc_tx_err;
3186 break;
3187 case WIFI_STAT_TX_RETRANS:
3188 *val = sc->sc_tx_retries;
3189 break;
3190 case WIFI_STAT_FCS_ERRORS:
3191 case WIFI_STAT_WEP_ERRORS:
3192 case WIFI_STAT_TX_FRAGS:
3193 case WIFI_STAT_MCAST_TX:
3194 case WIFI_STAT_RTS_SUCCESS:
3195 case WIFI_STAT_RTS_FAILURE:
3196 case WIFI_STAT_ACK_FAILURE:
3197 case WIFI_STAT_RX_FRAGS:
3198 case WIFI_STAT_MCAST_RX:
3199 case WIFI_STAT_RX_DUPS:
3200 mutex_exit(&sc->sc_glock);
3201 return (ieee80211_stat(ic, stat, val));
3202 default:
3203 mutex_exit(&sc->sc_glock);
3204 return (ENOTSUP);
3205 }
3206 mutex_exit(&sc->sc_glock);
3207
3208 return (IWK_SUCCESS);
3209
3210 }
3211
3212 static int
3213 iwk_m_start(void *arg)
3214 {
3215 iwk_sc_t *sc = (iwk_sc_t *)arg;
3216 ieee80211com_t *ic = &sc->sc_ic;
3217 int err;
3218
3219 err = iwk_init(sc);
3220
3221 if (err != IWK_SUCCESS) {
3222 /*
3223 * The hw init failed (e.g. the RF is OFF). Return success to make
3224 * the 'plumb' succeed. The iwk_thread() tries to re-init in the
3225 * background.
3226 */
3227 mutex_enter(&sc->sc_glock);
3228 sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
3229 mutex_exit(&sc->sc_glock);
3230 return (IWK_SUCCESS);
3231 }
3232
3233 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3234
3235 mutex_enter(&sc->sc_glock);
3236 sc->sc_flags |= IWK_F_RUNNING;
3237 mutex_exit(&sc->sc_glock);
3238
3239 return (IWK_SUCCESS);
3240 }
3241
3242 static void
3243 iwk_m_stop(void *arg)
3244 {
3245 iwk_sc_t *sc = (iwk_sc_t *)arg;
3246 ieee80211com_t *ic = &sc->sc_ic;
3247
3248 iwk_stop(sc);
3249 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3250 ieee80211_stop_watchdog(ic);
3251 mutex_enter(&sc->sc_mt_lock);
3252 sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
3253 sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
3254 mutex_exit(&sc->sc_mt_lock);
3255 mutex_enter(&sc->sc_glock);
3256 sc->sc_flags &= ~IWK_F_RUNNING;
3257 mutex_exit(&sc->sc_glock);
3258 }
3259
3260 /*ARGSUSED*/
3261 static int
3262 iwk_m_unicst(void *arg, const uint8_t *macaddr)
3263 {
3264 iwk_sc_t *sc = (iwk_sc_t *)arg;
3265 ieee80211com_t *ic = &sc->sc_ic;
3266 int err;
3267
3268 if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3269 IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3270 mutex_enter(&sc->sc_glock);
3271 err = iwk_config(sc);
3272 mutex_exit(&sc->sc_glock);
3273 if (err != IWK_SUCCESS) {
3274 cmn_err(CE_WARN,
3275 "iwk_m_unicst(): "
3276 "failed to configure device\n");
3277 goto fail;
3278 }
3279 }
3280 return (IWK_SUCCESS);
3281 fail:
3282 return (err);
3283 }
3284
3285 /*ARGSUSED*/
3286 static int
3287 iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m)
3288 {
3289 return (IWK_SUCCESS);
3290 }
3291
3292 /*ARGSUSED*/
3293 static int
3294 iwk_m_promisc(void *arg, boolean_t on)
3295 {
3296 return (IWK_SUCCESS);
3297 }
3298
3299 static void
3300 iwk_thread(iwk_sc_t *sc)
3301 {
3302 ieee80211com_t *ic = &sc->sc_ic;
3303 clock_t clk;
3304 int times = 0, err, n = 0, timeout = 0;
3305 uint32_t tmp;
3306
3307 mutex_enter(&sc->sc_mt_lock);
3308 while (sc->sc_mf_thread_switch) {
3309 tmp = IWK_READ(sc, CSR_GP_CNTRL);
3310 if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
3311 sc->sc_flags &= ~IWK_F_RADIO_OFF;
3312 } else {
3313 sc->sc_flags |= IWK_F_RADIO_OFF;
3314 }
3315 /*
3316 * If in SUSPEND or the RF is OFF, do nothing
3317 */
3318 if ((sc->sc_flags & IWK_F_SUSPEND) ||
3319 (sc->sc_flags & IWK_F_RADIO_OFF)) {
3320 mutex_exit(&sc->sc_mt_lock);
3321 delay(drv_usectohz(100000));
3322 mutex_enter(&sc->sc_mt_lock);
3323 continue;
3324 }
3325
3326 /*
3327 * recover from fatal error
3328 */
3329 if (ic->ic_mach &&
3330 (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) {
3331
3332 IWK_DBG((IWK_DEBUG_FW,
3333 "iwk_thread(): "
3334 "try to recover fatal hw error: %d\n", times++));
3335
3336 iwk_stop(sc);
3337
3338 if (IWK_CHK_FAST_RECOVER(sc)) {
3339 /* save runtime configuration */
3340 bcopy(&sc->sc_config, &sc->sc_config_save,
3341 sizeof (sc->sc_config));
3342 } else {
3343 mutex_exit(&sc->sc_mt_lock);
3344 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3345 delay(drv_usectohz(2000000 + n*500000));
3346 mutex_enter(&sc->sc_mt_lock);
3347 }
3348
3349 err = iwk_init(sc);
3350 if (err != IWK_SUCCESS) {
3351 n++;
3352 if (n < 20)
3353 continue;
3354 }
3355 n = 0;
3356 if (!err)
3357 sc->sc_flags |= IWK_F_RUNNING;
3358
3359 if (!IWK_CHK_FAST_RECOVER(sc) ||
3360 iwk_fast_recover(sc) != IWK_SUCCESS) {
3361 sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
3362
3363 mutex_exit(&sc->sc_mt_lock);
3364 delay(drv_usectohz(2000000));
3365 if (sc->sc_ostate != IEEE80211_S_INIT)
3366 ieee80211_new_state(ic,
3367 IEEE80211_S_SCAN, 0);
3368 mutex_enter(&sc->sc_mt_lock);
3369 }
3370 }
3371
3372 if (ic->ic_mach && (sc->sc_flags & IWK_F_LAZY_RESUME)) {
3373 IWK_DBG((IWK_DEBUG_RESUME,
3374 "iwk_thread(): lazy resume\n"));
3375
3376 sc->sc_flags &= ~IWK_F_LAZY_RESUME;
3377 mutex_exit(&sc->sc_mt_lock);
3378 /*
3379 * NB: under WPA mode, this call hangs (door problem?)
3380 * when called in iwk_attach() and iwk_detach() while
3381 * when called in iwk_attach() and iwk_detach() while the
3382 * system is going through CPR. To be safe, let
3383 */
3384 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
3385 mutex_enter(&sc->sc_mt_lock);
3386 }
3387
3388 if (ic->ic_mach &&
3389 (sc->sc_flags & IWK_F_SCANNING) && sc->sc_scan_pending) {
3390 IWK_DBG((IWK_DEBUG_SCAN,
3391 "iwk_thread(): "
3392 "wait for probe response\n"));
3393 sc->sc_scan_pending--;
3394 mutex_exit(&sc->sc_mt_lock);
3395 delay(drv_usectohz(200000));
3396 if (sc->sc_flags & IWK_F_SCANNING)
3397 ieee80211_next_scan(ic);
3398 mutex_enter(&sc->sc_mt_lock);
3399 }
3400
3401 /*
3402 * rate ctl
3403 */
3404 if (ic->ic_mach &&
3405 (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) {
3406 clk = ddi_get_lbolt();
3407 if (clk > sc->sc_clk + drv_usectohz(500000)) {
3408 iwk_amrr_timeout(sc);
3409 }
3410 }
3411
3412 if ((ic->ic_state == IEEE80211_S_RUN) &&
3413 (ic->ic_beaconmiss++ > 50)) { /* 5 seconds */
3414 cmn_err(CE_WARN, "iwk: beacon missed for 5 seconds\n");
3415 (void) ieee80211_new_state(ic,
3416 IEEE80211_S_INIT, -1);
3417 }
3418
3419 mutex_exit(&sc->sc_mt_lock);
3420 delay(drv_usectohz(100000));
3421 mutex_enter(&sc->sc_mt_lock);
3422
3423 if (sc->sc_tx_timer) {
3424 timeout++;
3425 if (timeout == 10) {
3426 sc->sc_tx_timer--;
3427 if (sc->sc_tx_timer == 0) {
3428 sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
3429 sc->sc_ostate = IEEE80211_S_RUN;
3430 IWK_DBG((IWK_DEBUG_FW,
3431 "iwk_thread(): try to recover from"
3432 " send failure\n"));
3433 }
3434 timeout = 0;
3435 }
3436 }
3437
3438 }
3439 sc->sc_mf_thread = NULL;
3440 cv_signal(&sc->sc_mt_cv);
3441 mutex_exit(&sc->sc_mt_lock);
3442 }
3443
3444
3445 /*
3446 * Send a command to the firmware.
3447 */
3448 static int
3449 iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async)
3450 {
3451 iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3452 iwk_tx_desc_t *desc;
3453 iwk_cmd_t *cmd;
3454 clock_t clk;
3455
3456 ASSERT(size <= sizeof (cmd->data));
3457 ASSERT(mutex_owned(&sc->sc_glock));
3458
3459 IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code));
3460 desc = ring->data[ring->cur].desc;
3461 cmd = ring->data[ring->cur].cmd;
3462
3463 cmd->hdr.type = (uint8_t)code;
3464 cmd->hdr.flags = 0;
3465 cmd->hdr.qid = ring->qid;
3466 cmd->hdr.idx = ring->cur;
3467 (void) memcpy(cmd->data, buf, size);
3468 (void) memset(desc, 0, sizeof (*desc));
3469
3470 desc->val0 = 1 << 24;
3471 desc->pa[0].tb1_addr =
3472 (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
3473 desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
3474
3475 /* kick cmd ring XXX */
3476 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3477 tfd_offset[ring->cur].val = 8;
3478 if (ring->cur < IWK_MAX_WIN_SIZE) {
3479 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3480 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3481 }
3482 ring->cur = (ring->cur + 1) % ring->count;
3483 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3484
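/*
 * For a synchronous command, wait up to about 2 seconds for
 * iwk_cmd_intr() to set IWK_F_CMD_DONE and signal sc_cmd_cv.
 */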
3485 if (async)
3486 return (IWK_SUCCESS);
3487 else {
3488 sc->sc_flags &= ~IWK_F_CMD_DONE;
3489 clk = ddi_get_lbolt() + drv_usectohz(2000000);
3490 while (!(sc->sc_flags & IWK_F_CMD_DONE)) {
3491 if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk) <
3492 0)
3493 break;
3494 }
3495 if (sc->sc_flags & IWK_F_CMD_DONE)
3496 return (IWK_SUCCESS);
3497 else
3498 return (IWK_FAIL);
3499 }
3500 }
3501
3502 static void
3503 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3504 {
3505 iwk_led_cmd_t led;
3506
3507 led.interval = LE_32(100000); /* unit: 100ms */
3508 led.id = id;
3509 led.off = off;
3510 led.on = on;
3511
3512 (void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3513 }
3514
3515 static int
3516 iwk_hw_set_before_auth(iwk_sc_t *sc)
3517 {
3518 ieee80211com_t *ic = &sc->sc_ic;
3519 ieee80211_node_t *in = ic->ic_bss;
3520 iwk_add_sta_t node;
3521 iwk_link_quality_cmd_t link_quality;
3522 struct ieee80211_rateset rs;
3523 uint16_t masks = 0, rate;
3524 int i, err;
3525
3526 if (in->in_chan == IEEE80211_CHAN_ANYC) {
3527 cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3528 "channel (%d) isn't in proper range\n",
3529 LE_16(ieee80211_chan2ieee(ic, in->in_chan)));
3530 return (IWK_FAIL);
3531 }
3532
3533 /* update adapter's configuration according to the info of the target AP */
3534 IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
3535 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan));
3536 if (ic->ic_curmode == IEEE80211_MODE_11B) {
3537 sc->sc_config.cck_basic_rates = 0x03;
3538 sc->sc_config.ofdm_basic_rates = 0;
3539 } else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
3540 (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
3541 sc->sc_config.cck_basic_rates = 0;
3542 sc->sc_config.ofdm_basic_rates = 0x15;
3543 } else { /* assume 802.11b/g */
3544 sc->sc_config.cck_basic_rates = 0x0f;
3545 sc->sc_config.ofdm_basic_rates = 0xff;
3546 }
3547
3548 sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3549 RXON_FLG_SHORT_SLOT_MSK);
3550
3551 if (ic->ic_flags & IEEE80211_F_SHSLOT)
3552 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3553 else
3554 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3555
3556 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
3557 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3558 else
3559 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3560
3561 IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x "
3562 "filter_flags %x cck %x ofdm %x"
3563 " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
3564 LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags),
3565 LE_32(sc->sc_config.filter_flags),
3566 sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3567 sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3568 sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3569 sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3570 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3571 sizeof (iwk_rxon_cmd_t), 1);
3572 if (err != IWK_SUCCESS) {
3573 cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3574 " failed to config chan%d\n",
3575 sc->sc_config.chan);
3576 return (err);
3577 }
3578
3579 /* obtain current temperature of chipset */
3580 sc->sc_tempera = iwk_curr_tempera(sc);
3581
3582 /* perform Tx power calibration to determine the gains of DSP and radio */
3583 err = iwk_tx_power_calibration(sc);
3584 if (err) {
3585 cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3586 "failed to set tx power table\n");
3587 return (err);
3588 }
3589
3590 /* add default AP node */
3591 (void) memset(&node, 0, sizeof (node));
3592 IEEE80211_ADDR_COPY(node.bssid, in->in_bssid);
3593 node.id = IWK_AP_ID;
3594 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
3595 if (err != IWK_SUCCESS) {
3596 cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3597 "failed to add BSS node\n");
3598 return (err);
3599 }
3600
3601 /* TX_LINK_QUALITY cmd */
3602 (void) memset(&link_quality, 0, sizeof (link_quality));
3603 rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)];
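/*
 * Populate the retry rate table from the upper end of the supported
 * rate set; entries beyond the supported rates fall back to 1 Mb/s
 * (rate value 2). CCK rates get RATE_MCS_CCK_MSK, and antenna B is
 * forced for every entry.
 */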
3604 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3605 if (i < rs.ir_nrates)
3606 rate = rs.ir_rates[rs.ir_nrates - i];
3607 else
3608 rate = 2;
3609 if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
3610 masks |= RATE_MCS_CCK_MSK;
3611 masks |= RATE_MCS_ANT_B_MSK;
3612 masks &= ~RATE_MCS_ANT_A_MSK;
3613 link_quality.rate_n_flags[i] =
3614 LE_32(iwk_rate_to_plcp(rate) | masks);
3615 }
3616
3617 link_quality.general_params.single_stream_ant_msk = 2;
3618 link_quality.general_params.dual_stream_ant_msk = 3;
3619 link_quality.agg_params.agg_dis_start_th = 3;
3620 link_quality.agg_params.agg_time_limit = LE_16(4000);
3621 link_quality.sta_id = IWK_AP_ID;
3622 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3623 sizeof (link_quality), 1);
3624 if (err != IWK_SUCCESS) {
3625 cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3626 "failed to config link quality table\n");
3627 return (err);
3628 }
3629
3630 return (IWK_SUCCESS);
3631 }
3632
3633 /*
3634 * Send a scan request (assemble a scan cmd) to the firmware.
3635 */
3636 static int
3637 iwk_scan(iwk_sc_t *sc)
3638 {
3639 ieee80211com_t *ic = &sc->sc_ic;
3640 iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3641 iwk_tx_desc_t *desc;
3642 iwk_tx_data_t *data;
3643 iwk_cmd_t *cmd;
3644 iwk_scan_hdr_t *hdr;
3645 iwk_scan_chan_t *chan;
3646 struct ieee80211_frame *wh;
3647 ieee80211_node_t *in = ic->ic_bss;
3648 uint8_t essid[IEEE80211_NWID_LEN+1];
3649 struct ieee80211_rateset *rs;
3650 enum ieee80211_phymode mode;
3651 uint8_t *frm;
3652 int i, pktlen, nrates;
3653
3654 data = &ring->data[ring->cur];
3655 desc = data->desc;
3656 cmd = (iwk_cmd_t *)data->dma_data.mem_va;
3657
3658 cmd->hdr.type = REPLY_SCAN_CMD;
3659 cmd->hdr.flags = 0;
3660 cmd->hdr.qid = ring->qid;
3661 cmd->hdr.idx = ring->cur | 0x40;
3662
3663 hdr = (iwk_scan_hdr_t *)cmd->data;
3664 (void) memset(hdr, 0, sizeof (iwk_scan_hdr_t));
3665 hdr->nchan = 1;
3666 hdr->quiet_time = LE_16(50);
3667 hdr->quiet_plcp_th = LE_16(1);
3668
3669 hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK);
3670 hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3671 (0x7 << RXON_RX_CHAIN_VALID_POS) |
3672 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3673 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3674
3675 hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3676 hdr->tx_cmd.sta_id = IWK_BROADCAST_ID;
3677 hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff);
3678 hdr->tx_cmd.tx_flags |= LE_32(0x200);
3679 hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwk_rate_to_plcp(2));
3680 hdr->tx_cmd.rate.r.rate_n_flags |=
3681 LE_32(RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
3682 hdr->direct_scan[0].len = ic->ic_des_esslen;
3683 hdr->direct_scan[0].id = IEEE80211_ELEMID_SSID;
3684
3685 if (ic->ic_des_esslen) {
3686 bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
3687 essid[ic->ic_des_esslen] = '\0';
3688 IWK_DBG((IWK_DEBUG_SCAN, "directed scan %s\n", essid));
3689
3690 bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
3691 ic->ic_des_esslen);
3692 } else {
3693 bzero(hdr->direct_scan[0].ssid,
3694 sizeof (hdr->direct_scan[0].ssid));
3695 }
3696 /*
3697 * a probe request frame is required after the REPLY_SCAN_CMD
3698 */
3699 wh = (struct ieee80211_frame *)(hdr + 1);
3700 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
3701 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
3702 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
3703 (void) memset(wh->i_addr1, 0xff, 6);
3704 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
3705 (void) memset(wh->i_addr3, 0xff, 6);
3706 *(uint16_t *)&wh->i_dur[0] = 0;
3707 *(uint16_t *)&wh->i_seq[0] = 0;
3708
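/*
 * Information elements appended to the probe request body, in order:
 * SSID, supported rates, extended rates (when there are more than
 * IEEE80211_RATE_SIZE rates), and any optional IE (typically WPA)
 * supplied by net80211.
 */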
3709 frm = (uint8_t *)(wh + 1);
3710
3711 /* essid IE */
3712 if (in->in_esslen) {
3713 bcopy(in->in_essid, essid, in->in_esslen);
3714 essid[in->in_esslen] = '\0';
3715 IWK_DBG((IWK_DEBUG_SCAN, "probe with ESSID %s\n",
3716 essid));
3717 }
3718 *frm++ = IEEE80211_ELEMID_SSID;
3719 *frm++ = in->in_esslen;
3720 (void) memcpy(frm, in->in_essid, in->in_esslen);
3721 frm += in->in_esslen;
3722
3723 mode = ieee80211_chan2mode(ic, ic->ic_curchan);
3724 rs = &ic->ic_sup_rates[mode];
3725
3726 /* supported rates IE */
3727 *frm++ = IEEE80211_ELEMID_RATES;
3728 nrates = rs->ir_nrates;
3729 if (nrates > IEEE80211_RATE_SIZE)
3730 nrates = IEEE80211_RATE_SIZE;
3731 *frm++ = (uint8_t)nrates;
3732 (void) memcpy(frm, rs->ir_rates, nrates);
3733 frm += nrates;
3734
3735 /* supported xrates IE */
3736 if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
3737 nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
3738 *frm++ = IEEE80211_ELEMID_XRATES;
3739 *frm++ = (uint8_t)nrates;
3740 (void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
3741 frm += nrates;
3742 }
3743
3744 /* optional IE (usually for wpa) */
3745 if (ic->ic_opt_ie != NULL) {
3746 (void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
3747 frm += ic->ic_opt_ie_len;
3748 }
3749
3750 /* setup length of probe request */
3751 hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
3752 hdr->len = LE_16(hdr->nchan * sizeof (iwk_scan_chan_t) +
3753 LE_16(hdr->tx_cmd.len) + sizeof (iwk_scan_hdr_t));
3754
3755 /*
3756 * the attributes of the scan channels are required after the probe
3757 * request frame.
3758 */
3759 chan = (iwk_scan_chan_t *)frm;
3760 for (i = 1; i <= hdr->nchan; i++, chan++) {
3761 if (ic->ic_des_esslen) {
3762 chan->type = 3;
3763 } else {
3764 chan->type = 1;
3765 }
3766
3767 chan->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3768 chan->tpc.tx_gain = 0x3f;
3769 chan->tpc.dsp_atten = 110;
3770 chan->active_dwell = LE_16(50);
3771 chan->passive_dwell = LE_16(120);
3772
3773 frm += sizeof (iwk_scan_chan_t);
3774 }
3775
3776 pktlen = _PTRDIFF(frm, cmd);
3777
3778 (void) memset(desc, 0, sizeof (*desc));
3779 desc->val0 = 1 << 24;
3780 desc->pa[0].tb1_addr =
3781 (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
3782 desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
3783
3784 /*
3785 * filling the byte count table may not be necessary for a cmd;
3786 * we fill it here anyway.
3787 */
3788 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3789 tfd_offset[ring->cur].val = 8;
3790 if (ring->cur < IWK_MAX_WIN_SIZE) {
3791 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3792 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3793 }
3794
3795 /* kick cmd ring */
3796 ring->cur = (ring->cur + 1) % ring->count;
3797 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3798
3799 return (IWK_SUCCESS);
3800 }
3801
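/*
 * iwk_config() pushes the initial runtime configuration to the firmware:
 * the power table, bluetooth coexistence parameters, the RXON settings
 * (operating mode, channel, filters and basic rates), an initial Tx power
 * table derived from the current chipset temperature, the broadcast
 * station entry, and a default link quality (rate scaling) table for
 * broadcast frames.  Any failure is returned so the caller can abort.
 */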
3802 static int
3803 iwk_config(iwk_sc_t *sc)
3804 {
3805 ieee80211com_t *ic = &sc->sc_ic;
3806 iwk_powertable_cmd_t powertable;
3807 iwk_bt_cmd_t bt;
3808 iwk_add_sta_t node;
3809 iwk_link_quality_cmd_t link_quality;
3810 int i, err;
3811 uint16_t masks = 0;
3812
3813 /*
3814 * set power mode. Disable power management at present, do it later
3815 */
3816 (void) memset(&powertable, 0, sizeof (powertable));
3817 powertable.flags = LE_16(0x8);
3818 err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable,
3819 sizeof (powertable), 0);
3820 if (err != IWK_SUCCESS) {
3821 cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n");
3822 return (err);
3823 }
3824
3825 /* configure bt coexistence */
3826 (void) memset(&bt, 0, sizeof (bt));
3827 bt.flags = 3;
3828 bt.lead_time = 0xaa;
3829 bt.max_kill = 1;
3830 err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt,
3831 sizeof (bt), 0);
3832 if (err != IWK_SUCCESS) {
3833 cmn_err(CE_WARN,
3834 "iwk_config(): "
3835 "failed to configurate bt coexistence\n");
3836 return (err);
3837 }
3838
3839 /* configure rxon */
3840 (void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
3841 IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
3842 IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
3843 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
3844 sc->sc_config.flags = LE_32(RXON_FLG_TSF2HOST_MSK |
3845 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_BAND_24G_MSK);
3846 sc->sc_config.flags &= LE_32(~RXON_FLG_CCK_MSK);
3847 switch (ic->ic_opmode) {
3848 case IEEE80211_M_STA:
3849 sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
3850 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3851 RXON_FILTER_DIS_DECRYPT_MSK |
3852 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3853 break;
3854 case IEEE80211_M_IBSS:
3855 case IEEE80211_M_AHDEMO:
3856 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
3857 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3858 sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3859 RXON_FILTER_DIS_DECRYPT_MSK |
3860 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3861 break;
3862 case IEEE80211_M_HOSTAP:
3863 sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
3864 break;
3865 case IEEE80211_M_MONITOR:
3866 sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
3867 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3868 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
3869 break;
3870 }
3871 sc->sc_config.cck_basic_rates = 0x0f;
3872 sc->sc_config.ofdm_basic_rates = 0xff;
3873
3874 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
3875 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
3876
3877 /* set antenna */
3878
3879 sc->sc_config.rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3880 (0x7 << RXON_RX_CHAIN_VALID_POS) |
3881 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3882 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3883
3884 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3885 sizeof (iwk_rxon_cmd_t), 0);
3886 if (err != IWK_SUCCESS) {
3887 cmn_err(CE_WARN, "iwk_config(): "
3888 "failed to set configure command\n");
3889 return (err);
3890 }
3891 /* obtain current temperature of chipset */
3892 sc->sc_tempera = iwk_curr_tempera(sc);
3893
3894 /* perform Tx power calibration to determine the DSP and radio gains */
3895 err = iwk_tx_power_calibration(sc);
3896 if (err) {
3897 cmn_err(CE_WARN, "iwk_config(): "
3898 "failed to set tx power table\n");
3899 return (err);
3900 }
3901
3902 /* add broadcast node so that we can send broadcast frame */
3903 (void) memset(&node, 0, sizeof (node));
3904 (void) memset(node.bssid, 0xff, 6);
3905 node.id = IWK_BROADCAST_ID;
3906 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
3907 if (err != IWK_SUCCESS) {
3908 cmn_err(CE_WARN, "iwk_config(): "
3909 "failed to add broadcast node\n");
3910 return (err);
3911 }
3912
3913 /* TX_LINK_QUALITY cmd ? */
3914 (void) memset(&link_quality, 0, sizeof (link_quality));
3915 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3916 masks |= RATE_MCS_CCK_MSK;
3917 masks |= RATE_MCS_ANT_B_MSK;
3918 masks &= ~RATE_MCS_ANT_A_MSK;
3919 link_quality.rate_n_flags[i] =
3920 LE_32(iwk_rate_to_plcp(2) | masks);
3921 }
3922
3923 link_quality.general_params.single_stream_ant_msk = 2;
3924 link_quality.general_params.dual_stream_ant_msk = 3;
3925 link_quality.agg_params.agg_dis_start_th = 3;
3926 link_quality.agg_params.agg_time_limit = LE_16(4000);
3927 link_quality.sta_id = IWK_BROADCAST_ID;
3928 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3929 sizeof (link_quality), 0);
3930 if (err != IWK_SUCCESS) {
3931 cmn_err(CE_WARN, "iwk_config(): "
3932 "failed to config link quality table\n");
3933 return (err);
3934 }
3935
3936 return (IWK_SUCCESS);
3937 }
3938
3939 static void
3940 iwk_stop_master(iwk_sc_t *sc)
3941 {
3942 uint32_t tmp;
3943 int n;
3944
3945 tmp = IWK_READ(sc, CSR_RESET);
3946 IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
3947
3948 tmp = IWK_READ(sc, CSR_GP_CNTRL);
3949 if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
3950 CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE)
3951 return;
3952
3953 for (n = 0; n < 2000; n++) {
3954 if (IWK_READ(sc, CSR_RESET) &
3955 CSR_RESET_REG_FLAG_MASTER_DISABLED)
3956 break;
3957 DELAY(1000);
3958 }
3959 if (n == 2000)
3960 IWK_DBG((IWK_DEBUG_HW,
3961 "timeout waiting for master stop\n"));
3962 }
3963
3964 static int
3965 iwk_power_up(iwk_sc_t *sc)
3966 {
3967 uint32_t tmp;
3968
3969 iwk_mac_access_enter(sc);
3970 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3971 tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
3972 tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
3973 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3974 iwk_mac_access_exit(sc);
3975
3976 DELAY(5000);
3977 return (IWK_SUCCESS);
3978 }
3979
3980 static int
3981 iwk_preinit(iwk_sc_t *sc)
3982 {
3983 uint32_t tmp;
3984 int n;
3985 uint8_t vlink;
3986
3987 /* clear any pending interrupts */
3988 IWK_WRITE(sc, CSR_INT, 0xffffffff);
3989
3990 tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS);
3991 IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS,
3992 tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
3993
3994 tmp = IWK_READ(sc, CSR_GP_CNTRL);
3995 IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
3996
3997 /* wait for clock ready */
3998 for (n = 0; n < 1000; n++) {
3999 if (IWK_READ(sc, CSR_GP_CNTRL) &
4000 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY)
4001 break;
4002 DELAY(10);
4003 }
4004 if (n == 1000) {
4005 cmn_err(CE_WARN,
4006 "iwk_preinit(): timeout waiting for clock ready\n");
4007 return (ETIMEDOUT);
4008 }
4009 iwk_mac_access_enter(sc);
4010 tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG);
4011 iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp |
4012 APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT);
4013
4014 DELAY(20);
4015 tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT);
4016 iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
4017 APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
4018 iwk_mac_access_exit(sc);
4019
4020 IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? */
4021
4022 (void) iwk_power_up(sc);
4023
4024 if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
4025 tmp = ddi_get32(sc->sc_cfg_handle,
4026 (uint32_t *)(sc->sc_cfg_base + 0xe8));
4027 ddi_put32(sc->sc_cfg_handle,
4028 (uint32_t *)(sc->sc_cfg_base + 0xe8),
4029 tmp & ~(1 << 11));
4030 }
4031
4032
4033 vlink = ddi_get8(sc->sc_cfg_handle,
4034 (uint8_t *)(sc->sc_cfg_base + 0xf0));
4035 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
4036 vlink & ~2);
4037
4038 tmp = IWK_READ(sc, CSR_SW_VER);
4039 tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
4040 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
4041 CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R;
4042 IWK_WRITE(sc, CSR_SW_VER, tmp);
4043
4044 /* make sure power is supplied to each part of the hardware */
4045 iwk_mac_access_enter(sc);
4046 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
4047 tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4048 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4049 DELAY(5);
4050 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
4051 tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4052 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4053 iwk_mac_access_exit(sc);
4054 return (IWK_SUCCESS);
4055 }
4056
4057 /*
4058 * set up semaphore flag to own EEPROM
4059 */
4060 static int iwk_eep_sem_down(iwk_sc_t *sc)
4061 {
4062 int count1, count2;
4063 uint32_t tmp;
4064
4065 for (count1 = 0; count1 < 1000; count1++) {
4066 tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
4067 IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4068 tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4069
4070 for (count2 = 0; count2 < 2; count2++) {
4071 if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) &
4072 CSR_HW_IF_CONFIG_REG_EEP_SEM)
4073 return (IWK_SUCCESS);
4074 DELAY(10000);
4075 }
4076 }
4077 return (IWK_FAIL);
4078 }
4079
4080 /*
4081 * reset semaphore flag to release EEPROM
4082 */
4083 static void iwk_eep_sem_up(iwk_sc_t *sc)
4084 {
4085 uint32_t tmp;
4086
4087 tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
4088 IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4089 tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4090 }
4091
4092 /*
4093 * This function loads all information in the EEPROM into the iwk_eep
4094 * structure in the iwk_sc_t structure
4095 */
4096 static int iwk_eep_load(iwk_sc_t *sc)
4097 {
4098 int i, rr;
4099 uint32_t rv, tmp, eep_gp;
4100 uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
4101 uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
4102
4103 /* read eeprom gp register in CSR */
4104 eep_gp = IWK_READ(sc, CSR_EEPROM_GP);
4105 if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
4106 CSR_EEPROM_GP_BAD_SIGNATURE) {
4107 cmn_err(CE_WARN, "EEPROM not found\n");
4108 return (IWK_FAIL);
4109 }
4110
4111 rr = iwk_eep_sem_down(sc);
4112 if (rr != 0) {
4113 cmn_err(CE_WARN, "failed to own EEPROM\n");
4114 return (IWK_FAIL);
4115 }
4116
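/*
 * Read the EEPROM one 16-bit word at a time.  For each word the word
 * address is written to CSR_EEPROM_REG shifted left by one, bit 1 of the
 * register is cleared to start the read, and then bit 0 is polled until
 * the hardware signals that the data is ready; the word itself is
 * returned in the upper 16 bits of CSR_EEPROM_REG.
 */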
4117 for (addr = 0; addr < eep_sz; addr += 2) {
4118 IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1);
4119 tmp = IWK_READ(sc, CSR_EEPROM_REG);
4120 IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
4121
4122 for (i = 0; i < 10; i++) {
4123 rv = IWK_READ(sc, CSR_EEPROM_REG);
4124 if (rv & 1)
4125 break;
4126 DELAY(10);
4127 }
4128
4129 if (!(rv & 1)) {
4130 cmn_err(CE_WARN, "time out when read EEPROM\n");
4131 iwk_eep_sem_up(sc);
4132 return (IWK_FAIL);
4133 }
4134
4135 eep_p[addr/2] = LE_16(rv >> 16);
4136 }
4137
4138 iwk_eep_sem_up(sc);
4139 return (IWK_SUCCESS);
4140 }
4141
4142 /*
4143 * init mac address in ieee80211com_t struct
4144 */
4145 static void iwk_get_mac_from_eep(iwk_sc_t *sc)
4146 {
4147 ieee80211com_t *ic = &sc->sc_ic;
4148 struct iwk_eep *ep = &sc->sc_eep_map;
4149
4150 IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address);
4151
4152 IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4153 ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4154 ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4155 }
4156
4157 static int
4158 iwk_init(iwk_sc_t *sc)
4159 {
4160 int qid, n, err;
4161 clock_t clk;
4162 uint32_t tmp;
4163
4164 mutex_enter(&sc->sc_glock);
4165 sc->sc_flags &= ~IWK_F_FW_INIT;
4166
4167 (void) iwk_preinit(sc);
4168
4169 tmp = IWK_READ(sc, CSR_GP_CNTRL);
4170 if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
4171 cmn_err(CE_NOTE, "iwk_init(): Radio transmitter is off\n");
4172 goto fail1;
4173 }
4174
4175 /* init Rx ring */
4176 iwk_mac_access_enter(sc);
4177 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
4178
4179 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
4180 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
4181 sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);
4182
4183 IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
4184 ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
4185 offsetof(struct iwk_shared, val0)) >> 4));
4186
4187 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
4188 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
4189 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
4190 IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
4191 (RX_QUEUE_SIZE_LOG <<
4192 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
4193 iwk_mac_access_exit(sc);
4194 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
4195 (RX_QUEUE_SIZE - 1) & ~0x7);
4196
4197 /* init Tx rings */
4198 iwk_mac_access_enter(sc);
4199 iwk_reg_write(sc, SCD_TXFACT, 0);
4200
4201 /* keep warm page */
4202 iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG,
4203 sc->sc_dma_kw.cookie.dmac_address >> 4);
4204
4205 for (qid = 0; qid < IWK_NUM_QUEUES; qid++) {
4206 IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
4207 sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
4208 IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
4209 IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
4210 IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
4211 }
4212 iwk_mac_access_exit(sc);
4213
4214 /* clear "radio off" and "disable command" bits */
4215 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4216 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
4217 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4218
4219 /* clear any pending interrupts */
4220 IWK_WRITE(sc, CSR_INT, 0xffffffff);
4221
4222 /* enable interrupts */
4223 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
4224
4225 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4226 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4227
4228 /*
4229 * backup ucode data part for future use.
4230 */
4231 (void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
4232 sc->sc_dma_fw_data.mem_va,
4233 sc->sc_dma_fw_data.alength);
4234
4235 for (n = 0; n < 2; n++) {
4236 /* load firmware init segment into NIC */
4237 err = iwk_load_firmware(sc);
4238 if (err != IWK_SUCCESS) {
4239 cmn_err(CE_WARN, "iwk_init(): "
4240 "failed to setup boot firmware\n");
4241 continue;
4242 }
4243
4244 /* now press "execute" start running */
4245 IWK_WRITE(sc, CSR_RESET, 0);
4246 break;
4247 }
4248 if (n == 2) {
4249 cmn_err(CE_WARN, "iwk_init(): failed to load firmware\n");
4250 goto fail1;
4251 }
4252 /* ..and wait at most two seconds for the adapter to initialize */
4253 clk = ddi_get_lbolt() + drv_usectohz(2000000);
4254 while (!(sc->sc_flags & IWK_F_FW_INIT)) {
4255 if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0)
4256 break;
4257 }
4258 if (!(sc->sc_flags & IWK_F_FW_INIT)) {
4259 cmn_err(CE_WARN,
4260 "iwk_init(): timeout waiting for firmware init\n");
4261 goto fail1;
4262 }
4263
4264 /*
4265 * at this point, the firmware is loaded OK, then config the hardware
4266 * with the ucode API, including rxon, txpower, etc.
4267 */
4268 err = iwk_config(sc);
4269 if (err) {
4270 cmn_err(CE_WARN, "iwk_init(): failed to configure device\n");
4271 goto fail1;
4272 }
4273
4274 /* at this point, hardware may receive beacons :) */
4275 mutex_exit(&sc->sc_glock);
4276 return (IWK_SUCCESS);
4277
4278 fail1:
4279 err = IWK_FAIL;
4280 mutex_exit(&sc->sc_glock);
4281 return (err);
4282 }
4283
4284 static void
4285 iwk_stop(iwk_sc_t *sc)
4286 {
4287 uint32_t tmp;
4288 int i;
4289
4290 if (!(sc->sc_flags & IWK_F_QUIESCED))
4291 mutex_enter(&sc->sc_glock);
4292
4293 IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4294 /* disable interrupts */
4295 IWK_WRITE(sc, CSR_INT_MASK, 0);
4296 IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
4297 IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
4298
4299 /* reset all Tx rings */
4300 for (i = 0; i < IWK_NUM_QUEUES; i++)
4301 iwk_reset_tx_ring(sc, &sc->sc_txq[i]);
4302
4303 /* reset Rx ring */
4304 iwk_reset_rx_ring(sc);
4305
4306 iwk_mac_access_enter(sc);
4307 iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4308 iwk_mac_access_exit(sc);
4309
4310 DELAY(5);
4311
4312 iwk_stop_master(sc);
4313
4314 sc->sc_tx_timer = 0;
4315 sc->sc_flags &= ~IWK_F_SCANNING;
4316 sc->sc_scan_pending = 0;
4317
4318 tmp = IWK_READ(sc, CSR_RESET);
4319 IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
4320
4321 if (!(sc->sc_flags & IWK_F_QUIESCED))
4322 mutex_exit(&sc->sc_glock);
4323 }
4324
4325 /*
4326 * Naive implementation of the Adaptive Multi Rate Retry algorithm:
4327 * "IEEE 802.11 Rate Adaptation: A Practical Approach"
4328 * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
4329 * INRIA Sophia - Projet Planete
4330 * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
4331 */
4332 #define is_success(amrr) \
4333 ((amrr)->retrycnt < (amrr)->txcnt / 10)
4334 #define is_failure(amrr) \
4335 ((amrr)->retrycnt > (amrr)->txcnt / 3)
4336 #define is_enough(amrr) \
4337 ((amrr)->txcnt > 100)
4338 #define is_min_rate(in) \
4339 ((in)->in_txrate == 0)
4340 #define is_max_rate(in) \
4341 ((in)->in_txrate == (in)->in_rates.ir_nrates - 1)
4342 #define increase_rate(in) \
4343 ((in)->in_txrate++)
4344 #define decrease_rate(in) \
4345 ((in)->in_txrate--)
4346 #define reset_cnt(amrr) \
4347 { (amrr)->txcnt = (amrr)->retrycnt = 0; }
4348
4349 #define IWK_AMRR_MIN_SUCCESS_THRESHOLD 1
4350 #define IWK_AMRR_MAX_SUCCESS_THRESHOLD 15
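/*
 * In rough terms: a sampling period is "enough" once more than 100 frames
 * have been transmitted.  If fewer than 10% of those frames needed a retry
 * the period counts as a success, and after success_threshold consecutive
 * successful periods the Tx rate is raised one step; if more than a third
 * of the frames needed retries the rate is lowered.  The threshold starts
 * at the minimum of 1 and, while recovering from a failed rate increase,
 * grows up to 15 so the driver probes the higher rate less and less often.
 */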
4351
4352 static void
4353 iwk_amrr_init(iwk_amrr_t *amrr)
4354 {
4355 amrr->success = 0;
4356 amrr->recovery = 0;
4357 amrr->txcnt = amrr->retrycnt = 0;
4358 amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD;
4359 }
4360
4361 static void
4362 iwk_amrr_timeout(iwk_sc_t *sc)
4363 {
4364 ieee80211com_t *ic = &sc->sc_ic;
4365
4366 IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n"));
4367 if (ic->ic_opmode == IEEE80211_M_STA)
4368 iwk_amrr_ratectl(NULL, ic->ic_bss);
4369 else
4370 ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL);
4371 sc->sc_clk = ddi_get_lbolt();
4372 }
4373
4374 /* ARGSUSED */
4375 static void
4376 iwk_amrr_ratectl(void *arg, ieee80211_node_t *in)
4377 {
4378 iwk_amrr_t *amrr = (iwk_amrr_t *)in;
4379 int need_change = 0;
4380
4381 if (is_success(amrr) && is_enough(amrr)) {
4382 amrr->success++;
4383 if (amrr->success >= amrr->success_threshold &&
4384 !is_max_rate(in)) {
4385 amrr->recovery = 1;
4386 amrr->success = 0;
4387 increase_rate(in);
4388 IWK_DBG((IWK_DEBUG_RATECTL,
4389 "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n",
4390 in->in_txrate, amrr->txcnt, amrr->retrycnt));
4391 need_change = 1;
4392 } else {
4393 amrr->recovery = 0;
4394 }
4395 } else if (is_failure(amrr)) {
4396 amrr->success = 0;
4397 if (!is_min_rate(in)) {
4398 if (amrr->recovery) {
4399 amrr->success_threshold++;
4400 if (amrr->success_threshold >
4401 IWK_AMRR_MAX_SUCCESS_THRESHOLD)
4402 amrr->success_threshold =
4403 IWK_AMRR_MAX_SUCCESS_THRESHOLD;
4404 } else {
4405 amrr->success_threshold =
4406 IWK_AMRR_MIN_SUCCESS_THRESHOLD;
4407 }
4408 decrease_rate(in);
4409 IWK_DBG((IWK_DEBUG_RATECTL,
4410 "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n",
4411 in->in_txrate, amrr->txcnt, amrr->retrycnt));
4412 need_change = 1;
4413 }
4414 amrr->recovery = 0; /* paper is incorrect */
4415 }
4416
4417 if (is_enough(amrr) || need_change)
4418 reset_cnt(amrr);
4419 }
4420
4421 /*
4422 * calculate the 4965 chipset's Kelvin temperature according to
4423 * the data of the init alive and statistics notifications.
4424 * The details are described in the iwk_calibration.h file
4425 */
4426 static int32_t iwk_curr_tempera(iwk_sc_t *sc)
4427 {
4428 int32_t tempera;
4429 int32_t r1, r2, r3;
4430 uint32_t r4_u;
4431 int32_t r4_s;
4432
4433 if (iwk_is_fat_channel(sc)) {
4434 r1 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r1[1]);
4435 r2 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r2[1]);
4436 r3 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r3[1]);
4437 r4_u = LE_32(sc->sc_card_alive_init.therm_r4[1]);
4438 } else {
4439 r1 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r1[0]);
4440 r2 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r2[0]);
4441 r3 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r3[0]);
4442 r4_u = LE_32(sc->sc_card_alive_init.therm_r4[0]);
4443 }
4444
4445 if (sc->sc_flags & IWK_F_STATISTICS) {
4446 r4_s = (int32_t)(LE_32(sc->sc_statistics.general.temperature) <<
4447 (31-23)) >> (31-23);
4448 } else {
4449 r4_s = (int32_t)(r4_u << (31-23)) >> (31-23);
4450 }
4451
4452 IWK_DBG((IWK_DEBUG_CALIBRATION, "temperature R[1-4]: %d %d %d %d\n",
4453 r1, r2, r3, r4_s));
4454
4455 if (r3 == r1) {
4456 cmn_err(CE_WARN, "iwk_curr_tempera(): "
4457 "failed to calculate temperature"
4458 "because r3 = r1\n");
4459 return (DDI_FAILURE);
4460 }
4461
4462 tempera = TEMPERATURE_CALIB_A_VAL * (r4_s - r2);
4463 tempera /= (r3 - r1);
4464 tempera = (tempera*97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
4465
4466 IWK_DBG((IWK_DEBUG_CALIBRATION, "calculated temperature: %dK, %dC\n",
4467 tempera, KELVIN_TO_CELSIUS(tempera)));
4468
4469 return (tempera);
4470 }
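/*
 * In other words, with R1..R3 taken from the init alive notification and
 * R4 taken either from the latest statistics notification or from the
 * alive data (sign-extended from 24 bits in both cases), the reported
 * temperature is roughly
 *
 *	T(Kelvin) = 0.97 * TEMPERATURE_CALIB_A_VAL * (R4 - R2) / (R3 - R1)
 *	    + TEMPERATURE_CALIB_KELVIN_OFFSET
 *
 * which is why R3 == R1 is rejected above.
 */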
4471
4472 /* Determine whether 4965 is using 2.4 GHz band */
4473 static inline int iwk_is_24G_band(iwk_sc_t *sc)
4474 {
4475 return (LE_32(sc->sc_config.flags) & RXON_FLG_BAND_24G_MSK);
4476 }
4477
4478 /* Determine whether 4965 is using fat channel */
4479 static inline int iwk_is_fat_channel(iwk_sc_t *sc)
4480 {
4481 return ((LE_32(sc->sc_config.flags) &
4482 RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
4483 (LE_32(sc->sc_config.flags) & RXON_FLG_CHANNEL_MODE_MIXED_MSK));
4484 }
4485
4486 /*
4487 * In MIMO mode, determine which group the 4965's current channel belongs to.
4488 * For more information about "channel group",
4489 * please refer to the iwk_calibration.h file
4490 */
4491 static int iwk_txpower_grp(uint16_t channel)
4492 {
4493 if (channel >= CALIB_IWK_TX_ATTEN_GR5_FCH &&
4494 channel <= CALIB_IWK_TX_ATTEN_GR5_LCH) {
4495 return (CALIB_CH_GROUP_5);
4496 }
4497
4498 if (channel >= CALIB_IWK_TX_ATTEN_GR1_FCH &&
4499 channel <= CALIB_IWK_TX_ATTEN_GR1_LCH) {
4500 return (CALIB_CH_GROUP_1);
4501 }
4502
4503 if (channel >= CALIB_IWK_TX_ATTEN_GR2_FCH &&
4504 channel <= CALIB_IWK_TX_ATTEN_GR2_LCH) {
4505 return (CALIB_CH_GROUP_2);
4506 }
4507
4508 if (channel >= CALIB_IWK_TX_ATTEN_GR3_FCH &&
4509 channel <= CALIB_IWK_TX_ATTEN_GR3_LCH) {
4510 return (CALIB_CH_GROUP_3);
4511 }
4512
4513 if (channel >= CALIB_IWK_TX_ATTEN_GR4_FCH &&
4514 channel <= CALIB_IWK_TX_ATTEN_GR4_LCH) {
4515 return (CALIB_CH_GROUP_4);
4516 }
4517
4518 cmn_err(CE_WARN, "iwk_txpower_grp(): "
4519 "can't find txpower group for channel %d.\n", channel);
4520
4521 return (DDI_FAILURE);
4522 }
4523
4524 /* 2.4 GHz */
4525 static uint16_t iwk_eep_band_1[14] = {
4526 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
4527 };
4528
4529 /* 5.2 GHz bands */
4530 static uint16_t iwk_eep_band_2[13] = {
4531 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
4532 };
4533
4534 static uint16_t iwk_eep_band_3[12] = {
4535 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
4536 };
4537
4538 static uint16_t iwk_eep_band_4[11] = {
4539 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
4540 };
4541
4542 static uint16_t iwk_eep_band_5[6] = {
4543 145, 149, 153, 157, 161, 165
4544 };
4545
4546 static uint16_t iwk_eep_band_6[7] = {
4547 1, 2, 3, 4, 5, 6, 7
4548 };
4549
4550 static uint16_t iwk_eep_band_7[11] = {
4551 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
4552 };
4553
4554 /* Get regulatory data from eeprom for a given channel */
4555 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
4556 uint16_t channel,
4557 int is_24G, int is_fat, int is_hi_chan)
4558 {
4559 int32_t i;
4560 uint16_t chan;
4561
4562 if (is_fat) { /* 11n mode */
4563
4564 if (is_hi_chan) {
4565 chan = channel - 4;
4566 } else {
4567 chan = channel;
4568 }
4569
4570 for (i = 0; i < 7; i++) {
4571 if (iwk_eep_band_6[i] == chan) {
4572 return (&sc->sc_eep_map.band_24_channels[i]);
4573 }
4574 }
4575 for (i = 0; i < 11; i++) {
4576 if (iwk_eep_band_7[i] == chan) {
4577 return (&sc->sc_eep_map.band_52_channels[i]);
4578 }
4579 }
4580 } else if (is_24G) { /* 2.4 GHz band */
4581 for (i = 0; i < 14; i++) {
4582 if (iwk_eep_band_1[i] == channel) {
4583 return (&sc->sc_eep_map.band_1_channels[i]);
4584 }
4585 }
4586 } else { /* 5 GHz band */
4587 for (i = 0; i < 13; i++) {
4588 if (iwk_eep_band_2[i] == channel) {
4589 return (&sc->sc_eep_map.band_2_channels[i]);
4590 }
4591 }
4592 for (i = 0; i < 12; i++) {
4593 if (iwk_eep_band_3[i] == channel) {
4594 return (&sc->sc_eep_map.band_3_channels[i]);
4595 }
4596 }
4597 for (i = 0; i < 11; i++) {
4598 if (iwk_eep_band_4[i] == channel) {
4599 return (&sc->sc_eep_map.band_4_channels[i]);
4600 }
4601 }
4602 for (i = 0; i < 6; i++) {
4603 if (iwk_eep_band_5[i] == channel) {
4604 return (&sc->sc_eep_map.band_5_channels[i]);
4605 }
4606 }
4607 }
4608
4609 return (NULL);
4610 }
4611
4612 /*
4613 * Determine which subband a given channel belongs
4614 * to in 2.4 GHz or 5 GHz band
4615 */
4616 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel)
4617 {
4618 int32_t b_n = -1;
4619
4620 for (b_n = 0; b_n < EEP_TX_POWER_BANDS; b_n++) {
4621 if (0 == sc->sc_eep_map.calib_info.band_info_tbl[b_n].ch_from) {
4622 continue;
4623 }
4624
4625 if ((channel >=
4626 (uint16_t)sc->sc_eep_map.calib_info.
4627 band_info_tbl[b_n].ch_from) &&
4628 (channel <=
4629 (uint16_t)sc->sc_eep_map.calib_info.
4630 band_info_tbl[b_n].ch_to)) {
4631 break;
4632 }
4633 }
4634
4635 return (b_n);
4636 }
4637
4638 /* Rounded division helper used by the interpolation operation */
4639 static int iwk_division(int32_t num, int32_t denom, int32_t *res)
4640 {
4641 int32_t sign = 1;
4642
4643 if (num < 0) {
4644 sign = -sign;
4645 num = -num;
4646 }
4647
4648 if (denom < 0) {
4649 sign = -sign;
4650 denom = -denom;
4651 }
4652
4653 *res = ((num*2 + denom) / (denom*2)) * sign;
4654
4655 return (IWK_SUCCESS);
4656 }
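/*
 * The (num*2 + denom) / (denom*2) form rounds the quotient to the nearest
 * integer (halves round away from zero) instead of truncating.  For
 * example, iwk_division(5, 2, &res) yields 3 rather than 2, and
 * iwk_division(-7, 3, &res) yields -2 (17/6 rounded, sign restored).
 */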
4657
4658 /* Perform linear interpolation between two calibration points */
4659 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
4660 int32_t x2, int32_t y2)
4661 {
4662 int32_t val;
4663
4664 if (x2 == x1) {
4665 return (y1);
4666 } else {
4667 (void) iwk_division((x2-x)*(y1-y2), (x2-x1), &val);
4668 return (val + y2);
4669 }
4670 }
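/*
 * This is plain linear interpolation with rounded division, i.e.
 *
 *	y = y2 + (x2 - x) * (y1 - y2) / (x2 - x1)
 *
 * For example (hypothetical numbers), interpolating between the points
 * (x1 = 36, y1 = 10) and (x2 = 44, y2 = 18) at x = 40 gives
 * 18 + 4 * (-8) / 8 = 14.
 */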
4671
4672 /* Get interpolated measurement data of a given channel for all chains. */
4673 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
4674 struct iwk_eep_calib_channel_info *chan_info)
4675 {
4676 int32_t ban_n;
4677 uint32_t ch1_n, ch2_n;
4678 int32_t c, m;
4679 struct iwk_eep_calib_measure *m1_p, *m2_p, *m_p;
4680
4681 /* determine subband number */
4682 ban_n = iwk_band_number(sc, channel);
4683 if (ban_n >= EEP_TX_POWER_BANDS) {
4684 return (DDI_FAILURE);
4685 }
4686
4687 ch1_n =
4688 (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch1.ch_num;
4689 ch2_n =
4690 (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch2.ch_num;
4691
4692 chan_info->ch_num = (uint8_t)channel; /* given channel number */
4693
4694 /*
4695 * go through all chains on chipset
4696 */
4697 for (c = 0; c < EEP_TX_POWER_TX_CHAINS; c++) {
4698 /*
4699 * go through all factory measurements
4700 */
4701 for (m = 0; m < EEP_TX_POWER_MEASUREMENTS; m++) {
4702 m1_p =
4703 &(sc->sc_eep_map.calib_info.
4704 band_info_tbl[ban_n].ch1.measure[c][m]);
4705 m2_p =
4706 &(sc->sc_eep_map.calib_info.band_info_tbl[ban_n].
4707 ch2.measure[c][m]);
4708 m_p = &(chan_info->measure[c][m]);
4709
4710 /*
4711 * make interpolation to get actual
4712 * Tx power for given channel
4713 */
4714 m_p->actual_pow = iwk_interpolate_value(channel,
4715 ch1_n, m1_p->actual_pow,
4716 ch2_n, m2_p->actual_pow);
4717
4718 /* make interpolation to get index into gain table */
4719 m_p->gain_idx = iwk_interpolate_value(channel,
4720 ch1_n, m1_p->gain_idx,
4721 ch2_n, m2_p->gain_idx);
4722
4723 /* make interpolation to get chipset temperature */
4724 m_p->temperature = iwk_interpolate_value(channel,
4725 ch1_n, m1_p->temperature,
4726 ch2_n, m2_p->temperature);
4727
4728 /*
4729 * make interpolation to get power
4730 * amp detector level
4731 */
4732 m_p->pa_det = iwk_interpolate_value(channel, ch1_n,
4733 m1_p->pa_det,
4734 ch2_n, m2_p->pa_det);
4735 }
4736 }
4737
4738 return (IWK_SUCCESS);
4739 }
4740
4741 /*
4742 * Calculate voltage compensation for Tx power. For more information,
4743 * please refer to the iwk_calibration.h file
4744 */
4745 static int32_t iwk_voltage_compensation(int32_t eep_voltage,
4746 int32_t curr_voltage)
4747 {
4748 int32_t vol_comp = 0;
4749
4750 if ((TX_POWER_IWK_ILLEGAL_VOLTAGE == eep_voltage) ||
4751 (TX_POWER_IWK_ILLEGAL_VOLTAGE == curr_voltage)) {
4752 return (vol_comp);
4753 }
4754
4755 (void) iwk_division(curr_voltage-eep_voltage,
4756 TX_POWER_IWK_VOLTAGE_CODES_PER_03V, &vol_comp);
4757
4758 if (curr_voltage > eep_voltage) {
4759 vol_comp *= 2;
4760 }
4761 if ((vol_comp < -2) || (vol_comp > 2)) {
4762 vol_comp = 0;
4763 }
4764
4765 return (vol_comp);
4766 }
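/*
 * Roughly, the compensation is the difference between the current (init
 * alive) voltage reading and the factory EEPROM reading, divided by
 * TX_POWER_IWK_VOLTAGE_CODES_PER_03V, doubled when the current reading is
 * the higher of the two, and finally clamped to the range [-2, 2].
 * Illegal voltage readings yield a compensation of 0.
 */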
4767
4768 /*
4769 * Thermal compensation values for txpower for various frequency ranges ...
4770 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust
4771 */
4772 static struct iwk_txpower_tempera_comp {
4773 int32_t degrees_per_05db_a;
4774 int32_t degrees_per_05db_a_denom;
4775 } txpower_tempera_comp_table[CALIB_CH_GROUP_MAX] = {
4776 {9, 2}, /* group 0 5.2, ch 34-43 */
4777 {4, 1}, /* group 1 5.2, ch 44-70 */
4778 {4, 1}, /* group 2 5.2, ch 71-124 */
4779 {4, 1}, /* group 3 5.2, ch 125-200 */
4780 {3, 1} /* group 4 2.4, ch all */
4781 };
4782
4783 /*
4784 * bit-rate-dependent table to prevent Tx distortion, in half-dB units,
4785 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
4786 */
4787 static int32_t back_off_table[] = {
4788 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
4789 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
4790 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
4791 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
4792 10 /* CCK */
4793 };
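/*
 * The table parallels the rate entries used by iwk_txpower_table_cmd_init()
 * below: four rows of eight OFDM entries (SISO 20 MHz, MIMO 20 MHz,
 * SISO 40 MHz, MIMO 40 MHz) followed by a single entry covering all CCK
 * rates, which is also why (r & 0x8) in that function identifies the MIMO
 * rows.
 */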
4794
4795 /* determine minimum Tx power index in gain table */
4796 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G)
4797 {
4798 if ((!is_24G) && ((rate_pow_idx & 7) <= 4)) {
4799 return (MIN_TX_GAIN_INDEX_52GHZ_EXT);
4800 }
4801
4802 return (MIN_TX_GAIN_INDEX);
4803 }
4804
4805 /*
4806 * Determine DSP and radio gain according to temperature and other factors.
4807 * This function does the majority of the Tx power calibration work
4808 */
4809 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc,
4810 struct iwk_tx_power_db *tp_db)
4811 {
4812 int is_24G, is_fat, is_high_chan, is_mimo;
4813 int c, r;
4814 int32_t target_power;
4815 int32_t tx_grp = CALIB_CH_GROUP_MAX;
4816 uint16_t channel;
4817 uint8_t saturation_power;
4818 int32_t regu_power;
4819 int32_t curr_regu_power;
4820 struct iwk_eep_channel *eep_chan_p;
4821 struct iwk_eep_calib_channel_info eep_chan_calib;
4822 int32_t eep_voltage, init_voltage;
4823 int32_t voltage_compensation;
4824 int32_t temperature;
4825 int32_t degrees_per_05db_num;
4826 int32_t degrees_per_05db_denom;
4827 struct iwk_eep_calib_measure *measure_p;
4828 int32_t interpo_temp;
4829 int32_t power_limit;
4830 int32_t atten_value;
4831 int32_t tempera_comp[2];
4832 int32_t interpo_gain_idx[2];
4833 int32_t interpo_actual_pow[2];
4834 union iwk_tx_power_dual_stream txpower_gains;
4835 int32_t txpower_gains_idx;
4836
4837 channel = LE_16(sc->sc_config.chan);
4838
4839 /* 2.4 GHz or 5 GHz band */
4840 is_24G = iwk_is_24G_band(sc);
4841
4842 /* fat channel or not */
4843 is_fat = iwk_is_fat_channel(sc);
4844
4845 /*
4846 * use the low half or the high half channel number to
4847 * identify the fat channel
4848 */
4849 if (is_fat && (LE_32(sc->sc_config.flags) &
4850 RXON_FLG_CONTROL_CHANNEL_LOC_HIGH_MSK)) {
4851 is_high_chan = 1;
4852 }
4853
4854 if ((channel > 0) && (channel < 200)) {
4855 /* get regulatory channel data from eeprom */
4856 eep_chan_p = iwk_get_eep_channel(sc, channel, is_24G,
4857 is_fat, is_high_chan);
4858 if (NULL == eep_chan_p) {
4859 cmn_err(CE_WARN,
4860 "iwk_txpower_table_cmd_init(): "
4861 "can't get channel infomation\n");
4862 return (DDI_FAILURE);
4863 }
4864 } else {
4865 cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4866 "channel(%d) isn't in proper range\n",
4867 channel);
4868 return (DDI_FAILURE);
4869 }
4870
4871 /* initial value of Tx power */
4872 sc->sc_user_txpower = (int32_t)eep_chan_p->max_power_avg;
4873 if (sc->sc_user_txpower < IWK_TX_POWER_TARGET_POWER_MIN) {
4874 cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4875 "user TX power is too weak\n");
4876 return (DDI_FAILURE);
4877 } else if (sc->sc_user_txpower > IWK_TX_POWER_TARGET_POWER_MAX) {
4878 cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4879 "user TX power is too strong\n");
4880 return (DDI_FAILURE);
4881 }
4882
4883 target_power = 2 * sc->sc_user_txpower;
4884
4885 /* determine which group current channel belongs to */
4886 tx_grp = iwk_txpower_grp(channel);
4887 if (tx_grp < 0) {
4888 return (tx_grp);
4889 }
4890
4891
4892 if (is_fat) {
4893 if (is_high_chan) {
4894 channel -= 2;
4895 } else {
4896 channel += 2;
4897 }
4898 }
4899
4900 /* determine saturation power */
4901 if (is_24G) {
4902 saturation_power =
4903 sc->sc_eep_map.calib_info.saturation_power24;
4904 } else {
4905 saturation_power =
4906 sc->sc_eep_map.calib_info.saturation_power52;
4907 }
4908
4909 if (saturation_power < IWK_TX_POWER_SATURATION_MIN ||
4910 saturation_power > IWK_TX_POWER_SATURATION_MAX) {
4911 if (is_24G) {
4912 saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_24;
4913 } else {
4914 saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_52;
4915 }
4916 }
4917
4918 /* determine regulatory power */
4919 regu_power = (int32_t)eep_chan_p->max_power_avg * 2;
4920 if ((regu_power < IWK_TX_POWER_REGULATORY_MIN) ||
4921 (regu_power > IWK_TX_POWER_REGULATORY_MAX)) {
4922 if (is_24G) {
4923 regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_24;
4924 } else {
4925 regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_52;
4926 }
4927 }
4928
4929 /*
4930 * get measurement data for the current channel,
4931 * such as temperature, index into the gain table, and actual Tx power
4932 */
4933 (void) iwk_channel_interpolate(sc, channel, &eep_chan_calib);
4934
4935 eep_voltage = (int32_t)LE_16(sc->sc_eep_map.calib_info.voltage);
4936 init_voltage = (int32_t)LE_32(sc->sc_card_alive_init.voltage);
4937
4938 /* calculate voltage compensation to Tx power */
4939 voltage_compensation =
4940 iwk_voltage_compensation(eep_voltage, init_voltage);
4941
4942 if (sc->sc_tempera >= IWK_TX_POWER_TEMPERATURE_MIN) {
4943 temperature = sc->sc_tempera;
4944 } else {
4945 temperature = IWK_TX_POWER_TEMPERATURE_MIN;
4946 }
4947 if (sc->sc_tempera <= IWK_TX_POWER_TEMPERATURE_MAX) {
4948 temperature = sc->sc_tempera;
4949 } else {
4950 temperature = IWK_TX_POWER_TEMPERATURE_MAX;
4951 }
4952 temperature = KELVIN_TO_CELSIUS(temperature);
4953
4954 degrees_per_05db_num =
4955 txpower_tempera_comp_table[tx_grp].degrees_per_05db_a;
4956 degrees_per_05db_denom =
4957 txpower_tempera_comp_table[tx_grp].degrees_per_05db_a_denom;
4958
4959 for (c = 0; c < 2; c++) { /* go through all chains */
4960 measure_p = &eep_chan_calib.measure[c][1];
4961 interpo_temp = measure_p->temperature;
4962
4963 /* determine temperature compensation to Tx power */
4964 (void) iwk_division(
4965 (temperature-interpo_temp)*degrees_per_05db_denom,
4966 degrees_per_05db_num, &tempera_comp[c]);
4967
4968 interpo_gain_idx[c] = measure_p->gain_idx;
4969 interpo_actual_pow[c] = measure_p->actual_pow;
4970 }
4971
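/*
 * Sketch of the per-rate gain lookup that follows: for each rate entry the
 * target power is capped by the regulatory limit and by the saturation
 * power minus the rate's back-off, and then for each chain the gain table
 * index is
 *
 *	idx = interpolated_gain_idx
 *	    - (target_power - interpolated_actual_pow)
 *	    - temperature_compensation - voltage_compensation
 *	    + attenuation (MIMO entries only)
 *
 * The index is floored at the band's minimum, shifted by 9 for the 5 GHz
 * band, bumped by IWK_TX_POWER_CCK_COMPENSATION_C_STEP for the CCK entry,
 * clamped to [0, 107], and finally used to look up the radio and DSP gain
 * values in gains_table[].
 */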
4972 /*
4973 * go through all rate entries in Tx power table
4974 */
4975 for (r = 0; r < POWER_TABLE_NUM_ENTRIES; r++) {
4976 if (r & 0x8) {
4977 /* need to lower regulatory power for MIMO mode */
4978 curr_regu_power = regu_power -
4979 IWK_TX_POWER_MIMO_REGULATORY_COMPENSATION;
4980 is_mimo = 1;
4981 } else {
4982 curr_regu_power = regu_power;
4983 is_mimo = 0;
4984 }
4985
4986 power_limit = saturation_power - back_off_table[r];
4987 if (power_limit > curr_regu_power) {
4988 /* final Tx power limit */
4989 power_limit = curr_regu_power;
4990 }
4991
4992 if (target_power > power_limit) {
4993 target_power = power_limit; /* final target Tx power */
4994 }
4995
4996 for (c = 0; c < 2; c++) { /* go through all Tx chains */
4997 if (is_mimo) {
4998 atten_value =
4999 LE_32(sc->sc_card_alive_init.
5000 tx_atten[tx_grp][c]);
5001 } else {
5002 atten_value = 0;
5003 }
5004
5005 /*
5006 * calculate index in gain table
5007 * this step is very important
5008 */
5009 txpower_gains_idx = interpo_gain_idx[c] -
5010 (target_power - interpo_actual_pow[c]) -
5011 tempera_comp[c] - voltage_compensation +
5012 atten_value;
5013
5014 if (txpower_gains_idx <
5015 iwk_min_power_index(r, is_24G)) {
5016 txpower_gains_idx =
5017 iwk_min_power_index(r, is_24G);
5018 }
5019
5020 if (!is_24G) {
5021 /*
5022 * support negative index for 5 GHz
5023 * band
5024 */
5025 txpower_gains_idx += 9;
5026 }
5027
5028 if (POWER_TABLE_CCK_ENTRY == r) {
5029 /* for CCK mode, apply the necessary attenuation */
5030 txpower_gains_idx +=
5031 IWK_TX_POWER_CCK_COMPENSATION_C_STEP;
5032 }
5033
5034 if (txpower_gains_idx > 107) {
5035 txpower_gains_idx = 107;
5036 } else if (txpower_gains_idx < 0) {
5037 txpower_gains_idx = 0;
5038 }
5039
5040 /* search DSP and radio gains in gain table */
5041 txpower_gains.s.radio_tx_gain[c] =
5042 gains_table[is_24G][txpower_gains_idx].radio;
5043 txpower_gains.s.dsp_predis_atten[c] =
5044 gains_table[is_24G][txpower_gains_idx].dsp;
5045
5046 IWK_DBG((IWK_DEBUG_CALIBRATION,
5047 "rate_index: %d, "
5048 "gain_index %d, c: %d,is_mimo: %d\n",
5049 r, txpower_gains_idx, c, is_mimo));
5050 }
5051
5052 /* initialize Tx power table */
5053 if (r < POWER_TABLE_NUM_HT_OFDM_ENTRIES) {
5054 tp_db->ht_ofdm_power[r].dw = LE_32(txpower_gains.dw);
5055 } else {
5056 tp_db->legacy_cck_power.dw = LE_32(txpower_gains.dw);
5057 }
5058 }
5059
5060 return (IWK_SUCCESS);
5061 }
5062
5063 /*
5064 * perform Tx power calibration to adjust the Tx power.
5065 * This is done by sending out the Tx power table command.
5066 */
5067 static int iwk_tx_power_calibration(iwk_sc_t *sc)
5068 {
5069 iwk_tx_power_table_cmd_t cmd;
5070 int rv;
5071
5072 if (sc->sc_flags & IWK_F_SCANNING) {
5073 return (IWK_SUCCESS);
5074 }
5075
5076 /* necessary initialization to Tx power table command */
5077 cmd.band = (uint8_t)iwk_is_24G_band(sc);
5078 cmd.channel = sc->sc_config.chan;
5079 cmd.channel_normal_width = 0;
5080
5081 /* initialize Tx power table */
5082 rv = iwk_txpower_table_cmd_init(sc, &cmd.tx_power);
5083 if (rv) {
5084 cmn_err(CE_NOTE, "rv= %d\n", rv);
5085 return (rv);
5086 }
5087
5088 /* send out Tx power table command */
5089 rv = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &cmd, sizeof (cmd), 1);
5090 if (rv) {
5091 return (rv);
5092 }
5093
5094 /* record current temperature */
5095 sc->sc_last_tempera = sc->sc_tempera;
5096
5097 return (IWK_SUCCESS);
5098 }
5099
5100 /* This function handles the statistics notification from the uCode */
5101 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc)
5102 {
5103 int is_diff;
5104 struct iwk_notif_statistics *statistics_p =
5105 (struct iwk_notif_statistics *)(desc + 1);
5106
5107 mutex_enter(&sc->sc_glock);
5108
5109 is_diff = (sc->sc_statistics.general.temperature !=
5110 statistics_p->general.temperature) ||
5111 (LE_32(sc->sc_statistics.flag) &
5112 STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
5113 (LE_32(statistics_p->flag) & STATISTICS_REPLY_FLG_FAT_MODE_MSK);
5114
5115 /* update statistics data */
5116 (void) memcpy(&sc->sc_statistics, statistics_p,
5117 sizeof (struct iwk_notif_statistics));
5118
5119 sc->sc_flags |= IWK_F_STATISTICS;
5120
5121 if (!(sc->sc_flags & IWK_F_SCANNING)) {
5122 /* make Receiver gain balance calibration */
5123 (void) iwk_rxgain_diff(sc);
5124
5125 /* make Receiver sensitivity calibration */
5126 (void) iwk_rx_sens(sc);
5127 }
5128
5129
5130 if (!is_diff) {
5131 mutex_exit(&sc->sc_glock);
5132 return;
5133 }
5134
5135 /* calculate the current temperature of the 4965 chipset */
5136 sc->sc_tempera = iwk_curr_tempera(sc);
5137
5138 /* a distinct temperature change will trigger Tx power calibration */
5139 if (((sc->sc_tempera - sc->sc_last_tempera) >= 3) ||
5140 ((sc->sc_last_tempera - sc->sc_tempera) >= 3)) {
5141 /* make Tx power calibration */
5142 (void) iwk_tx_power_calibration(sc);
5143 }
5144
5145 mutex_exit(&sc->sc_glock);
5146 }
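/*
 * Summarizing the handler above: each statistics notification refreshes
 * sc_statistics, and outside of a scan it drives the Rx gain balance and
 * Rx sensitivity calibrations.  When the reported temperature or FAT-mode
 * flag differs from the previous notification, the chipset temperature is
 * recomputed, and a change of 3 or more (in iwk_curr_tempera() units)
 * against the last calibrated value triggers a new Tx power calibration.
 */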
5147
5148 /* Determine whether this station is in the associated state or not */
5149 static int iwk_is_associated(iwk_sc_t *sc)
5150 {
5151 return (LE_32(sc->sc_config.filter_flags) & RXON_FILTER_ASSOC_MSK);
5152 }
5153
5154 /* Make necessary preparation for Receiver gain balance calibration */
5155 static int iwk_rxgain_diff_init(iwk_sc_t *sc)
5156 {
5157 int i, rv;
5158 struct iwk_calibration_cmd cmd;
5159 struct iwk_rx_gain_diff *gain_diff_p;
5160
5161 gain_diff_p = &sc->sc_rxgain_diff;
5162
5163 (void) memset(gain_diff_p, 0, sizeof (struct iwk_rx_gain_diff));
5164 (void) memset(&cmd, 0, sizeof (struct iwk_calibration_cmd));
5165
5166 for (i = 0; i < RX_CHAINS_NUM; i++) {
5167 gain_diff_p->gain_diff_chain[i] = CHAIN_GAIN_DIFF_INIT_VAL;
5168 }
5169
5170 if (iwk_is_associated(sc)) {
5171 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
5172 cmd.diff_gain_a = 0;
5173 cmd.diff_gain_b = 0;
5174 cmd.diff_gain_c = 0;
5175
5176 /* assume the gains of all Rx chains are balanced */
5177 rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &cmd,
5178 sizeof (cmd), 1);
5179 if (rv) {
5180 return (rv);
5181 }
5182
5183 gain_diff_p->state = IWK_GAIN_DIFF_ACCUMULATE;
5184 }
5185
5186 return (IWK_SUCCESS);
5187 }
5188
5189 /*
5190 * perform Receiver gain balance calibration to balance Rx gain between chains
5191 * and determine which chain is disconnected
5192 */
5193 static int iwk_rxgain_diff(iwk_sc_t *sc)
5194 {
5195 int i, is_24G, rv;
5196 int max_beacon_chain_n;
5197 int min_noise_chain_n;
5198 uint16_t channel_n;
5199 int32_t beacon_diff;
5200 int32_t noise_diff;
5201 uint32_t noise_chain_a, noise_chain_b, noise_chain_c;
5202 uint32_t beacon_chain_a, beacon_chain_b, beacon_chain_c;
5203 struct iwk_calibration_cmd cmd;
5204 uint32_t beacon_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
5205 uint32_t noise_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
5206 struct statistics_rx_non_phy *rx_general_p =
5207 &sc->sc_statistics.rx.general;
5208 struct iwk_rx_gain_diff *gain_diff_p = &sc->sc_rxgain_diff;
5209
5210 if (INTERFERENCE_DATA_AVAILABLE !=
5211 LE_32(rx_general_p->interference_data_flag)) {
5212 return (IWK_SUCCESS);
5213 }
5214
5215 if (IWK_GAIN_DIFF_ACCUMULATE != gain_diff_p->state) {
5216 return (IWK_SUCCESS);
5217 }
5218
5219 is_24G = iwk_is_24G_band(sc);
5220 channel_n = sc->sc_config.chan; /* channel number */
5221
5222 if ((channel_n != (LE_32(sc->sc_statistics.flag) >> 16)) ||
5223 ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
5224 (LE_32(sc->sc_statistics.flag) &
5225 STATISTICS_REPLY_FLG_BAND_24G_MSK)) &&
5226 !is_24G)) {
5227 return (IWK_SUCCESS);
5228 }
5229
5230 /* Rx chain's noise strength from statistics notification */
5231 noise_chain_a = LE_32(rx_general_p->beacon_silence_rssi_a) & 0xFF;
5232 noise_chain_b = LE_32(rx_general_p->beacon_silence_rssi_b) & 0xFF;
5233 noise_chain_c = LE_32(rx_general_p->beacon_silence_rssi_c) & 0xFF;
5234
5235 /* Rx chain's beacon strength from statistics notification */
5236 beacon_chain_a = LE_32(rx_general_p->beacon_rssi_a) & 0xFF;
5237 beacon_chain_b = LE_32(rx_general_p->beacon_rssi_b) & 0xFF;
5238 beacon_chain_c = LE_32(rx_general_p->beacon_rssi_c) & 0xFF;
5239
5240 gain_diff_p->beacon_count++;
5241
5242 /* accumulate chain's noise strength */
5243 gain_diff_p->noise_stren_a += noise_chain_a;
5244 gain_diff_p->noise_stren_b += noise_chain_b;
5245 gain_diff_p->noise_stren_c += noise_chain_c;
5246
5247 /* accumulate chain's beacon strength */
5248 gain_diff_p->beacon_stren_a += beacon_chain_a;
5249 gain_diff_p->beacon_stren_b += beacon_chain_b;
5250 gain_diff_p->beacon_stren_c += beacon_chain_c;
5251
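/*
 * Once BEACON_NUM_20 (20) samples have been accumulated, the values are
 * averaged.  The chain with the strongest average beacon is taken as the
 * reference; any chain whose average beacon is more than MAX_ALLOWED_DIFF
 * below the reference is marked disconnected, and the remaining chains
 * get a gain correction derived from their noise difference relative to
 * the quietest connected chain.
 */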
5252 if (BEACON_NUM_20 == gain_diff_p->beacon_count) {
5253 /* calculate average beacon strength */
5254 beacon_aver[0] = (gain_diff_p->beacon_stren_a) / BEACON_NUM_20;
5255 beacon_aver[1] = (gain_diff_p->beacon_stren_b) / BEACON_NUM_20;
5256 beacon_aver[2] = (gain_diff_p->beacon_stren_c) / BEACON_NUM_20;
5257
5258 /* calculate average noise strength */
5259 noise_aver[0] = (gain_diff_p->noise_stren_a) / BEACON_NUM_20;
5260 noise_aver[1] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20;
5261 noise_aver[2] = (gain_diff_p->noise_stren_c) / BEACON_NUM_20;
5262
5263 /* determine maximum beacon strength among 3 chains */
5264 if ((beacon_aver[0] >= beacon_aver[1]) &&
5265 (beacon_aver[0] >= beacon_aver[2])) {
5266 max_beacon_chain_n = 0;
5267 gain_diff_p->connected_chains = 1 << 0;
5268 } else if (beacon_aver[1] >= beacon_aver[2]) {
5269 max_beacon_chain_n = 1;
5270 gain_diff_p->connected_chains = 1 << 1;
5271 } else {
5272 max_beacon_chain_n = 2;
5273 gain_diff_p->connected_chains = 1 << 2;
5274 }
5275
5276 /* determine which chain is disconnected */
5277 for (i = 0; i < RX_CHAINS_NUM; i++) {
5278 if (i != max_beacon_chain_n) {
5279 beacon_diff = beacon_aver[max_beacon_chain_n] -
5280 beacon_aver[i];
5281 if (beacon_diff > MAX_ALLOWED_DIFF) {
5282 gain_diff_p->disconnect_chain[i] = 1;
5283 } else {
5284 gain_diff_p->connected_chains |=
5285 (1 << i);
5286 }
5287 }
5288 }
5289
5290 /*
5291 * if chain A and B are both disconnected,
5292 * assume the one with the stronger beacon strength is connected
5293 */
5294 if (gain_diff_p->disconnect_chain[0] &&
5295 gain_diff_p->disconnect_chain[1]) {
5296 if (beacon_aver[0] >= beacon_aver[1]) {
5297 gain_diff_p->disconnect_chain[0] = 0;
5298 gain_diff_p->connected_chains |= (1 << 0);
5299 } else {
5300 gain_diff_p->disconnect_chain[1] = 0;
5301 gain_diff_p->connected_chains |= (1 << 1);
5302 }
5303 }
5304
5305 /* determine minimum noise strength among 3 chains */
5306 if (!gain_diff_p->disconnect_chain[0]) {
5307 min_noise_chain_n = 0;
5308
5309 for (i = 0; i < RX_CHAINS_NUM; i++) {
5310 if (!gain_diff_p->disconnect_chain[i] &&
5311 (noise_aver[i] <=
5312 noise_aver[min_noise_chain_n])) {
5313 min_noise_chain_n = i;
5314 }
5315
5316 }
5317 } else {
5318 min_noise_chain_n = 1;
5319
5320 for (i = 0; i < RX_CHAINS_NUM; i++) {
5321 if (!gain_diff_p->disconnect_chain[i] &&
5322 (noise_aver[i] <=
5323 noise_aver[min_noise_chain_n])) {
5324 min_noise_chain_n = i;
5325 }
5326 }
5327 }
5328
5329 gain_diff_p->gain_diff_chain[min_noise_chain_n] = 0;
5330
5331 /* determine gain difference between chains */
5332 for (i = 0; i < RX_CHAINS_NUM; i++) {
5333 if (!gain_diff_p->disconnect_chain[i] &&
5334 (CHAIN_GAIN_DIFF_INIT_VAL ==
5335 gain_diff_p->gain_diff_chain[i])) {
5336
5337 noise_diff = noise_aver[i] -
5338 noise_aver[min_noise_chain_n];
5339 gain_diff_p->gain_diff_chain[i] =
5340 (uint8_t)((noise_diff * 10) / 15);
5341
5342 if (gain_diff_p->gain_diff_chain[i] > 3) {
5343 gain_diff_p->gain_diff_chain[i] = 3;
5344 }
5345
5346 gain_diff_p->gain_diff_chain[i] |= (1 << 2);
5347 } else {
5348 gain_diff_p->gain_diff_chain[i] = 0;
5349 }
5350 }
5351
5352 if (!gain_diff_p->gain_diff_send) {
5353 gain_diff_p->gain_diff_send = 1;
5354
5355 (void) memset(&cmd, 0, sizeof (cmd));
5356
5357 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
5358 cmd.diff_gain_a = gain_diff_p->gain_diff_chain[0];
5359 cmd.diff_gain_b = gain_diff_p->gain_diff_chain[1];
5360 cmd.diff_gain_c = gain_diff_p->gain_diff_chain[2];
5361
5362 /*
5363 * send out PHY calibration command to
5364 * adjust every chain's Rx gain
5365 */
5366 rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
5367 &cmd, sizeof (cmd), 1);
5368 if (rv) {
5369 return (rv);
5370 }
5371
5372 gain_diff_p->state = IWK_GAIN_DIFF_CALIBRATED;
5373 }
5374
5375 gain_diff_p->beacon_stren_a = 0;
5376 gain_diff_p->beacon_stren_b = 0;
5377 gain_diff_p->beacon_stren_c = 0;
5378
5379 gain_diff_p->noise_stren_a = 0;
5380 gain_diff_p->noise_stren_b = 0;
5381 gain_diff_p->noise_stren_c = 0;
5382 }
5383
5384 return (IWK_SUCCESS);
5385 }
5386
5387 /* Make necessary preparation for Receiver sensitivity calibration */
5388 static int iwk_rx_sens_init(iwk_sc_t *sc)
5389 {
5390 int i, rv;
5391 struct iwk_rx_sensitivity_cmd cmd;
5392 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5393
5394 (void) memset(&cmd, 0, sizeof (struct iwk_rx_sensitivity_cmd));
5395 (void) memset(rx_sens_p, 0, sizeof (struct iwk_rx_sensitivity));
5396
5397 rx_sens_p->auto_corr_ofdm_x4 = 90;
5398 rx_sens_p->auto_corr_mrc_ofdm_x4 = 170;
5399 rx_sens_p->auto_corr_ofdm_x1 = 105;
5400 rx_sens_p->auto_corr_mrc_ofdm_x1 = 220;
5401
5402 rx_sens_p->auto_corr_cck_x4 = 125;
5403 rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5404 rx_sens_p->min_energy_det_cck = 100;
5405
5406 rx_sens_p->flags &= (~IWK_SENSITIVITY_CALIB_ALLOW_MSK);
5407 rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5408 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5409
5410 rx_sens_p->last_bad_plcp_cnt_ofdm = 0;
5411 rx_sens_p->last_false_alarm_cnt_ofdm = 0;
5412 rx_sens_p->last_bad_plcp_cnt_cck = 0;
5413 rx_sens_p->last_false_alarm_cnt_cck = 0;
5414
5415 rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5416 rx_sens_p->cck_prev_state = IWK_TOO_MANY_FALSE_ALARM;
5417 rx_sens_p->cck_no_false_alarm_num = 0;
5418 rx_sens_p->cck_beacon_idx = 0;
5419
5420 for (i = 0; i < 10; i++) {
5421 rx_sens_p->cck_beacon_min[i] = 0;
5422 }
5423
5424 rx_sens_p->cck_noise_idx = 0;
5425 rx_sens_p->cck_noise_ref = 0;
5426
5427 for (i = 0; i < 20; i++) {
5428 rx_sens_p->cck_noise_max[i] = 0;
5429 }
5430
5431 rx_sens_p->cck_noise_diff = 0;
5432 rx_sens_p->cck_no_false_alarm_num = 0;
5433
5434 cmd.control = LE_16(IWK_SENSITIVITY_CONTROL_WORK_TABLE);
5435
5436 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5437 LE_16(rx_sens_p->auto_corr_ofdm_x4);
5438 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5439 LE_16(rx_sens_p->auto_corr_mrc_ofdm_x4);
5440 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5441 LE_16(rx_sens_p->auto_corr_ofdm_x1);
5442 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5443 LE_16(rx_sens_p->auto_corr_mrc_ofdm_x1);
5444
5445 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5446 LE_16(rx_sens_p->auto_corr_cck_x4);
5447 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5448 LE_16(rx_sens_p->auto_corr_mrc_cck_x4);
5449 cmd.table[MIN_ENERGY_CCK_DET_IDX] =
5450 LE_16(rx_sens_p->min_energy_det_cck);
5451
5452 cmd.table[MIN_ENERGY_OFDM_DET_IDX] = LE_16(100);
5453 cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = LE_16(190);
5454 cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = LE_16(390);
5455 cmd.table[PTAM_ENERGY_TH_IDX] = LE_16(62);
5456
5457 /* at first, set up Rx to maximum sensitivity */
5458 rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5459 if (rv) {
5460 cmn_err(CE_WARN, "iwk_rx_sens_init(): "
5461 "in the process of initialization, "
5462 "failed to send rx sensitivity command\n");
5463 return (rv);
5464 }
5465
5466 rx_sens_p->flags |= IWK_SENSITIVITY_CALIB_ALLOW_MSK;
5467
5468 return (IWK_SUCCESS);
5469 }
5470
5471 /*
5472 * make Receiver sensitivity calibration to adjust every chain's Rx sensitivity.
5473 * for more infomation, please refer to iwk_calibration.h file
5474 */
5475 static int iwk_rx_sens(iwk_sc_t *sc)
5476 {
5477 int rv;
5478 uint32_t actual_rx_time;
5479 struct statistics_rx_non_phy *rx_general_p =
5480 &sc->sc_statistics.rx.general;
5481 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5482 struct iwk_rx_sensitivity_cmd cmd;
5483
5484 if (!(rx_sens_p->flags & IWK_SENSITIVITY_CALIB_ALLOW_MSK)) {
5485 cmn_err(CE_WARN, "iwk_rx_sens(): "
5486 "sensitivity initialization has not finished.\n");
5487 return (DDI_FAILURE);
5488 }
5489
5490 if (INTERFERENCE_DATA_AVAILABLE !=
5491 LE_32(rx_general_p->interference_data_flag)) {
5492 cmn_err(CE_WARN, "iwk_rx_sens(): "
5493 "can't make rx sensitivity calibration,"
5494 "because of invalid statistics\n");
5495 return (DDI_FAILURE);
5496 }
5497
5498 actual_rx_time = LE_32(rx_general_p->channel_load);
5499 if (!actual_rx_time) {
5500 IWK_DBG((IWK_DEBUG_CALIBRATION, "iwk_rx_sens(): "
5501 "can't make rx sensitivity calibration,"
5502 "because has not enough rx time\n"));
5503 return (DDI_FAILURE);
5504 }
5505
5506 /* make Rx sensitivity calibration for OFDM mode */
5507 rv = iwk_ofdm_sens(sc, actual_rx_time);
5508 if (rv) {
5509 return (rv);
5510 }
5511
5512 /* make Rx sensitivity calibration for CCK mode */
5513 rv = iwk_cck_sens(sc, actual_rx_time);
5514 if (rv) {
5515 return (rv);
5516 }
5517
5518 /*
5519 * if the false alarm counts have not changed, nothing will be done
5520 */
5521 if ((!(rx_sens_p->flags & IWK_SENSITIVITY_OFDM_UPDATE_MSK)) &&
5522 (!(rx_sens_p->flags & IWK_SENSITIVITY_CCK_UPDATE_MSK))) {
5523 return (IWK_SUCCESS);
5524 }
5525
5526 cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
5527
5528 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5529 rx_sens_p->auto_corr_ofdm_x4;
5530 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5531 rx_sens_p->auto_corr_mrc_ofdm_x4;
5532 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5533 rx_sens_p->auto_corr_ofdm_x1;
5534 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5535 rx_sens_p->auto_corr_mrc_ofdm_x1;
5536
5537 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5538 rx_sens_p->auto_corr_cck_x4;
5539 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5540 rx_sens_p->auto_corr_mrc_cck_x4;
5541 cmd.table[MIN_ENERGY_CCK_DET_IDX] =
5542 rx_sens_p->min_energy_det_cck;
5543
5544 cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5545 cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5546 cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5547 cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5548
5549 /*
5550 * send sensitivity command to complete actual sensitivity calibration
5551 */
5552 rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5553 if (rv) {
5554 cmn_err(CE_WARN, "iwk_rx_sens(): "
5555 "fail to send rx sensitivity command\n");
5556 return (rv);
5557 }
5558
5559 return (IWK_SUCCESS);
5560
5561 }
5562
5563 /*
5564 * make Rx sensitivity calibration for CCK mode.
5565 * This prepares the parameters for the sensitivity command
5566 */
5567 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5568 {
5569 int i;
5570 uint8_t noise_a, noise_b, noise_c;
5571 uint8_t max_noise_abc, max_noise_20;
5572 uint32_t beacon_a, beacon_b, beacon_c;
5573 uint32_t min_beacon_abc, max_beacon_10;
5574 uint32_t cck_fa, cck_bp;
5575 uint32_t cck_sum_fa_bp;
5576 uint32_t temp;
5577 struct statistics_rx_non_phy *rx_general_p =
5578 &sc->sc_statistics.rx.general;
5579 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5580
5581 cck_fa = LE_32(sc->sc_statistics.rx.cck.false_alarm_cnt);
5582 cck_bp = LE_32(sc->sc_statistics.rx.cck.plcp_err);
5583
5584 /* accumulate false alarm */
5585 if (rx_sens_p->last_false_alarm_cnt_cck > cck_fa) {
5586 temp = rx_sens_p->last_false_alarm_cnt_cck;
5587 rx_sens_p->last_false_alarm_cnt_cck = cck_fa;
5588 cck_fa += (0xFFFFFFFF - temp);
5589 } else {
5590 cck_fa -= rx_sens_p->last_false_alarm_cnt_cck;
5591 rx_sens_p->last_false_alarm_cnt_cck += cck_fa;
5592 }
5593
5594 /* accumulate bad plcp */
5595 if (rx_sens_p->last_bad_plcp_cnt_cck > cck_bp) {
5596 temp = rx_sens_p->last_bad_plcp_cnt_cck;
5597 rx_sens_p->last_bad_plcp_cnt_cck = cck_bp;
5598 cck_bp += (0xFFFFFFFF - temp);
5599 } else {
5600 cck_bp -= rx_sens_p->last_bad_plcp_cnt_cck;
5601 rx_sens_p->last_bad_plcp_cnt_cck += cck_bp;
5602 }
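/*
 * Both blocks above turn the monotonically increasing 32-bit uCode
 * counters into per-interval deltas, handling a counter wrapping past
 * 0xFFFFFFFF along the way, so cck_fa and cck_bp now hold the number of
 * false alarms and bad PLCP headers seen since the previous statistics
 * notification.
 */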
5603
5604 /*
5605 * calculate relative value
5606 */
5607 cck_sum_fa_bp = (cck_fa + cck_bp) * 200 * 1024;
5608 rx_sens_p->cck_noise_diff = 0;
5609
5610 noise_a =
5611 (uint8_t)((LE_32(rx_general_p->beacon_silence_rssi_a) & 0xFF00) >>
5612 8);
5613 noise_b =
5614 (uint8_t)((LE_32(rx_general_p->beacon_silence_rssi_b) & 0xFF00) >>
5615 8);
5616 noise_c =
5617 (uint8_t)((LE_32(rx_general_p->beacon_silence_rssi_c) & 0xFF00) >>
5618 8);
5619
5620 beacon_a = LE_32(rx_general_p->beacon_energy_a);
5621 beacon_b = LE_32(rx_general_p->beacon_energy_b);
5622 beacon_c = LE_32(rx_general_p->beacon_energy_c);
5623
5624 /* determine maximum noise among 3 chains */
5625 if ((noise_a >= noise_b) && (noise_a >= noise_c)) {
5626 max_noise_abc = noise_a;
5627 } else if (noise_b >= noise_c) {
5628 max_noise_abc = noise_b;
5629 } else {
5630 max_noise_abc = noise_c;
5631 }
5632
5633 /* record maximum noise among 3 chains */
5634 rx_sens_p->cck_noise_max[rx_sens_p->cck_noise_idx] = max_noise_abc;
5635 rx_sens_p->cck_noise_idx++;
5636 if (rx_sens_p->cck_noise_idx >= 20) {
5637 rx_sens_p->cck_noise_idx = 0;
5638 }
5639
5640 /* determine maximum noise among 20 max noise */
5641 max_noise_20 = rx_sens_p->cck_noise_max[0];
5642 for (i = 0; i < 20; i++) {
5643 if (rx_sens_p->cck_noise_max[i] >= max_noise_20) {
5644 max_noise_20 = rx_sens_p->cck_noise_max[i];
5645 }
5646 }
5647
5648 /* determine minimum beacon among 3 chains */
5649 if ((beacon_a <= beacon_b) && (beacon_a <= beacon_c)) {
5650 min_beacon_abc = beacon_a;
5651 } else if (beacon_b <= beacon_c) {
5652 min_beacon_abc = beacon_b;
5653 } else {
5654 min_beacon_abc = beacon_c;
5655 }
5656
/* record minimum beacon among 3 chains */
5658 rx_sens_p->cck_beacon_min[rx_sens_p->cck_beacon_idx] = min_beacon_abc;
5659 rx_sens_p->cck_beacon_idx++;
5660 if (rx_sens_p->cck_beacon_idx >= 10) {
5661 rx_sens_p->cck_beacon_idx = 0;
5662 }
5663
/* determine the maximum among the last 10 recorded minimum beacon energies */
5665 max_beacon_10 = rx_sens_p->cck_beacon_min[0];
5666 for (i = 0; i < 10; i++) {
5667 if (rx_sens_p->cck_beacon_min[i] >= max_beacon_10) {
5668 max_beacon_10 = rx_sens_p->cck_beacon_min[i];
5669 }
5670 }
5671
5672 /* add a little margin */
5673 max_beacon_10 += 6;
5674
/* count consecutive statistics intervals with few or no false alarms */
5676 if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5677 rx_sens_p->cck_no_false_alarm_num++;
5678 } else {
5679 rx_sens_p->cck_no_false_alarm_num = 0;
5680 }
5681
/*
* adjust the parameters in the sensitivity command
* according to the current false alarm state.
* for more information, please refer to iwk_calibration.h
*/
5687 if (cck_sum_fa_bp > (50 * actual_rx_time)) {
5688 rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5689
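/* too many false alarms: step the CCK auto-correlation thresholds up toward their caps (200 and 400) */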
5690 if (rx_sens_p->auto_corr_cck_x4 > 160) {
5691 rx_sens_p->cck_noise_ref = max_noise_20;
5692
5693 if (rx_sens_p->min_energy_det_cck > 2) {
5694 rx_sens_p->min_energy_det_cck -= 2;
5695 }
5696 }
5697
5698 if (rx_sens_p->auto_corr_cck_x4 < 160) {
5699 rx_sens_p->auto_corr_cck_x4 = 160 + 1;
5700 } else {
5701 if ((rx_sens_p->auto_corr_cck_x4 + 3) < 200) {
5702 rx_sens_p->auto_corr_cck_x4 += 3;
5703 } else {
5704 rx_sens_p->auto_corr_cck_x4 = 200;
5705 }
5706 }
5707
5708 if ((rx_sens_p->auto_corr_mrc_cck_x4 + 3) < 400) {
5709 rx_sens_p->auto_corr_mrc_cck_x4 += 3;
5710 } else {
5711 rx_sens_p->auto_corr_mrc_cck_x4 = 400;
5712 }
5713
5714 rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5715
5716 } else if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5717 rx_sens_p->cck_curr_state = IWK_TOO_FEW_FALSE_ALARM;
5718
5719 rx_sens_p->cck_noise_diff = (int32_t)rx_sens_p->cck_noise_ref -
5720 (int32_t)max_noise_20;
5721
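/*
* unless the previous interval had too many false alarms, raise
* min_energy_det_cck (capped at 97) and lower the auto-correlation
* thresholds (floored at 125 and 200)
*/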
5722 if ((rx_sens_p->cck_prev_state != IWK_TOO_MANY_FALSE_ALARM) &&
5723 ((rx_sens_p->cck_noise_diff > 2) ||
5724 (rx_sens_p->cck_no_false_alarm_num > 100))) {
5725 if ((rx_sens_p->min_energy_det_cck + 2) < 97) {
5726 rx_sens_p->min_energy_det_cck += 2;
5727 } else {
5728 rx_sens_p->min_energy_det_cck = 97;
5729 }
5730
5731 if ((rx_sens_p->auto_corr_cck_x4 - 3) > 125) {
5732 rx_sens_p->auto_corr_cck_x4 -= 3;
5733 } else {
5734 rx_sens_p->auto_corr_cck_x4 = 125;
5735 }
5736
if ((rx_sens_p->auto_corr_mrc_cck_x4 - 3) > 200) {
5738 rx_sens_p->auto_corr_mrc_cck_x4 -= 3;
5739 } else {
5740 rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5741 }
5742
5743 rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5744 } else {
5745 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5746 }
5747 } else {
5748 rx_sens_p->cck_curr_state = IWK_GOOD_RANGE_FALSE_ALARM;
5749
5750 rx_sens_p->cck_noise_ref = max_noise_20;
5751
5752 if (IWK_TOO_MANY_FALSE_ALARM == rx_sens_p->cck_prev_state) {
5753 rx_sens_p->min_energy_det_cck -= 8;
5754 }
5755
5756 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5757 }
5758
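/* the energy detection threshold must stay at or above the strongest recent beacon energy (plus margin) */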
5759 if (rx_sens_p->min_energy_det_cck < max_beacon_10) {
5760 rx_sens_p->min_energy_det_cck = (uint16_t)max_beacon_10;
5761 }
5762
5763 rx_sens_p->cck_prev_state = rx_sens_p->cck_curr_state;
5764
5765 return (IWK_SUCCESS);
5766 }
5767
5768 /*
* Perform Rx sensitivity calibration for OFDM mode.
* This prepares the parameters for the SENSITIVITY command.
5771 */
static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5773 {
5774 uint32_t temp;
5775 uint16_t temp1;
5776 uint32_t ofdm_fa, ofdm_bp;
5777 uint32_t ofdm_sum_fa_bp;
5778 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5779
5780 ofdm_fa = LE_32(sc->sc_statistics.rx.ofdm.false_alarm_cnt);
5781 ofdm_bp = LE_32(sc->sc_statistics.rx.ofdm.plcp_err);
5782
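/* compute false alarm and bad-PLCP deltas since the last statistics notification, handling 32-bit wrap-around as in the CCK path */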
5783 /* accumulate false alarm */
5784 if (rx_sens_p->last_false_alarm_cnt_ofdm > ofdm_fa) {
5785 temp = rx_sens_p->last_false_alarm_cnt_ofdm;
5786 rx_sens_p->last_false_alarm_cnt_ofdm = ofdm_fa;
5787 ofdm_fa += (0xFFFFFFFF - temp);
5788 } else {
5789 ofdm_fa -= rx_sens_p->last_false_alarm_cnt_ofdm;
5790 rx_sens_p->last_false_alarm_cnt_ofdm += ofdm_fa;
5791 }
5792
5793 /* accumulate bad plcp */
5794 if (rx_sens_p->last_bad_plcp_cnt_ofdm > ofdm_bp) {
5795 temp = rx_sens_p->last_bad_plcp_cnt_ofdm;
5796 rx_sens_p->last_bad_plcp_cnt_ofdm = ofdm_bp;
5797 ofdm_bp += (0xFFFFFFFF - temp);
5798 } else {
5799 ofdm_bp -= rx_sens_p->last_bad_plcp_cnt_ofdm;
5800 rx_sens_p->last_bad_plcp_cnt_ofdm += ofdm_bp;
5801 }
5802
5803 ofdm_sum_fa_bp = (ofdm_fa + ofdm_bp) * 200 * 1024; /* relative value */
5804
/*
* adjust the thresholds according to the current false alarm rate:
* raise them toward their caps when false alarms are too frequent,
* lower them toward their floors when they are too rare
*/
5808 if (ofdm_sum_fa_bp > (50 * actual_rx_time)) {
5809 temp1 = rx_sens_p->auto_corr_ofdm_x4 + 1;
5810 rx_sens_p->auto_corr_ofdm_x4 = (temp1 <= 120) ? temp1 : 120;
5811
5812 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 + 1;
5813 rx_sens_p->auto_corr_mrc_ofdm_x4 =
5814 (temp1 <= 210) ? temp1 : 210;
5815
5816 temp1 = rx_sens_p->auto_corr_ofdm_x1 + 1;
5817 rx_sens_p->auto_corr_ofdm_x1 = (temp1 <= 140) ? temp1 : 140;
5818
5819 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 + 1;
5820 rx_sens_p->auto_corr_mrc_ofdm_x1 =
5821 (temp1 <= 270) ? temp1 : 270;
5822
5823 rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5824
5825 } else if (ofdm_sum_fa_bp < (5 * actual_rx_time)) {
5826 temp1 = rx_sens_p->auto_corr_ofdm_x4 - 1;
5827 rx_sens_p->auto_corr_ofdm_x4 = (temp1 >= 85) ? temp1 : 85;
5828
5829 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 - 1;
5830 rx_sens_p->auto_corr_mrc_ofdm_x4 =
5831 (temp1 >= 170) ? temp1 : 170;
5832
5833 temp1 = rx_sens_p->auto_corr_ofdm_x1 - 1;
5834 rx_sens_p->auto_corr_ofdm_x1 = (temp1 >= 105) ? temp1 : 105;
5835
5836 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 - 1;
5837 rx_sens_p->auto_corr_mrc_ofdm_x1 =
5838 (temp1 >= 220) ? temp1 : 220;
5839
5840 rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5841
5842 } else {
5843 rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5844 }
5845
5846 return (IWK_SUCCESS);
5847 }
5848
5849 /*
* additional processing of received management frames
5851 */
static void iwk_recv_mgmt(struct ieee80211com *ic, mblk_t *mp,
5853 struct ieee80211_node *in,
5854 int subtype, int rssi, uint32_t rstamp)
5855 {
5856 iwk_sc_t *sc = (iwk_sc_t *)ic;
5857 struct ieee80211_frame *wh;
5858 uint8_t index1, index2;
5859 int err;
5860
5861 sc->sc_recv_mgmt(ic, mp, in, subtype, rssi, rstamp);
5862
5863 mutex_enter(&sc->sc_glock);
5864 switch (subtype) {
5865 case IEEE80211_FC0_SUBTYPE_BEACON:
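/* if an IBSS beacon is waiting to be synchronized, refresh the beacon frame and send it to the uCode now */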
5866 if (sc->sc_ibss.ibss_beacon.syncbeacon && in == ic->ic_bss &&
5867 ic->ic_state == IEEE80211_S_RUN) {
5868 if (ieee80211_beacon_update(ic, in,
5869 &sc->sc_ibss.ibss_beacon.iwk_boff,
5870 sc->sc_ibss.ibss_beacon.mp, 0)) {
5871 bcopy(sc->sc_ibss.ibss_beacon.mp->b_rptr,
5872 sc->sc_ibss.ibss_beacon.beacon_cmd.
5873 bcon_frame,
5874 MBLKL(sc->sc_ibss.ibss_beacon.mp));
5875 }
5876 err = iwk_cmd(sc, REPLY_TX_BEACON,
5877 &sc->sc_ibss.ibss_beacon.beacon_cmd,
5878 sc->sc_ibss.ibss_beacon.beacon_cmd_len, 1);
5879 if (err != IWK_SUCCESS) {
5880 cmn_err(CE_WARN, "iwk_recv_mgmt(): "
5881 "failed to TX beacon.\n");
5882 }
5883 sc->sc_ibss.ibss_beacon.syncbeacon = 0;
5884 }
5885 if (ic->ic_opmode == IEEE80211_M_IBSS &&
5886 ic->ic_state == IEEE80211_S_RUN) {
5887 wh = (struct ieee80211_frame *)mp->b_rptr;
5888 mutex_enter(&sc->sc_ibss.node_tb_lock);
5889 /*
5890 * search for node in ibss node table
5891 */
5892 for (index1 = IWK_STA_ID; index1 < IWK_STATION_COUNT;
5893 index1++) {
5894 if (sc->sc_ibss.ibss_node_tb[index1].used &&
5895 IEEE80211_ADDR_EQ(sc->sc_ibss.
5896 ibss_node_tb[index1].node.bssid,
5897 wh->i_addr2)) {
5898 break;
5899 }
5900 }
5901 /*
* if the sender was not found in the ibss node table, add it
5903 */
5904 if (index1 >= IWK_BROADCAST_ID) {
5905 err = iwk_clean_add_node_ibss(ic,
5906 wh->i_addr2, &index2);
5907 if (err != IWK_SUCCESS) {
5908 cmn_err(CE_WARN, "iwk_recv_mgmt(): "
5909 "failed to clean all nodes "
5910 "and add one node\n");
5911 }
5912 }
5913 mutex_exit(&sc->sc_ibss.node_tb_lock);
5914 }
5915 break;
5916 case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
5917 break;
5918 }
5919 mutex_exit(&sc->sc_glock);
5920 }
5921
5922 #ifdef DEBUG
5923 /*
5924 * 1) log_event_table_ptr indicates base of the event log. This traces
5925 * a 256-entry history of uCode execution within a circular buffer.
5926 * Its header format is:
5927 *
5928 * uint32_t log_size; log capacity (in number of entries)
5929 * uint32_t type; (1) timestamp with each entry, (0) no timestamp
5930 * uint32_t wraps; # times uCode has wrapped to top of circular buffer
5931 * uint32_t write_index; next circular buffer entry that uCode would fill
5932 *
5933 * The header is followed by the circular buffer of log entries. Entries
5934 * with timestamps have the following format:
5935 *
5936 * uint32_t event_id; range 0 - 1500
5937 * uint32_t timestamp; low 32 bits of TSF (of network, if associated)
5938 * uint32_t data; event_id-specific data value
5939 *
5940 * Entries without timestamps contain only event_id and data.
5941 */
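/*
* Illustrative sketch only (this structure is not defined by the driver);
* the log header described above, expressed as a C struct, would be:
*
*	struct event_log_header {
*		uint32_t log_size;	log capacity, in entries
*		uint32_t type;		1 = entries carry a timestamp
*		uint32_t wraps;		wrap count of the circular buffer
*		uint32_t write_index;	next entry uCode will fill
*	};
*/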
5942
5943 /*
5944 * iwk_write_event_log - Write event log to dmesg
5945 */
static void iwk_write_event_log(iwk_sc_t *sc)
5947 {
5948 uint32_t log_event_table_ptr; /* Start address of event table */
5949 uint32_t startptr; /* Start address of log data */
5950 uint32_t logptr; /* address of log data entry */
5951 uint32_t i, n, num_events;
5952 uint32_t event_id, data1, data2; /* log data */
5953
5954 uint32_t log_size; /* log capacity (in number of entries) */
5955 uint32_t type; /* (1)timestamp with each entry,(0) no timestamp */
5956 uint32_t wraps; /* # times uCode has wrapped to */
5957 /* the top of circular buffer */
5958 uint32_t idx; /* index of entry to be filled in next */
5959
5960 log_event_table_ptr = LE_32(sc->sc_card_alive_run.log_event_table_ptr);
5961 if (!(log_event_table_ptr)) {
5962 IWK_DBG((IWK_DEBUG_EEPROM, "NULL event table pointer\n"));
5963 return;
5964 }
5965
5966 iwk_mac_access_enter(sc);
5967
5968 /* Read log header */
5969 log_size = iwk_mem_read(sc, log_event_table_ptr);
5970 log_event_table_ptr += sizeof (uint32_t); /* addr of "type" */
5971 type = iwk_mem_read(sc, log_event_table_ptr);
5972 log_event_table_ptr += sizeof (uint32_t); /* addr of "wraps" */
5973 wraps = iwk_mem_read(sc, log_event_table_ptr);
5974 log_event_table_ptr += sizeof (uint32_t); /* addr of "idx" */
5975 idx = iwk_mem_read(sc, log_event_table_ptr);
5976 startptr = log_event_table_ptr +
5977 sizeof (uint32_t); /* addr of start of log data */
if (!log_size && !wraps) {
5979 IWK_DBG((IWK_DEBUG_EEPROM, "Empty log\n"));
5980 iwk_mac_access_exit(sc);
5981 return;
5982 }
5983
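/* when the log has wrapped, dump from the write index to the end of the buffer first; the wrapped-around entries at the start are printed afterwards */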
5984 if (!wraps) {
5985 num_events = idx;
5986 logptr = startptr;
5987 } else {
5988 num_events = log_size - idx;
n = type ? 3 : 2;	/* words (uint32_t) per entry: 3 with timestamp, 2 without */
5990 logptr = startptr + (idx * n * sizeof (uint32_t));
5991 }
5992
5993 for (i = 0; i < num_events; i++) {
5994 event_id = iwk_mem_read(sc, logptr);
5995 logptr += sizeof (uint32_t);
5996 data1 = iwk_mem_read(sc, logptr);
5997 logptr += sizeof (uint32_t);
5998 if (type == 0) { /* no timestamp */
IWK_DBG((IWK_DEBUG_EEPROM, "Event ID=%d, Data=0x%x\n",
6000 event_id, data1));
6001 } else { /* timestamp */
6002 data2 = iwk_mem_read(sc, logptr);
6003 IWK_DBG((IWK_DEBUG_EEPROM,
6004 "Time=%d, Event ID=%d, Data=0x%x\n",
6005 data1, event_id, data2));
6006 logptr += sizeof (uint32_t);
6007 }
6008 }
6009
6010 /*
6011 * Print the wrapped around entries, if any
6012 */
6013 if (wraps) {
6014 logptr = startptr;
6015 for (i = 0; i < idx; i++) {
6016 event_id = iwk_mem_read(sc, logptr);
6017 logptr += sizeof (uint32_t);
6018 data1 = iwk_mem_read(sc, logptr);
6019 logptr += sizeof (uint32_t);
6020 if (type == 0) { /* no timestamp */
6021 IWK_DBG((IWK_DEBUG_EEPROM,
6022 "Event ID=%d, Data=%x0x", event_id, data1));
6023 } else { /* timestamp */
6024 data2 = iwk_mem_read(sc, logptr);
6025 IWK_DBG((IWK_DEBUG_EEPROM,
6026 "Time = %d, Event ID=%d, Data=0x%x\n",
6027 data1, event_id, data2));
6028 logptr += sizeof (uint32_t);
6029 }
6030 }
6031 }
6032
6033 iwk_mac_access_exit(sc);
6034 }
6035
6036 /*
6037 * error_event_table_ptr indicates base of the error log. This contains
6038 * information about any uCode error that occurs. For 4965, the format is:
6039 *
6040 * uint32_t valid; (nonzero) valid, (0) log is empty
6041 * uint32_t error_id; type of error
6042 * uint32_t pc; program counter
6043 * uint32_t blink1; branch link
6044 * uint32_t blink2; branch link
6045 * uint32_t ilink1; interrupt link
6046 * uint32_t ilink2; interrupt link
6047 * uint32_t data1; error-specific data
6048 * uint32_t data2; error-specific data
6049 * uint32_t line; source code line of error
6050 * uint32_t bcon_time; beacon timer
* uint32_t tsf_low;	timing synchronization function (TSF) timer, low word
* uint32_t tsf_hi;	timing synchronization function (TSF) timer, high word
6053 */
6054 /*
6055 * iwk_write_error_log - Write error log to dmesg
6056 */
static void iwk_write_error_log(iwk_sc_t *sc)
6058 {
6059 uint32_t err_ptr; /* Start address of error log */
6060 uint32_t valid; /* is error log valid */
6061
6062 err_ptr = LE_32(sc->sc_card_alive_run.error_event_table_ptr);
6063 if (!(err_ptr)) {
6064 IWK_DBG((IWK_DEBUG_EEPROM, "NULL error table pointer\n"));
6065 return;
6066 }
6067
6068 iwk_mac_access_enter(sc);
6069
6070 valid = iwk_mem_read(sc, err_ptr);
6071 if (!(valid)) {
6072 IWK_DBG((IWK_DEBUG_EEPROM, "Error data not valid\n"));
6073 iwk_mac_access_exit(sc);
6074 return;
6075 }
6076 err_ptr += sizeof (uint32_t);
6077 IWK_DBG((IWK_DEBUG_EEPROM, "err=%d ", iwk_mem_read(sc, err_ptr)));
6078 err_ptr += sizeof (uint32_t);
6079 IWK_DBG((IWK_DEBUG_EEPROM, "pc=0x%X ", iwk_mem_read(sc, err_ptr)));
6080 err_ptr += sizeof (uint32_t);
6081 IWK_DBG((IWK_DEBUG_EEPROM,
6082 "branch link1=0x%X ", iwk_mem_read(sc, err_ptr)));
6083 err_ptr += sizeof (uint32_t);
6084 IWK_DBG((IWK_DEBUG_EEPROM,
6085 "branch link2=0x%X ", iwk_mem_read(sc, err_ptr)));
6086 err_ptr += sizeof (uint32_t);
6087 IWK_DBG((IWK_DEBUG_EEPROM,
6088 "interrupt link1=0x%X ", iwk_mem_read(sc, err_ptr)));
6089 err_ptr += sizeof (uint32_t);
6090 IWK_DBG((IWK_DEBUG_EEPROM,
6091 "interrupt link2=0x%X ", iwk_mem_read(sc, err_ptr)));
6092 err_ptr += sizeof (uint32_t);
6093 IWK_DBG((IWK_DEBUG_EEPROM, "data1=0x%X ", iwk_mem_read(sc, err_ptr)));
6094 err_ptr += sizeof (uint32_t);
6095 IWK_DBG((IWK_DEBUG_EEPROM, "data2=0x%X ", iwk_mem_read(sc, err_ptr)));
6096 err_ptr += sizeof (uint32_t);
6097 IWK_DBG((IWK_DEBUG_EEPROM, "line=%d ", iwk_mem_read(sc, err_ptr)));
6098 err_ptr += sizeof (uint32_t);
6099 IWK_DBG((IWK_DEBUG_EEPROM, "bcon_time=%d ", iwk_mem_read(sc, err_ptr)));
6100 err_ptr += sizeof (uint32_t);
6101 IWK_DBG((IWK_DEBUG_EEPROM, "tsf_low=%d ", iwk_mem_read(sc, err_ptr)));
6102 err_ptr += sizeof (uint32_t);
6103 IWK_DBG((IWK_DEBUG_EEPROM, "tsf_hi=%d\n", iwk_mem_read(sc, err_ptr)));
6104
6105 iwk_mac_access_exit(sc);
6106 }
6107 #endif /* DEBUG */
6108
6109 static int
iwk_run_state_config_ibss(ieee80211com_t *ic)
6111 {
6112 iwk_sc_t *sc = (iwk_sc_t *)ic;
6113 ieee80211_node_t *in = ic->ic_bss;
6114 int i, err = IWK_SUCCESS;
6115
6116 mutex_enter(&sc->sc_ibss.node_tb_lock);
6117
6118 /*
* clear every entry in the ibss node table so that it stays
* consistent with the hardware station table
6121 */
6122 for (i = IWK_STA_ID; i < IWK_STATION_COUNT; i++) {
6123 sc->sc_ibss.ibss_node_tb[i].used = 0;
6124 (void) memset(&sc->sc_ibss.ibss_node_tb[i].node,
6125 0,
6126 sizeof (iwk_add_sta_t));
6127 }
6128
6129 sc->sc_ibss.node_number = 0;
6130
6131 mutex_exit(&sc->sc_ibss.node_tb_lock);
6132
6133 /*
6134 * configure RX and TX
6135 */
6136 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
6137
6138 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
6139 sc->sc_config.filter_flags =
6140 LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
6141 RXON_FILTER_DIS_DECRYPT_MSK |
6142 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
6143
6144 sc->sc_config.assoc_id = 0;
6145
6146 IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
6147 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic,
6148 in->in_chan));
6149
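/*
* basic-rate bitmaps, one bit per rate starting from the lowest:
* 0x03 = 1/2 Mbit/s CCK, 0x0f = 1/2/5.5/11 Mbit/s CCK,
* 0x15 = the mandatory 6/12/24 Mbit/s OFDM rates, 0xff = all OFDM rates
*/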
6150 if (ic->ic_curmode == IEEE80211_MODE_11B) {
6151 sc->sc_config.cck_basic_rates = 0x03;
6152 sc->sc_config.ofdm_basic_rates = 0;
6153 } else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
6154 (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
6155 sc->sc_config.cck_basic_rates = 0;
6156 sc->sc_config.ofdm_basic_rates = 0x15;
6157
6158 } else {
6159 sc->sc_config.cck_basic_rates = 0x0f;
6160 sc->sc_config.ofdm_basic_rates = 0xff;
6161 }
6162
6163 sc->sc_config.flags &=
6164 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
6165 RXON_FLG_SHORT_SLOT_MSK);
6166
6167 if (ic->ic_flags & IEEE80211_F_SHSLOT) {
6168 sc->sc_config.flags |=
6169 LE_32(RXON_FLG_SHORT_SLOT_MSK);
6170 }
6171
6172 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
6173 sc->sc_config.flags |=
6174 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
6175 }
6176
6177 sc->sc_config.filter_flags |=
6178 LE_32(RXON_FILTER_ASSOC_MSK);
6179
6180 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
6181 sizeof (iwk_rxon_cmd_t), 1);
6182 if (err != IWK_SUCCESS) {
6183 cmn_err(CE_WARN, "iwk_run_state_config_ibss(): "
6184 "failed to update configuration.\n");
6185 return (err);
6186 }
6187
6188 return (err);
6189
6190 }
6191
6192 static int
iwk_run_state_config_sta(ieee80211com_t *ic)
6194 {
6195 iwk_sc_t *sc = (iwk_sc_t *)ic;
6196 ieee80211_node_t *in = ic->ic_bss;
6197 int err = IWK_SUCCESS;
6198
6199 /* update adapter's configuration */
6200 if (sc->sc_assoc_id != in->in_associd) {
6201 cmn_err(CE_WARN, "iwk_run_state_config_sta(): "
6202 "associate ID mismatch: expected %d, "
6203 "got %d\n",
6204 in->in_associd, sc->sc_assoc_id);
6205 }
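/* only the low 14 bits of the association ID are significant */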
6206 sc->sc_config.assoc_id = LE_16(in->in_associd & 0x3fff);
6207
6208 /*
6209 * short preamble/slot time are
6210 * negotiated when associating
6211 */
6212 sc->sc_config.flags &=
6213 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
6214 RXON_FLG_SHORT_SLOT_MSK);
6215
6216 if (ic->ic_flags & IEEE80211_F_SHSLOT)
6217 sc->sc_config.flags |=
6218 LE_32(RXON_FLG_SHORT_SLOT_MSK);
6219
6220 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6221 sc->sc_config.flags |=
6222 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
6223
6224 sc->sc_config.filter_flags |=
6225 LE_32(RXON_FILTER_ASSOC_MSK);
6226
6227 if (ic->ic_opmode != IEEE80211_M_STA)
6228 sc->sc_config.filter_flags |=
6229 LE_32(RXON_FILTER_BCON_AWARE_MSK);
6230
6231 IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x"
6232 " filter_flags %x\n",
6233 sc->sc_config.chan, sc->sc_config.flags,
6234 sc->sc_config.filter_flags));
6235
6236 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
6237 sizeof (iwk_rxon_cmd_t), 1);
6238 if (err != IWK_SUCCESS) {
6239 cmn_err(CE_WARN, "iwk_run_state_config_sta(): "
6240 "failed to update configuration\n");
6241 return (err);
6242 }
6243
6244 return (err);
6245 }
6246
6247 static int
iwk_fast_recover(iwk_sc_t *sc)
6249 {
6250 ieee80211com_t *ic = &sc->sc_ic;
6251 int err;
6252
6253 mutex_enter(&sc->sc_glock);
6254
6255 /* restore runtime configuration */
6256 bcopy(&sc->sc_config_save, &sc->sc_config,
6257 sizeof (sc->sc_config));
6258
6259 /* reset state to handle reassociations correctly */
6260 sc->sc_config.assoc_id = 0;
6261 sc->sc_config.filter_flags &=
6262 ~LE_32(RXON_FILTER_ASSOC_MSK);
6263
6264 if ((err = iwk_hw_set_before_auth(sc)) != 0) {
6265 cmn_err(CE_WARN, "iwk_fast_recover(): "
6266 "failed to setup authentication\n");
6267 mutex_exit(&sc->sc_glock);
6268 return (err);
6269 }
6270
6271 bcopy(&sc->sc_config_save, &sc->sc_config,
6272 sizeof (sc->sc_config));
6273
6274 /* update adapter's configuration */
6275 err = iwk_run_state_config_sta(ic);
6276 if (err != IWK_SUCCESS) {
6277 cmn_err(CE_WARN, "iwk_fast_recover(): "
6278 "failed to setup association\n");
6279 mutex_exit(&sc->sc_glock);
6280 return (err);
6281 }
6282
6283 /* obtain current temperature of chipset */
6284 sc->sc_tempera = iwk_curr_tempera(sc);
6285
6286 /*
6287 * make Tx power calibration to determine
6288 * the gains of DSP and radio
6289 */
6290 err = iwk_tx_power_calibration(sc);
6291 if (err) {
6292 cmn_err(CE_WARN, "iwk_fast_recover(): "
6293 "failed to set tx power table\n");
6294 mutex_exit(&sc->sc_glock);
6295 return (err);
6296 }
6297
6298 /*
6299 * make initialization for Receiver
6300 * sensitivity calibration
6301 */
6302 err = iwk_rx_sens_init(sc);
6303 if (err) {
6304 cmn_err(CE_WARN, "iwk_fast_recover(): "
6305 "failed to init RX sensitivity\n");
6306 mutex_exit(&sc->sc_glock);
6307 return (err);
6308 }
6309
6310 /* make initialization for Receiver gain balance */
6311 err = iwk_rxgain_diff_init(sc);
6312 if (err) {
6313 cmn_err(CE_WARN, "iwk_fast_recover(): "
6314 "failed to init phy calibration\n");
6315 mutex_exit(&sc->sc_glock);
6316 return (err);
6317
6318 }
6319 /* set LED on */
6320 iwk_set_led(sc, 2, 0, 1);
6321
6322 mutex_exit(&sc->sc_glock);
6323
6324 /* update keys */
6325 if (ic->ic_flags & IEEE80211_F_PRIVACY) {
6326 for (int i = 0; i < IEEE80211_KEY_MAX; i++) {
6327 if (ic->ic_nw_keys[i].wk_keyix == IEEE80211_KEYIX_NONE)
6328 continue;
6329 err = iwk_key_set(ic, &ic->ic_nw_keys[i],
6330 ic->ic_bss->in_macaddr);
6331 /* failure */
6332 if (err == 0) {
6333 cmn_err(CE_WARN, "iwk_fast_recover(): "
6334 "failed to setup hardware keys\n");
6335 return (IWK_FAIL);
6336 }
6337 }
6338 }
6339
6340 sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
6341
6342 /* start queue */
6343 IWK_DBG((IWK_DEBUG_FW, "iwk_fast_recover(): resume xmit\n"));
6344 mac_tx_update(ic->ic_mach);
6345
6346
6347 return (IWK_SUCCESS);
6348 }
6349
6350 static int
iwk_start_tx_beacon(ieee80211com_t *ic)
6352 {
6353 iwk_sc_t *sc = (iwk_sc_t *)ic;
6354 ieee80211_node_t *in = ic->ic_bss;
6355 int err = IWK_SUCCESS;
6356 iwk_tx_beacon_cmd_t *tx_beacon_p;
6357 uint16_t masks = 0;
6358 mblk_t *mp;
6359 int rate;
6360
6361 /*
6362 * allocate and transmit beacon frames
6363 */
6364 tx_beacon_p = &sc->sc_ibss.ibss_beacon.beacon_cmd;
6365
6366 (void) memset(tx_beacon_p, 0,
6367 sizeof (iwk_tx_beacon_cmd_t));
6368 rate = 0;
6369 masks = 0;
6370
6371 tx_beacon_p->config.sta_id = IWK_BROADCAST_ID;
6372 tx_beacon_p->config.stop_time.life_time =
6373 LE_32(0xffffffff);
6374
6375 if (sc->sc_ibss.ibss_beacon.mp != NULL) {
6376 freemsg(sc->sc_ibss.ibss_beacon.mp);
6377 sc->sc_ibss.ibss_beacon.mp = NULL;
6378 }
6379
6380 sc->sc_ibss.ibss_beacon.mp =
6381 ieee80211_beacon_alloc(ic, in,
6382 &sc->sc_ibss.ibss_beacon.iwk_boff);
6383 if (sc->sc_ibss.ibss_beacon.mp == NULL) {
6384 cmn_err(CE_WARN, "iwk_start_tx_beacon(): "
6385 "failed to get beacon frame.\n");
6386 return (IWK_FAIL);
6387 }
6388
6389 mp = sc->sc_ibss.ibss_beacon.mp;
6390
6391 ASSERT(mp->b_cont == NULL);
6392
6393 bcopy(mp->b_rptr, tx_beacon_p->bcon_frame, MBLKL(mp));
6394
6395 tx_beacon_p->config.len = LE_16((uint16_t)(MBLKL(mp)));
6396 sc->sc_ibss.ibss_beacon.beacon_cmd_len =
6397 sizeof (iwk_tx_cmd_t) +
6398 4 + LE_16(tx_beacon_p->config.len);
6399
6400 /*
* beacons are transmitted at the lowest rate in the rate set (1 Mbit/s on 2.4 GHz)
6402 */
6403 rate = in->in_rates.ir_rates[0];
6404 rate &= IEEE80211_RATE_VAL;
6405
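/* rates are in 500 kbit/s units; 2, 4, 11 and 22 (1, 2, 5.5 and 11 Mbit/s) are the CCK rates */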
6406 if (2 == rate || 4 == rate || 11 == rate ||
6407 22 == rate) {
6408 masks |= RATE_MCS_CCK_MSK;
6409 }
6410
6411 masks |= RATE_MCS_ANT_B_MSK;
6412
6413 tx_beacon_p->config.rate.r.rate_n_flags =
6414 LE_32(iwk_rate_to_plcp(rate) | masks);
6415
6416
6417 tx_beacon_p->config.tx_flags =
6418 LE_32(TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
6419
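/*
* if the BSS already has a TSF, defer the beacon until one is received
* from the BSS (handled in iwk_recv_mgmt()); otherwise transmit it now
*/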
6420 if (ic->ic_bss->in_tstamp.tsf != 0) {
6421 sc->sc_ibss.ibss_beacon.syncbeacon = 1;
6422 } else {
6423 if (ieee80211_beacon_update(ic, in,
6424 &sc->sc_ibss.ibss_beacon.iwk_boff,
6425 mp, 0)) {
6426 bcopy(mp->b_rptr,
6427 tx_beacon_p->bcon_frame,
6428 MBLKL(mp));
6429 }
6430
6431 err = iwk_cmd(sc, REPLY_TX_BEACON,
6432 tx_beacon_p,
6433 sc->sc_ibss.ibss_beacon.beacon_cmd_len,
6434 1);
6435 if (err != IWK_SUCCESS) {
6436 cmn_err(CE_WARN, "iwk_start_tx_beacon(): "
6437 "failed to TX beacon.\n");
6438 return (err);
6439 }
6440
6441 sc->sc_ibss.ibss_beacon.syncbeacon = 0;
6442 }
6443
6444 return (err);
6445 }
6446
6447 static int
iwk_clean_add_node_ibss(struct ieee80211com *ic,
6449 uint8_t addr[IEEE80211_ADDR_LEN], uint8_t *index2)
6450 {
6451 iwk_sc_t *sc = (iwk_sc_t *)ic;
6452 uint8_t index;
6453 iwk_add_sta_t bc_node;
6454 iwk_link_quality_cmd_t bc_link_quality;
6455 iwk_link_quality_cmd_t link_quality;
6456 uint16_t bc_masks = 0;
6457 uint16_t masks = 0;
6458 int i, rate;
6459 struct ieee80211_rateset rs;
6460 iwk_ibss_node_t *ibss_node_p;
6461 int err = IWK_SUCCESS;
6462
6463 /*
* find an unused slot in the ibss node table
6466 */
6467 for (index = IWK_STA_ID;
6468 index < IWK_STATION_COUNT; index++) {
6469 if (!sc->sc_ibss.ibss_node_tb[index].used) {
6470 break;
6471 }
6472 }
6473
6474 /*
* if the hardware already holds too many nodes, remove them all and start over
6476 */
6477 if (index < IWK_BROADCAST_ID &&
6478 sc->sc_ibss.node_number >= 25) {
6479 if (iwk_cmd(sc, REPLY_REMOVE_ALL_STA,
6480 NULL, 0, 1) != IWK_SUCCESS) {
6481 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6482 "failed to remove all nodes in hardware\n");
6483 return (IWK_FAIL);
6484 }
6485
6486 for (i = IWK_STA_ID; i < IWK_STATION_COUNT; i++) {
6487 sc->sc_ibss.ibss_node_tb[i].used = 0;
6488 (void) memset(&sc->sc_ibss.ibss_node_tb[i].node,
6489 0, sizeof (iwk_add_sta_t));
6490 }
6491
6492 sc->sc_ibss.node_number = 0;
6493
6494 /*
* re-add the broadcast node so that
* broadcast frames can still be sent
6497 */
6498 (void) memset(&bc_node, 0, sizeof (bc_node));
6499 (void) memset(bc_node.bssid, 0xff, 6);
6500 bc_node.id = IWK_BROADCAST_ID;
6501
6502 err = iwk_cmd(sc, REPLY_ADD_STA, &bc_node, sizeof (bc_node), 1);
6503 if (err != IWK_SUCCESS) {
6504 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6505 "failed to add broadcast node\n");
6506 return (err);
6507 }
6508
6509 /* TX_LINK_QUALITY cmd */
6510 (void) memset(&bc_link_quality, 0, sizeof (bc_link_quality));
6511 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6512 bc_masks |= RATE_MCS_CCK_MSK;
6513 bc_masks |= RATE_MCS_ANT_B_MSK;
6514 bc_masks &= ~RATE_MCS_ANT_A_MSK;
6515 bc_link_quality.rate_n_flags[i] =
6516 LE_32(iwk_rate_to_plcp(2) | bc_masks);
6517 }
6518
6519 bc_link_quality.general_params.single_stream_ant_msk = 2;
6520 bc_link_quality.general_params.dual_stream_ant_msk = 3;
6521 bc_link_quality.agg_params.agg_dis_start_th = 3;
6522 bc_link_quality.agg_params.agg_time_limit = LE_16(4000);
6523 bc_link_quality.sta_id = IWK_BROADCAST_ID;
6524
6525 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD,
6526 &bc_link_quality, sizeof (bc_link_quality), 1);
6527 if (err != IWK_SUCCESS) {
6528 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6529 "failed to config link quality table\n");
6530 return (err);
6531 }
6532 }
6533
6534 if (index >= IWK_BROADCAST_ID) {
6535 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6536 "the count of node in hardware is too much\n");
6537 return (IWK_FAIL);
6538 }
6539
6540 /*
6541 * add a node into hardware
6542 */
6543 ibss_node_p = &sc->sc_ibss.ibss_node_tb[index];
6544
6545 ibss_node_p->used = 1;
6546
6547 (void) memset(&ibss_node_p->node, 0,
6548 sizeof (iwk_add_sta_t));
6549
6550 IEEE80211_ADDR_COPY(ibss_node_p->node.bssid, addr);
6551 ibss_node_p->node.id = index;
6552 ibss_node_p->node.control = 0;
6553 ibss_node_p->node.flags = 0;
6554
6555 err = iwk_cmd(sc, REPLY_ADD_STA, &ibss_node_p->node,
6556 sizeof (iwk_add_sta_t), 1);
6557 if (err != IWK_SUCCESS) {
6558 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6559 "failed to add IBSS node\n");
6560 ibss_node_p->used = 0;
6561 (void) memset(&ibss_node_p->node, 0,
6562 sizeof (iwk_add_sta_t));
6563 return (err);
6564 }
6565
6566 sc->sc_ibss.node_number++;
6567
6568 (void) memset(&link_quality, 0, sizeof (link_quality));
6569
6570 rs = ic->ic_sup_rates[ieee80211_chan2mode(ic,
6571 ic->ic_curchan)];
6572
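/* fill the retry rate table from the highest supported rate down, falling back to 1 Mbit/s once the rate set is exhausted */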
6573 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6574 if (i < rs.ir_nrates) {
rate = rs.ir_rates[rs.ir_nrates - i - 1];
6577 } else {
6578 rate = 2;
6579 }
6580
6581 if (2 == rate || 4 == rate ||
6582 11 == rate || 22 == rate) {
6583 masks |= RATE_MCS_CCK_MSK;
6584 }
6585
6586 masks |= RATE_MCS_ANT_B_MSK;
6587 masks &= ~RATE_MCS_ANT_A_MSK;
6588
6589 link_quality.rate_n_flags[i] =
6590 LE_32(iwk_rate_to_plcp(rate) | masks);
6591 }
6592
6593 link_quality.general_params.single_stream_ant_msk = 2;
6594 link_quality.general_params.dual_stream_ant_msk = 3;
6595 link_quality.agg_params.agg_dis_start_th = 3;
6596 link_quality.agg_params.agg_time_limit = LE_16(4000);
6597 link_quality.sta_id = ibss_node_p->node.id;
6598
6599 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD,
6600 &link_quality, sizeof (link_quality), 1);
6601 if (err != IWK_SUCCESS) {
6602 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6603 "failed to set up TX link quality\n");
6604 ibss_node_p->used = 0;
6605 (void) memset(ibss_node_p->node.bssid, 0, 6);
6606 return (err);
6607 }
6608
6609 *index2 = index;
6610
6611 return (err);
6612 }
6613