xref: /titanic_51/usr/src/uts/common/io/iwk/iwk2.c (revision 8793b36b40d14ad0a0fecc97738dc118a928f46c)
1 /*
2  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2007, Intel Corporation
8  * All rights reserved.
9  */
10 
11 /*
12  * Copyright (c) 2006
13  * Copyright (c) 2007
14  *	Damien Bergamini <damien.bergamini@free.fr>
15  *
16  * Permission to use, copy, modify, and distribute this software for any
17  * purpose with or without fee is hereby granted, provided that the above
18  * copyright notice and this permission notice appear in all copies.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27  */
28 
29 /*
30  * Driver for Intel PRO/Wireless 4965AGN(kedron) 802.11 network adapters.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/varargs.h>
56 #include <sys/policy.h>
57 #include <sys/pci.h>
58 
59 #include "iwk_calibration.h"
60 #include "iwk_hw.h"
61 #include "iwk_eeprom.h"
62 #include "iwk2_var.h"
63 #include <inet/wifi_ioctl.h>
64 
65 #ifdef DEBUG
66 #define	IWK_DEBUG_80211		(1 << 0)
67 #define	IWK_DEBUG_CMD		(1 << 1)
68 #define	IWK_DEBUG_DMA		(1 << 2)
69 #define	IWK_DEBUG_EEPROM	(1 << 3)
70 #define	IWK_DEBUG_FW		(1 << 4)
71 #define	IWK_DEBUG_HW		(1 << 5)
72 #define	IWK_DEBUG_INTR		(1 << 6)
73 #define	IWK_DEBUG_MRR		(1 << 7)
74 #define	IWK_DEBUG_PIO		(1 << 8)
75 #define	IWK_DEBUG_RX		(1 << 9)
76 #define	IWK_DEBUG_SCAN		(1 << 10)
77 #define	IWK_DEBUG_TX		(1 << 11)
78 #define	IWK_DEBUG_RATECTL	(1 << 12)
79 #define	IWK_DEBUG_RADIO		(1 << 13)
80 #define	IWK_DEBUG_RESUME	(1 << 14)
81 #define	IWK_DEBUG_CALIBRATION	(1 << 15)
82 uint32_t iwk_dbg_flags = 0;
83 #define	IWK_DBG(x) \
84 	iwk_dbg x
85 #else
86 #define	IWK_DBG(x)
87 #endif
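
/*
 * Illustrative usage note: IWK_DBG takes a doubly-parenthesized argument
 * list, e.g. IWK_DBG((IWK_DEBUG_RX, "len=%d\n", len)), so the entire call
 * (flag plus printf-style arguments) compiles away in non-DEBUG builds.
 */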
88 
89 static void	*iwk_soft_state_p = NULL;
90 static uint8_t iwk_fw_bin [] = {
91 #include "fw-iw/iw4965.ucode.hex"
92 };
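
/*
 * The ucode image above is compiled into the driver from the hex dump of
 * iw4965.ucode; sc_hdr is pointed at iwk_fw_bin in iwk_attach() and the
 * sections following the header are copied into DMA memory by
 * iwk_alloc_fw_dma().
 */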
93 
94 /* DMA attributes for a shared page */
95 static ddi_dma_attr_t sh_dma_attr = {
96 	DMA_ATTR_V0,	/* version of this structure */
97 	0,		/* lowest usable address */
98 	0xffffffffU,	/* highest usable address */
99 	0xffffffffU,	/* maximum DMAable byte count */
100 	0x1000,		/* alignment in bytes */
101 	0x1000,		/* burst sizes (any?) */
102 	1,		/* minimum transfer */
103 	0xffffffffU,	/* maximum transfer */
104 	0xffffffffU,	/* maximum segment length */
105 	1,		/* maximum number of segments */
106 	1,		/* granularity */
107 	0,		/* flags (reserved) */
108 };
109 
110 /* DMA attributes for a keep warm DRAM descriptor */
111 static ddi_dma_attr_t kw_dma_attr = {
112 	DMA_ATTR_V0,	/* version of this structure */
113 	0,		/* lowest usable address */
114 	0xffffffffU,	/* highest usable address */
115 	0xffffffffU,	/* maximum DMAable byte count */
116 	0x1000,		/* alignment in bytes */
117 	0x1000,		/* burst sizes (any?) */
118 	1,		/* minimum transfer */
119 	0xffffffffU,	/* maximum transfer */
120 	0xffffffffU,	/* maximum segment length */
121 	1,		/* maximum number of segments */
122 	1,		/* granularity */
123 	0,		/* flags (reserved) */
124 };
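
/*
 * The 0x1000 alignment in the two attribute structures above reflects the
 * hardware requirement that the shared page and the keep warm page start
 * on 4K-page boundaries (see iwk_alloc_shared() and iwk_alloc_kw()).
 */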
125 
126 /* DMA attributes for a ring descriptor */
127 static ddi_dma_attr_t ring_desc_dma_attr = {
128 	DMA_ATTR_V0,	/* version of this structure */
129 	0,		/* lowest usable address */
130 	0xffffffffU,	/* highest usable address */
131 	0xffffffffU,	/* maximum DMAable byte count */
132 	0x100,		/* alignment in bytes */
133 	0x100,		/* burst sizes (any?) */
134 	1,		/* minimum transfer */
135 	0xffffffffU,	/* maximum transfer */
136 	0xffffffffU,	/* maximum segment length */
137 	1,		/* maximum number of segments */
138 	1,		/* granularity */
139 	0,		/* flags (reserved) */
140 };
141 
142 /* DMA attributes for a cmd */
143 static ddi_dma_attr_t cmd_dma_attr = {
144 	DMA_ATTR_V0,	/* version of this structure */
145 	0,		/* lowest usable address */
146 	0xffffffffU,	/* highest usable address */
147 	0xffffffffU,	/* maximum DMAable byte count */
148 	4,		/* alignment in bytes */
149 	0x100,		/* burst sizes (any?) */
150 	1,		/* minimum transfer */
151 	0xffffffffU,	/* maximum transfer */
152 	0xffffffffU,	/* maximum segment length */
153 	1,		/* maximum number of segments */
154 	1,		/* granularity */
155 	0,		/* flags (reserved) */
156 };
157 
158 /* DMA attributes for a rx buffer */
159 static ddi_dma_attr_t rx_buffer_dma_attr = {
160 	DMA_ATTR_V0,	/* version of this structure */
161 	0,		/* lowest usable address */
162 	0xffffffffU,	/* highest usable address */
163 	0xffffffffU,	/* maximum DMAable byte count */
164 	0x100,		/* alignment in bytes */
165 	0x100,		/* burst sizes (any?) */
166 	1,		/* minimum transfer */
167 	0xffffffffU,	/* maximum transfer */
168 	0xffffffffU,	/* maximum segment length */
169 	1,		/* maximum number of segments */
170 	1,		/* granularity */
171 	0,		/* flags (reserved) */
172 };
173 
174 /*
175  * DMA attributes for a tx buffer.
176  * The hardware supports up to 4 segments per descriptor, but like
177  * the other wifi drivers we currently place the whole frame in a
178  * single descriptor, so the maximum number of segments is set to 1,
179  * the same as for rx buffers. Separate rx and tx attribute
180  * structures are kept so that the multi-segment capability can be
181  * exploited later without touching the rx side.
182  */
183 static ddi_dma_attr_t tx_buffer_dma_attr = {
184 	DMA_ATTR_V0,	/* version of this structure */
185 	0,		/* lowest usable address */
186 	0xffffffffU,	/* highest usable address */
187 	0xffffffffU,	/* maximum DMAable byte count */
188 	4,		/* alignment in bytes */
189 	0x100,		/* burst sizes (any?) */
190 	1,		/* minimum transfer */
191 	0xffffffffU,	/* maximum transfer */
192 	0xffffffffU,	/* maximum segment length */
193 	1,		/* maximum number of segments */
194 	1,		/* granularity */
195 	0,		/* flags (reserved) */
196 };
197 
198 /* DMA attributes for the text and data sections of the firmware */
199 static ddi_dma_attr_t fw_dma_attr = {
200 	DMA_ATTR_V0,	/* version of this structure */
201 	0,		/* lowest usable address */
202 	0xffffffffU,	/* highest usable address */
203 	0x7fffffff,	/* maximum DMAable byte count */
204 	0x10,		/* alignment in bytes */
205 	0x100,		/* burst sizes (any?) */
206 	1,		/* minimum transfer */
207 	0xffffffffU,	/* maximum transfer */
208 	0xffffffffU,	/* maximum segment length */
209 	1,		/* maximum number of segments */
210 	1,		/* granularity */
211 	0,		/* flags (reserved) */
212 };
213 
214 
215 /* regs access attributes */
216 static ddi_device_acc_attr_t iwk_reg_accattr = {
217 	DDI_DEVICE_ATTR_V0,
218 	DDI_STRUCTURE_LE_ACC,
219 	DDI_STRICTORDER_ACC,
220 	DDI_DEFAULT_ACC
221 };
222 
223 /* DMA access attributes */
224 static ddi_device_acc_attr_t iwk_dma_accattr = {
225 	DDI_DEVICE_ATTR_V0,
226 	DDI_NEVERSWAP_ACC,
227 	DDI_STRICTORDER_ACC,
228 	DDI_DEFAULT_ACC
229 };
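
/*
 * Note (an assumption based on usage in this file): registers use
 * DDI_STRUCTURE_LE_ACC so the DDI framework byte-swaps accesses on
 * big-endian hosts, while DMA memory uses DDI_NEVERSWAP_ACC and the
 * driver performs any needed swapping explicitly via LE_32().
 */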
230 
231 static int	iwk_ring_init(iwk_sc_t *);
232 static void	iwk_ring_free(iwk_sc_t *);
233 static int	iwk_alloc_shared(iwk_sc_t *);
234 static void	iwk_free_shared(iwk_sc_t *);
235 static int	iwk_alloc_kw(iwk_sc_t *);
236 static void	iwk_free_kw(iwk_sc_t *);
237 static int	iwk_alloc_fw_dma(iwk_sc_t *);
238 static void	iwk_free_fw_dma(iwk_sc_t *);
239 static int	iwk_alloc_rx_ring(iwk_sc_t *);
240 static void	iwk_reset_rx_ring(iwk_sc_t *);
241 static void	iwk_free_rx_ring(iwk_sc_t *);
242 static int	iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *,
243     int, int);
244 static void	iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
245 static void	iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
246 
247 static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *);
248 static void	iwk_node_free(ieee80211_node_t *);
249 static int	iwk_newstate(ieee80211com_t *, enum ieee80211_state, int);
250 static int	iwk_key_set(ieee80211com_t *, const struct ieee80211_key *,
251     const uint8_t mac[IEEE80211_ADDR_LEN]);
252 static void	iwk_mac_access_enter(iwk_sc_t *);
253 static void	iwk_mac_access_exit(iwk_sc_t *);
254 static uint32_t	iwk_reg_read(iwk_sc_t *, uint32_t);
255 static void	iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t);
256 static void	iwk_reg_write_region_4(iwk_sc_t *, uint32_t,
257 		    uint32_t *, int);
258 static int	iwk_load_firmware(iwk_sc_t *);
259 static void	iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *,
260 		    iwk_rx_data_t *);
261 static void	iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *,
262 		    iwk_rx_data_t *);
263 static void	iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *);
264 static uint_t   iwk_intr(caddr_t, caddr_t);
265 static int	iwk_eep_load(iwk_sc_t *sc);
266 static void	iwk_get_mac_from_eep(iwk_sc_t *sc);
267 static int	iwk_eep_sem_down(iwk_sc_t *sc);
268 static void	iwk_eep_sem_up(iwk_sc_t *sc);
269 static uint_t   iwk_rx_softintr(caddr_t, caddr_t);
270 static uint8_t	iwk_rate_to_plcp(int);
271 static int	iwk_cmd(iwk_sc_t *, int, const void *, int, int);
272 static void	iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t);
273 static int	iwk_hw_set_before_auth(iwk_sc_t *);
274 static int	iwk_scan(iwk_sc_t *);
275 static int	iwk_config(iwk_sc_t *);
276 static void	iwk_stop_master(iwk_sc_t *);
277 static int	iwk_power_up(iwk_sc_t *);
278 static int	iwk_preinit(iwk_sc_t *);
279 static int	iwk_init(iwk_sc_t *);
280 static void	iwk_stop(iwk_sc_t *);
281 static void	iwk_amrr_init(iwk_amrr_t *);
282 static void	iwk_amrr_timeout(iwk_sc_t *);
283 static void	iwk_amrr_ratectl(void *, ieee80211_node_t *);
284 static int32_t	iwk_curr_tempera(iwk_sc_t *sc);
285 static int	iwk_tx_power_calibration(iwk_sc_t *sc);
286 static inline int	iwk_is_24G_band(iwk_sc_t *sc);
287 static inline int	iwk_is_fat_channel(iwk_sc_t *sc);
288 static int	iwk_txpower_grp(uint16_t channel);
289 static struct	iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
290     uint16_t channel,
291     int is_24G, int is_fat, int is_hi_chan);
292 static int32_t	iwk_band_number(iwk_sc_t *sc, uint16_t channel);
293 static int	iwk_division(int32_t num, int32_t denom, int32_t *res);
294 static int32_t	iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
295     int32_t x2, int32_t y2);
296 static int	iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
297     struct iwk_eep_calib_channel_info *chan_info);
298 static int32_t	iwk_voltage_compensation(int32_t eep_voltage,
299     int32_t curr_voltage);
300 static int32_t	iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G);
301 static int	iwk_txpower_table_cmd_init(iwk_sc_t *sc,
302     struct iwk_tx_power_db *tp_db);
303 static void	iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc);
304 static int	iwk_is_associated(iwk_sc_t *sc);
305 static int	iwk_rxgain_diff_init(iwk_sc_t *sc);
306 static int	iwk_rxgain_diff(iwk_sc_t *sc);
307 static int	iwk_rx_sens_init(iwk_sc_t *sc);
308 static int	iwk_rx_sens(iwk_sc_t *sc);
309 static int	iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
310 static int	iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
311 
312 static void	iwk_write_event_log(iwk_sc_t *);
313 static void	iwk_write_error_log(iwk_sc_t *);
314 
315 static int	iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
316 static int	iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
317 
318 /*
319  * GLD specific operations
320  */
321 static int	iwk_m_stat(void *arg, uint_t stat, uint64_t *val);
322 static int	iwk_m_start(void *arg);
323 static void	iwk_m_stop(void *arg);
324 static int	iwk_m_unicst(void *arg, const uint8_t *macaddr);
325 static int	iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m);
326 static int	iwk_m_promisc(void *arg, boolean_t on);
327 static mblk_t 	*iwk_m_tx(void *arg, mblk_t *mp);
328 static void	iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
329 
330 static void	iwk_destroy_locks(iwk_sc_t *sc);
331 static int	iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type);
332 static void	iwk_thread(iwk_sc_t *sc);
333 
334 /*
335  * Supported rates for 802.11b/g modes (in 500Kbps unit).
336  * 11a and 11n support will be added later.
337  */
338 static const struct ieee80211_rateset iwk_rateset_11b =
339 	{ 4, { 2, 4, 11, 22 } };
340 
341 static const struct ieee80211_rateset iwk_rateset_11g =
342 	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
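
/*
 * For example, in 500Kbps units 2 = 1Mbps, 4 = 2Mbps, 11 = 5.5Mbps and
 * 22 = 11Mbps (the 11b set); 108 = 54Mbps is the highest 11g rate.
 */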
343 
344 /*
345  * For mfthread only
346  */
347 extern pri_t minclsyspri;
348 
349 #define	DRV_NAME_4965	"iwk"
350 
351 /*
352  * Module Loading Data & Entry Points
353  */
354 DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach,
355     iwk_detach, nodev, NULL, D_MP, NULL);
356 
357 static struct modldrv iwk_modldrv = {
358 	&mod_driverops,
359 	"Intel(R) 4965AGN driver(N)",
360 	&iwk_devops
361 };
362 
363 static struct modlinkage iwk_modlinkage = {
364 	MODREV_1,
365 	&iwk_modldrv,
366 	NULL
367 };
368 
369 int
370 _init(void)
371 {
372 	int	status;
373 
374 	status = ddi_soft_state_init(&iwk_soft_state_p,
375 	    sizeof (iwk_sc_t), 1);
376 	if (status != DDI_SUCCESS)
377 		return (status);
378 
379 	mac_init_ops(&iwk_devops, DRV_NAME_4965);
380 	status = mod_install(&iwk_modlinkage);
381 	if (status != DDI_SUCCESS) {
382 		mac_fini_ops(&iwk_devops);
383 		ddi_soft_state_fini(&iwk_soft_state_p);
384 	}
385 
386 	return (status);
387 }
388 
389 int
390 _fini(void)
391 {
392 	int status;
393 
394 	status = mod_remove(&iwk_modlinkage);
395 	if (status == DDI_SUCCESS) {
396 		mac_fini_ops(&iwk_devops);
397 		ddi_soft_state_fini(&iwk_soft_state_p);
398 	}
399 
400 	return (status);
401 }
402 
403 int
404 _info(struct modinfo *mip)
405 {
406 	return (mod_info(&iwk_modlinkage, mip));
407 }
408 
409 /*
410  * Mac Call Back entries
411  */
412 mac_callbacks_t	iwk_m_callbacks = {
413 	MC_IOCTL,
414 	iwk_m_stat,
415 	iwk_m_start,
416 	iwk_m_stop,
417 	iwk_m_promisc,
418 	iwk_m_multicst,
419 	iwk_m_unicst,
420 	iwk_m_tx,
421 	NULL,
422 	iwk_m_ioctl
423 };
424 
425 #ifdef DEBUG
426 void
427 iwk_dbg(uint32_t flags, const char *fmt, ...)
428 {
429 	va_list	ap;
430 
431 	if (flags & iwk_dbg_flags) {
432 		va_start(ap, fmt);
433 		vcmn_err(CE_NOTE, fmt, ap);
434 		va_end(ap);
435 	}
436 }
437 #endif
438 
439 /*
440  * device operations
441  */
442 int
443 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
444 {
445 	iwk_sc_t		*sc;
446 	ieee80211com_t	*ic;
447 	int			instance, err, i;
448 	char			strbuf[32];
449 	wifi_data_t		wd = { 0 };
450 	mac_register_t		*macp;
451 
452 	int			intr_type;
453 	int			intr_count;
454 	int			intr_actual;
455 
456 	switch (cmd) {
457 	case DDI_ATTACH:
458 		break;
459 	case DDI_RESUME:
460 		sc = ddi_get_soft_state(iwk_soft_state_p,
461 		    ddi_get_instance(dip));
462 		ASSERT(sc != NULL);
463 		mutex_enter(&sc->sc_glock);
464 		sc->sc_flags &= ~IWK_F_SUSPEND;
465 		mutex_exit(&sc->sc_glock);
466 		if (sc->sc_flags & IWK_F_RUNNING) {
467 			(void) iwk_init(sc);
468 			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
469 		}
470 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n"));
471 		return (DDI_SUCCESS);
472 	default:
473 		err = DDI_FAILURE;
474 		goto attach_fail1;
475 	}
476 
477 	instance = ddi_get_instance(dip);
478 	err = ddi_soft_state_zalloc(iwk_soft_state_p, instance);
479 	if (err != DDI_SUCCESS) {
480 		cmn_err(CE_WARN,
481 		    "iwk_attach(): failed to allocate soft state\n");
482 		goto attach_fail1;
483 	}
484 	sc = ddi_get_soft_state(iwk_soft_state_p, instance);
485 	sc->sc_dip = dip;
486 
487 	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
488 	    &iwk_reg_accattr, &sc->sc_cfg_handle);
489 	if (err != DDI_SUCCESS) {
490 		cmn_err(CE_WARN,
491 		    "iwk_attach(): failed to map config space regs\n");
492 		goto attach_fail2;
493 	}
494 	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
495 	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
496 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0);
497 	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
498 	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
499 	if (!sc->sc_clsz)
500 		sc->sc_clsz = 16;
501 	sc->sc_clsz = (sc->sc_clsz << 2);
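	/*
	 * Size of a DMA buffer: 0x1000 bytes of slack (assumed to cover the
	 * rx status/PHY header) plus the largest 802.11 frame including WEP
	 * overhead and CRC, rounded up to the cache line size computed above.
	 */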
502 	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
503 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
504 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
505 	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);
506 	/*
507 	 * Map operating registers
508 	 */
509 	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
510 	    0, 0, &iwk_reg_accattr, &sc->sc_handle);
511 	if (err != DDI_SUCCESS) {
512 		cmn_err(CE_WARN,
513 		    "iwk_attach(): failed to map device regs\n");
514 		goto attach_fail2a;
515 	}
516 
517 	err = ddi_intr_get_supported_types(dip, &intr_type);
518 	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
519 		cmn_err(CE_WARN, "iwk_attach(): "
520 		    "Fixed type interrupt is not supported\n");
521 		goto attach_fail_intr_a;
522 	}
523 
524 	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
525 	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
526 		cmn_err(CE_WARN, "iwk_attach(): "
527 		    "No fixed interrupts\n");
528 		goto attach_fail_intr_a;
529 	}
530 
531 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
532 
533 	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
534 	    intr_count, &intr_actual, 0);
535 	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
536 		cmn_err(CE_WARN, "iwk_attach(): "
537 		    "ddi_intr_alloc() failed 0x%x\n", err);
538 		goto attach_fail_intr_b;
539 	}
540 
541 	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
542 	if (err != DDI_SUCCESS) {
543 		cmn_err(CE_WARN, "iwk_attach(): "
544 		    "ddi_intr_get_pri() failed 0x%x\n", err);
545 		goto attach_fail_intr_c;
546 	}
547 
548 	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
549 	    DDI_INTR_PRI(sc->sc_intr_pri));
550 	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
551 	    DDI_INTR_PRI(sc->sc_intr_pri));
552 	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
553 	    DDI_INTR_PRI(sc->sc_intr_pri));
554 
555 	cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL);
556 	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
557 	cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL);
558 	/*
559 	 * initialize the mfthread
560 	 */
561 	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
562 	sc->sc_mf_thread = NULL;
563 	sc->sc_mf_thread_switch = 0;
564 
565 	/*
566 	 * Allocate shared page.
567 	 */
568 	err = iwk_alloc_shared(sc);
569 	if (err != DDI_SUCCESS) {
570 		cmn_err(CE_WARN, "iwk_attach(): "
571 		    "failed to allocate shared page\n");
572 		goto attach_fail3;
573 	}
574 
575 	/*
576 	 * Allocate keep warm page.
577 	 */
578 	err = iwk_alloc_kw(sc);
579 	if (err != DDI_SUCCESS) {
580 		cmn_err(CE_WARN, "iwk_attach(): "
581 		    "failed to allocate keep warm page\n");
582 		goto attach_fail3a;
583 	}
584 
585 	/*
586 	 * Do some necessary hardware initializations.
587 	 */
588 	err = iwk_preinit(sc);
589 	if (err != DDI_SUCCESS) {
590 		cmn_err(CE_WARN, "iwk_attach(): "
591 		    "failed to init hardware\n");
592 		goto attach_fail4;
593 	}
594 
595 	/* initialize EEPROM */
596 	err = iwk_eep_load(sc);  /* get hardware configurations from eeprom */
597 	if (err != 0) {
598 		cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n");
599 		goto attach_fail4;
600 	}
601 
602 	if (sc->sc_eep_map.calib_version < EEP_TX_POWER_VERSION_NEW) {
603 		IWK_DBG((IWK_DEBUG_EEPROM, "older EEPROM detected"));
		err = DDI_FAILURE;
604 		goto attach_fail4;
605 	}
606 
607 	iwk_get_mac_from_eep(sc);
608 
609 	err = iwk_ring_init(sc);
610 	if (err != DDI_SUCCESS) {
611 		cmn_err(CE_WARN, "iwk_attach(): "
612 		    "failed to allocate and initialize ring\n");
613 		goto attach_fail4;
614 	}
615 
616 	sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin;
617 
618 	err = iwk_alloc_fw_dma(sc);
619 	if (err != DDI_SUCCESS) {
620 		cmn_err(CE_WARN, "iwk_attach(): "
621 		    "failed to allocate firmware dma\n");
622 		goto attach_fail5;
623 	}
624 
625 	/*
626 	 * Initialize the wifi part, which will be used by
627 	 * generic layer
628 	 */
629 	ic = &sc->sc_ic;
630 	ic->ic_phytype  = IEEE80211_T_OFDM;
631 	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
632 	ic->ic_state    = IEEE80211_S_INIT;
633 	ic->ic_maxrssi  = 100; /* experimental number */
634 	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
635 	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
636 	/*
637 	 * use software WEP and TKIP, hardware CCMP;
638 	 */
639 	ic->ic_caps |= IEEE80211_C_AES_CCM;
640 	/*
641 	 * Support WPA/WPA2
642 	 */
643 	ic->ic_caps |= IEEE80211_C_WPA;
644 
645 	/* set supported .11b and .11g rates */
646 	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b;
647 	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g;
648 
649 	/* set supported .11b and .11g channels (1 through 14) */
650 	for (i = 1; i <= 14; i++) {
651 		ic->ic_sup_channels[i].ich_freq =
652 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
653 		ic->ic_sup_channels[i].ich_flags =
654 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
655 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
656 	}
657 
658 	ic->ic_xmit = iwk_send;
659 	/*
660 	 * init Wifi layer
661 	 */
662 	ieee80211_attach(ic);
663 
664 	/*
665 	 * each driver instance uses its own WPA door
666 	 */
667 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
668 	    ddi_driver_name(dip),
669 	    ddi_get_instance(dip));
670 
671 	/*
672 	 * Override 80211 default routines
673 	 */
674 	sc->sc_newstate = ic->ic_newstate;
675 	ic->ic_newstate = iwk_newstate;
676 	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
677 	ic->ic_node_alloc = iwk_node_alloc;
678 	ic->ic_node_free = iwk_node_free;
679 	ic->ic_crypto.cs_key_set = iwk_key_set;
680 	ieee80211_media_init(ic);
681 	/*
682 	 * initialize default tx key
683 	 */
684 	ic->ic_def_txkey = 0;
685 	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
686 	    iwk_rx_softintr, (caddr_t)sc);
687 	if (err != DDI_SUCCESS) {
688 		cmn_err(CE_WARN, "iwk_attach(): "
689 		    "add soft interrupt failed\n");
690 		goto attach_fail7;
691 	}
692 
693 	/*
694 	 * Add the interrupt handler
695 	 */
696 	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwk_intr,
697 	    (caddr_t)sc, NULL);
698 	if (err != DDI_SUCCESS) {
699 		cmn_err(CE_WARN, "iwk_attach(): "
700 		    "ddi_intr_add_handle() failed\n");
701 		goto attach_fail8;
702 	}
703 
704 	err = ddi_intr_enable(sc->sc_intr_htable[0]);
705 	if (err != DDI_SUCCESS) {
706 		cmn_err(CE_WARN, "iwk_attach(): "
707 		    "ddi_intr_enable() failed\n");
708 		goto attach_fail_intr_d;
709 	}
710 
711 	/*
712 	 * Initialize pointer to device specific functions
713 	 */
714 	wd.wd_secalloc = WIFI_SEC_NONE;
715 	wd.wd_opmode = ic->ic_opmode;
716 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
717 
718 	macp = mac_alloc(MAC_VERSION);
719 	if (macp == NULL) {
720 		err = DDI_FAILURE;
721 		cmn_err(CE_WARN, "iwk_attach(): failed to do mac_alloc()\n");
722 		goto attach_fail9;
723 	}
724 
725 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
726 	macp->m_driver		= sc;
727 	macp->m_dip		= dip;
728 	macp->m_src_addr	= ic->ic_macaddr;
729 	macp->m_callbacks	= &iwk_m_callbacks;
730 	macp->m_min_sdu		= 0;
731 	macp->m_max_sdu		= IEEE80211_MTU;
732 	macp->m_pdata		= &wd;
733 	macp->m_pdata_size	= sizeof (wd);
734 
735 	/*
736 	 * Register the macp to mac
737 	 */
738 	err = mac_register(macp, &ic->ic_mach);
739 	mac_free(macp);
740 	if (err != DDI_SUCCESS) {
741 		cmn_err(CE_WARN,
742 		    "iwk_attach(): failed to do mac_register()\n");
743 		goto attach_fail9;
744 	}
745 
746 	/*
747 	 * Create minor node of type DDI_NT_NET_WIFI
748 	 */
749 	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
750 	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
751 	    instance + 1, DDI_NT_NET_WIFI, 0);
752 	if (err != DDI_SUCCESS)
753 		cmn_err(CE_WARN,
754 		    "iwk_attach(): failed to do ddi_create_minor_node()\n");
755 
756 	/*
757 	 * Notify link is down now
758 	 */
759 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
760 
761 	/*
762 	 * create the mf thread to handle link status updates,
763 	 * recovery from fatal errors, etc.
764 	 */
765 	sc->sc_mf_thread_switch = 1;
766 	if (sc->sc_mf_thread == NULL)
767 		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
768 		    iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri);
769 
770 	sc->sc_flags |= IWK_F_ATTACHED;
771 
772 	return (DDI_SUCCESS);
773 attach_fail9:
774 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
775 attach_fail_intr_d:
776 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
777 
778 attach_fail8:
779 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
780 	sc->sc_soft_hdl = NULL;
781 attach_fail7:
782 	ieee80211_detach(ic);
783 attach_fail6:
784 	iwk_free_fw_dma(sc);
785 attach_fail5:
786 	iwk_ring_free(sc);
787 attach_fail4:
788 	iwk_free_kw(sc);
789 attach_fail3a:
790 	iwk_free_shared(sc);
791 attach_fail3:
792 	iwk_destroy_locks(sc);
793 attach_fail_intr_c:
794 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
795 attach_fail_intr_b:
796 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
797 attach_fail_intr_a:
798 	ddi_regs_map_free(&sc->sc_handle);
799 attach_fail2a:
800 	ddi_regs_map_free(&sc->sc_cfg_handle);
801 attach_fail2:
802 	ddi_soft_state_free(iwk_soft_state_p, instance);
803 attach_fail1:
804 	return (err);
805 }
806 
807 int
808 iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
809 {
810 	iwk_sc_t	*sc;
811 	int err;
812 
813 	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
814 	ASSERT(sc != NULL);
815 
816 	switch (cmd) {
817 	case DDI_DETACH:
818 		break;
819 	case DDI_SUSPEND:
820 		if (sc->sc_flags & IWK_F_RUNNING) {
821 			iwk_stop(sc);
822 		}
823 		mutex_enter(&sc->sc_glock);
824 		sc->sc_flags |= IWK_F_SUSPEND;
825 		mutex_exit(&sc->sc_glock);
826 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n"));
827 		return (DDI_SUCCESS);
828 	default:
829 		return (DDI_FAILURE);
830 	}
831 
832 	if (!(sc->sc_flags & IWK_F_ATTACHED))
833 		return (DDI_FAILURE);
834 
835 	err = mac_disable(sc->sc_ic.ic_mach);
836 	if (err != DDI_SUCCESS)
837 		return (err);
838 
839 	/*
840 	 * Destroy the mf_thread
841 	 */
842 	mutex_enter(&sc->sc_mt_lock);
843 	sc->sc_mf_thread_switch = 0;
844 	while (sc->sc_mf_thread != NULL) {
845 		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0)
846 			break;
847 	}
848 	mutex_exit(&sc->sc_mt_lock);
849 
850 	iwk_stop(sc);
851 	DELAY(500000);
852 
853 	/*
854 	 * Unregister from the MAC layer subsystem
855 	 */
856 	(void) mac_unregister(sc->sc_ic.ic_mach);
857 
858 	mutex_enter(&sc->sc_glock);
859 	iwk_free_fw_dma(sc);
860 	iwk_ring_free(sc);
861 	iwk_free_kw(sc);
862 	iwk_free_shared(sc);
863 	mutex_exit(&sc->sc_glock);
864 
865 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
866 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
867 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
868 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
869 
870 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
871 	sc->sc_soft_hdl = NULL;
872 
873 	/*
874 	 * detach ieee80211
875 	 */
876 	ieee80211_detach(&sc->sc_ic);
877 
878 	iwk_destroy_locks(sc);
879 
880 	ddi_regs_map_free(&sc->sc_handle);
881 	ddi_regs_map_free(&sc->sc_cfg_handle);
882 	ddi_remove_minor_node(dip, NULL);
883 	ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip));
884 
885 	return (DDI_SUCCESS);
886 }
887 
888 static void
889 iwk_destroy_locks(iwk_sc_t *sc)
890 {
891 	cv_destroy(&sc->sc_mt_cv);
892 	mutex_destroy(&sc->sc_mt_lock);
893 	cv_destroy(&sc->sc_tx_cv);
894 	cv_destroy(&sc->sc_cmd_cv);
895 	cv_destroy(&sc->sc_fw_cv);
896 	mutex_destroy(&sc->sc_tx_lock);
897 	mutex_destroy(&sc->sc_glock);
898 }
899 
900 /*
901  * Allocate an area of memory and a DMA handle for accessing it
902  */
903 static int
904 iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize,
905     ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
906     uint_t dma_flags, iwk_dma_t *dma_p)
907 {
908 	caddr_t vaddr;
909 	int err;
910 
911 	/*
912 	 * Allocate handle
913 	 */
914 	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
915 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
916 	if (err != DDI_SUCCESS) {
917 		dma_p->dma_hdl = NULL;
918 		return (DDI_FAILURE);
919 	}
920 
921 	/*
922 	 * Allocate memory
923 	 */
924 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
925 	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
926 	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
927 	if (err != DDI_SUCCESS) {
928 		ddi_dma_free_handle(&dma_p->dma_hdl);
929 		dma_p->dma_hdl = NULL;
930 		dma_p->acc_hdl = NULL;
931 		return (DDI_FAILURE);
932 	}
933 
934 	/*
935 	 * Bind the two together
936 	 */
937 	dma_p->mem_va = vaddr;
938 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
939 	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
940 	    &dma_p->cookie, &dma_p->ncookies);
941 	if (err != DDI_DMA_MAPPED) {
942 		ddi_dma_mem_free(&dma_p->acc_hdl);
943 		ddi_dma_free_handle(&dma_p->dma_hdl);
944 		dma_p->acc_hdl = NULL;
945 		dma_p->dma_hdl = NULL;
946 		return (DDI_FAILURE);
947 	}
948 
949 	dma_p->nslots = ~0U;
950 	dma_p->size = ~0U;
951 	dma_p->token = ~0U;
952 	dma_p->offset = 0;
953 	return (DDI_SUCCESS);
954 }
955 
956 /*
957  * Free one allocated area of DMAable memory
958  */
959 static void
960 iwk_free_dma_mem(iwk_dma_t *dma_p)
961 {
962 	if (dma_p->dma_hdl != NULL) {
963 		if (dma_p->ncookies) {
964 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
965 			dma_p->ncookies = 0;
966 		}
967 		ddi_dma_free_handle(&dma_p->dma_hdl);
968 		dma_p->dma_hdl = NULL;
969 	}
970 
971 	if (dma_p->acc_hdl != NULL) {
972 		ddi_dma_mem_free(&dma_p->acc_hdl);
973 		dma_p->acc_hdl = NULL;
974 	}
975 }
976 
977 /*
978  * Allocate DMA memory for the firmware sections and copy them in.
979  */
980 static int
981 iwk_alloc_fw_dma(iwk_sc_t *sc)
982 {
983 	int err = DDI_SUCCESS;
984 	iwk_dma_t *dma_p;
985 	char *t;
986 
987 	/*
988 	 * firmware image layout:
989 	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
990 	 */
991 	t = (char *)(sc->sc_hdr + 1);
992 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
993 	    &fw_dma_attr, &iwk_dma_accattr,
994 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
995 	    &sc->sc_dma_fw_text);
996 	dma_p = &sc->sc_dma_fw_text;
997 	IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n",
998 	    dma_p->ncookies, dma_p->cookie.dmac_address,
999 	    dma_p->cookie.dmac_size));
1000 	if (err != DDI_SUCCESS) {
1001 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1002 		    " text dma memory");
1003 		goto fail;
1004 	}
1005 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
1006 
1007 	t += LE_32(sc->sc_hdr->textsz);
1008 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1009 	    &fw_dma_attr, &iwk_dma_accattr,
1010 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1011 	    &sc->sc_dma_fw_data);
1012 	dma_p = &sc->sc_dma_fw_data;
1013 	IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
1014 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1015 	    dma_p->cookie.dmac_size));
1016 	if (err != DDI_SUCCESS) {
1017 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1018 		    " data dma memory");
1019 		goto fail;
1020 	}
1021 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1022 
1023 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1024 	    &fw_dma_attr, &iwk_dma_accattr,
1025 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1026 	    &sc->sc_dma_fw_data_bak);
1027 	dma_p = &sc->sc_dma_fw_data_bak;
1028 	IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
1029 	    "size:%lx]\n",
1030 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1031 	    dma_p->cookie.dmac_size));
1032 	if (err != DDI_SUCCESS) {
1033 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1034 		    " data backup dma memory");
1035 		goto fail;
1036 	}
1037 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1038 
1039 	t += LE_32(sc->sc_hdr->datasz);
1040 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1041 	    &fw_dma_attr, &iwk_dma_accattr,
1042 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1043 	    &sc->sc_dma_fw_init_text);
1044 	dma_p = &sc->sc_dma_fw_init_text;
1045 	IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
1046 	    "size:%lx]\n",
1047 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1048 	    dma_p->cookie.dmac_size));
1049 	if (err != DDI_SUCCESS) {
1050 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1051 		    " init text dma memory");
1052 		goto fail;
1053 	}
1054 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
1055 
1056 	t += LE_32(sc->sc_hdr->init_textsz);
1057 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1058 	    &fw_dma_attr, &iwk_dma_accattr,
1059 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1060 	    &sc->sc_dma_fw_init_data);
1061 	dma_p = &sc->sc_dma_fw_init_data;
1062 	IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
1063 	    "size:%lx]\n",
1064 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1065 	    dma_p->cookie.dmac_size));
1066 	if (err != DDI_SUCCESS) {
1067 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1068 		    " init data dma memory");
1069 		goto fail;
1070 	}
1071 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
1072 
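	/*
	 * The boot image (sc_hdr->bootsz bytes) follows the init data in the
	 * ucode file; it is not copied into DMA memory but is written into
	 * BSM SRAM by iwk_load_firmware().
	 */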
1073 	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1074 fail:
1075 	return (err);
1076 }
1077 
1078 static void
1079 iwk_free_fw_dma(iwk_sc_t *sc)
1080 {
1081 	iwk_free_dma_mem(&sc->sc_dma_fw_text);
1082 	iwk_free_dma_mem(&sc->sc_dma_fw_data);
1083 	iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
1084 	iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
1085 	iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
1086 }
1087 
1088 /*
1089  * Allocate a shared page between host and NIC.
1090  */
1091 static int
1092 iwk_alloc_shared(iwk_sc_t *sc)
1093 {
1094 	iwk_dma_t *dma_p;
1095 	int err = DDI_SUCCESS;
1096 
1097 	/* must be aligned on a 4K-page boundary */
1098 	err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
1099 	    &sh_dma_attr, &iwk_dma_accattr,
1100 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1101 	    &sc->sc_dma_sh);
1102 	if (err != DDI_SUCCESS)
1103 		goto fail;
1104 	sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;
1105 
1106 	dma_p = &sc->sc_dma_sh;
1107 	IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
1108 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1109 	    dma_p->cookie.dmac_size));
1110 
1111 	return (err);
1112 fail:
1113 	iwk_free_shared(sc);
1114 	return (err);
1115 }
1116 
1117 static void
1118 iwk_free_shared(iwk_sc_t *sc)
1119 {
1120 	iwk_free_dma_mem(&sc->sc_dma_sh);
1121 }
1122 
1123 /*
1124  * Allocate a keep warm page.
1125  */
1126 static int
1127 iwk_alloc_kw(iwk_sc_t *sc)
1128 {
1129 	iwk_dma_t *dma_p;
1130 	int err = DDI_SUCCESS;
1131 
1132 	/* must be aligned on a 4K-page boundary */
1133 	err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE,
1134 	    &kw_dma_attr, &iwk_dma_accattr,
1135 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1136 	    &sc->sc_dma_kw);
1137 	if (err != DDI_SUCCESS)
1138 		goto fail;
1139 
1140 	dma_p = &sc->sc_dma_kw;
1141 	IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n",
1142 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1143 	    dma_p->cookie.dmac_size));
1144 
1145 	return (err);
1146 fail:
1147 	iwk_free_kw(sc);
1148 	return (err);
1149 }
1150 
1151 static void
1152 iwk_free_kw(iwk_sc_t *sc)
1153 {
1154 	iwk_free_dma_mem(&sc->sc_dma_kw);
1155 }
1156 
1157 static int
1158 iwk_alloc_rx_ring(iwk_sc_t *sc)
1159 {
1160 	iwk_rx_ring_t *ring;
1161 	iwk_rx_data_t *data;
1162 	iwk_dma_t *dma_p;
1163 	int i, err = DDI_SUCCESS;
1164 
1165 	ring = &sc->sc_rxq;
1166 	ring->cur = 0;
1167 
1168 	err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
1169 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1170 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1171 	    &ring->dma_desc);
1172 	if (err != DDI_SUCCESS) {
1173 		IWK_DBG((IWK_DEBUG_DMA, "dma alloc rx ring desc "
1174 		    "failed\n"));
1175 		goto fail;
1176 	}
1177 	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
1178 	dma_p = &ring->dma_desc;
1179 	IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n",
1180 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1181 	    dma_p->cookie.dmac_size));
1182 
1183 	/*
1184 	 * Allocate Rx buffers.
1185 	 */
1186 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1187 		data = &ring->data[i];
1188 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1189 		    &rx_buffer_dma_attr, &iwk_dma_accattr,
1190 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1191 		    &data->dma_data);
1192 		if (err != DDI_SUCCESS) {
1193 			IWK_DBG((IWK_DEBUG_DMA, "dma alloc rx ring "
1194 			    "buf[%d] failed\n", i));
1195 			goto fail;
1196 		}
1197 		/*
1198 		 * bits [35:8] of the physical address are used here
1199 		 * (note the >> 8 below), whereas the 3945 used bits [31:0].
1200 		 */
1201 		ring->desc[i] = LE_32((uint32_t)
1202 		    (data->dma_data.cookie.dmac_address >> 8));
1203 	}
1204 	dma_p = &ring->data[0].dma_data;
1205 	IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx "
1206 	    "size:%lx]\n",
1207 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1208 	    dma_p->cookie.dmac_size));
1209 
1210 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1211 
1212 	return (err);
1213 
1214 fail:
1215 	iwk_free_rx_ring(sc);
1216 	return (err);
1217 }
1218 
1219 static void
1220 iwk_reset_rx_ring(iwk_sc_t *sc)
1221 {
1222 	int n;
1223 
1224 	iwk_mac_access_enter(sc);
1225 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1226 	for (n = 0; n < 2000; n++) {
1227 		if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24))
1228 			break;
1229 		DELAY(1000);
1230 	}
1231 #ifdef DEBUG
1232 	if (n == 2000)
1233 		IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n"));
1234 #endif
1235 	iwk_mac_access_exit(sc);
1236 
1237 	sc->sc_rxq.cur = 0;
1238 }
1239 
1240 static void
1241 iwk_free_rx_ring(iwk_sc_t *sc)
1242 {
1243 	int i;
1244 
1245 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1246 		if (sc->sc_rxq.data[i].dma_data.dma_hdl)
1247 			IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1248 			    DDI_DMA_SYNC_FORCPU);
1249 		iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1250 	}
1251 
1252 	if (sc->sc_rxq.dma_desc.dma_hdl)
1253 		IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1254 	iwk_free_dma_mem(&sc->sc_rxq.dma_desc);
1255 }
1256 
1257 static int
1258 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring,
1259     int slots, int qid)
1260 {
1261 	iwk_tx_data_t *data;
1262 	iwk_tx_desc_t *desc_h;
1263 	uint32_t paddr_desc_h;
1264 	iwk_cmd_t *cmd_h;
1265 	uint32_t paddr_cmd_h;
1266 	iwk_dma_t *dma_p;
1267 	int i, err = DDI_SUCCESS;
1268 
1269 	ring->qid = qid;
1270 	ring->count = TFD_QUEUE_SIZE_MAX;
1271 	ring->window = slots;
1272 	ring->queued = 0;
1273 	ring->cur = 0;
1274 
1275 	err = iwk_alloc_dma_mem(sc,
1276 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t),
1277 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1278 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1279 	    &ring->dma_desc);
1280 	if (err != DDI_SUCCESS) {
1281 		IWK_DBG((IWK_DEBUG_DMA, "dma alloc tx ring desc[%d]"
1282 		    " failed\n", qid));
1283 		goto fail;
1284 	}
1285 	dma_p = &ring->dma_desc;
1286 	IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1287 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1288 	    dma_p->cookie.dmac_size));
1289 
1290 	desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va;
1291 	paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1292 
1293 	err = iwk_alloc_dma_mem(sc,
1294 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t),
1295 	    &cmd_dma_attr, &iwk_dma_accattr,
1296 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1297 	    &ring->dma_cmd);
1298 	if (err != DDI_SUCCESS) {
1299 		IWK_DBG((IWK_DEBUG_DMA, "dma alloc tx ring cmd[%d]"
1300 		    " failed\n", qid));
1301 		goto fail;
1302 	}
1303 	dma_p = &ring->dma_cmd;
1304 	IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1305 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1306 	    dma_p->cookie.dmac_size));
1307 
1308 	cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va;
1309 	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1310 
1311 	/*
1312 	 * Allocate Tx buffers.
1313 	 */
1314 	ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1315 	    KM_NOSLEEP);
1316 	if (ring->data == NULL) {
1317 		IWK_DBG((IWK_DEBUG_DMA, "could not allocate "
1318 		    "tx data slots\n"));
1319 		goto fail;
1320 	}
1321 
1322 	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1323 		data = &ring->data[i];
1324 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1325 		    &tx_buffer_dma_attr, &iwk_dma_accattr,
1326 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
1327 		    &data->dma_data);
1328 		if (err != DDI_SUCCESS) {
1329 			IWK_DBG((IWK_DEBUG_DMA, "dma alloc tx "
1330 			    "ring buf[%d] failed\n", i));
1331 			goto fail;
1332 		}
1333 
1334 		data->desc = desc_h + i;
1335 		data->paddr_desc = paddr_desc_h +
1336 		    _PTRDIFF(data->desc, desc_h);
1337 		data->cmd = cmd_h + i; /* (i % slots); */
1338 		/* ((i % slots) * sizeof (iwk_cmd_t)); */
1339 		data->paddr_cmd = paddr_cmd_h +
1340 		    _PTRDIFF(data->cmd, cmd_h);
1341 	}
1342 	dma_p = &ring->data[0].dma_data;
1343 	IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx "
1344 	    "size:%lx]\n",
1345 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1346 	    dma_p->cookie.dmac_size));
1347 
1348 	return (err);
1349 
1350 fail:
1351 	/* iwk_free_tx_ring() below releases ring->data and any buffers */
1354 	iwk_free_tx_ring(sc, ring);
1355 	return (err);
1356 }
1357 
1358 static void
1359 iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1360 {
1361 	iwk_tx_data_t *data;
1362 	int i, n;
1363 
1364 	iwk_mac_access_enter(sc);
1365 
1366 	IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1367 	for (n = 0; n < 200; n++) {
1368 		if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) &
1369 		    IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid))
1370 			break;
1371 		DELAY(10);
1372 	}
1373 #ifdef DEBUG
1374 	if (n == 200 && iwk_dbg_flags > 0) {
1375 		IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n",
1376 		    ring->qid));
1377 	}
1378 #endif
1379 	iwk_mac_access_exit(sc);
1380 
1381 	for (i = 0; i < ring->count; i++) {
1382 		data = &ring->data[i];
1383 		IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1384 	}
1385 
1386 	ring->queued = 0;
1387 	ring->cur = 0;
1388 }
1389 
1390 /*ARGSUSED*/
1391 static void
1392 iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1393 {
1394 	int i;
1395 
1396 	if (ring->dma_desc.dma_hdl != NULL)
1397 		IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1398 	iwk_free_dma_mem(&ring->dma_desc);
1399 
1400 	if (ring->dma_cmd.dma_hdl != NULL)
1401 		IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1402 	iwk_free_dma_mem(&ring->dma_cmd);
1403 
1404 	if (ring->data != NULL) {
1405 		for (i = 0; i < ring->count; i++) {
1406 			if (ring->data[i].dma_data.dma_hdl)
1407 				IWK_DMA_SYNC(ring->data[i].dma_data,
1408 				    DDI_DMA_SYNC_FORDEV);
1409 			iwk_free_dma_mem(&ring->data[i].dma_data);
1410 		}
1411 		kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t));
1412 	}
1413 }
1414 
1415 static int
1416 iwk_ring_init(iwk_sc_t *sc)
1417 {
1418 	int i, err = DDI_SUCCESS;
1419 
1420 	for (i = 0; i < IWK_NUM_QUEUES; i++) {
1421 		if (i == IWK_CMD_QUEUE_NUM)
1422 			continue;
1423 		err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1424 		    i);
1425 		if (err != DDI_SUCCESS)
1426 			goto fail;
1427 	}
1428 	err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM],
1429 	    TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM);
1430 	if (err != DDI_SUCCESS)
1431 		goto fail;
1432 	err = iwk_alloc_rx_ring(sc);
1433 	if (err != DDI_SUCCESS)
1434 		goto fail;
1435 	return (err);
1436 
1437 fail:
1438 	return (err);
1439 }
1440 
1441 static void
1442 iwk_ring_free(iwk_sc_t *sc)
1443 {
1444 	int i = IWK_NUM_QUEUES;
1445 
1446 	iwk_free_rx_ring(sc);
1447 	while (--i >= 0) {
1448 		iwk_free_tx_ring(sc, &sc->sc_txq[i]);
1449 	}
1450 }
1451 
1452 /* ARGSUSED */
1453 static ieee80211_node_t *
1454 iwk_node_alloc(ieee80211com_t *ic)
1455 {
1456 	iwk_amrr_t *amrr;
1457 
1458 	amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP);
1459 	if (amrr != NULL)
1460 		iwk_amrr_init(amrr);
1461 	return (&amrr->in);
1462 }
1463 
1464 static void
1465 iwk_node_free(ieee80211_node_t *in)
1466 {
1467 	ieee80211com_t *ic = in->in_ic;
1468 
1469 	ic->ic_node_cleanup(in);
1470 	if (in->in_wpa_ie != NULL)
1471 		ieee80211_free(in->in_wpa_ie);
1472 	kmem_free(in, sizeof (iwk_amrr_t));
1473 }
1474 
1475 /*ARGSUSED*/
1476 static int
1477 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1478 {
1479 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1480 	ieee80211_node_t *in = ic->ic_bss;
1481 	enum ieee80211_state ostate = ic->ic_state;
1482 	int i, err = IWK_SUCCESS;
1483 
1484 	mutex_enter(&sc->sc_glock);
1485 	switch (nstate) {
1486 	case IEEE80211_S_SCAN:
1487 		ic->ic_state = nstate;
1488 		if (ostate == IEEE80211_S_INIT) {
1489 			ic->ic_flags |= IEEE80211_F_SCAN | IEEE80211_F_ASCAN;
1490 			/* let LED blink when scanning */
1491 			iwk_set_led(sc, 2, 10, 2);
1492 
1493 			if ((err = iwk_scan(sc)) != 0) {
1494 				IWK_DBG((IWK_DEBUG_80211,
1495 				    "could not initiate scan\n"));
1496 				ic->ic_flags &= ~(IEEE80211_F_SCAN |
1497 				    IEEE80211_F_ASCAN);
1498 				ic->ic_state = ostate;
1499 				mutex_exit(&sc->sc_glock);
1500 				return (err);
1501 			}
1502 		}
1503 		sc->sc_clk = 0;
1504 		mutex_exit(&sc->sc_glock);
1505 		return (IWK_SUCCESS);
1506 
1507 	case IEEE80211_S_AUTH:
1508 		/* reset state to handle reassociations correctly */
1509 		sc->sc_config.assoc_id = 0;
1510 		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1511 
1512 		/*
1513 		 * before sending the authentication and association request
1514 		 * frames, the hardware must be prepared, e.g. by switching
1515 		 * to the same channel as the target AP.
1516 		 */
1517 		if ((err = iwk_hw_set_before_auth(sc)) != 0) {
1518 			IWK_DBG((IWK_DEBUG_80211,
1519 			    "could not send authentication request\n"));
1520 			mutex_exit(&sc->sc_glock);
1521 			return (err);
1522 		}
1523 		break;
1524 
1525 	case IEEE80211_S_RUN:
1526 		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
1527 			/* let LED blink when monitoring */
1528 			iwk_set_led(sc, 2, 10, 10);
1529 			break;
1530 		}
1531 		IWK_DBG((IWK_DEBUG_80211, "iwk: associated."));
1532 
1533 		/* non-IBSS mode */
1534 		if (ic->ic_opmode != IEEE80211_M_IBSS) {
1535 			/* update adapter's configuration */
1536 			sc->sc_config.assoc_id = sc->sc_assoc_id & 0x3fff;
1537 			/*
1538 			 * short preamble/slot time are
1539 			 * negotiated when associating
1540 			 */
1541 			sc->sc_config.flags &=
1542 			    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
1543 			    RXON_FLG_SHORT_SLOT_MSK);
1544 
1545 			if (ic->ic_flags & IEEE80211_F_SHSLOT)
1546 				sc->sc_config.flags |=
1547 				    LE_32(RXON_FLG_SHORT_SLOT_MSK);
1548 
1549 			if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
1550 				sc->sc_config.flags |=
1551 				    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
1552 
1553 			sc->sc_config.filter_flags |=
1554 			    LE_32(RXON_FILTER_ASSOC_MSK);
1555 
1556 			if (ic->ic_opmode != IEEE80211_M_STA)
1557 				sc->sc_config.filter_flags |=
1558 				    LE_32(RXON_FILTER_BCON_AWARE_MSK);
1559 
1560 			IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x"
1561 			    " filter_flags %x\n",
1562 			    sc->sc_config.chan, sc->sc_config.flags,
1563 			    sc->sc_config.filter_flags));
1564 			err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
1565 			    sizeof (iwk_rxon_cmd_t), 1);
1566 			if (err != IWK_SUCCESS) {
1567 				IWK_DBG((IWK_DEBUG_80211,
1568 				    "could not update configuration\n"));
1569 				mutex_exit(&sc->sc_glock);
1570 				return (err);
1571 			}
1572 		}
1573 
1574 		/* obtain current temperature of chipset */
1575 		sc->sc_tempera = iwk_curr_tempera(sc);
1576 
1577 		/*
1578 		 * perform Tx power calibration to determine
1579 		 * the DSP and radio gains
1580 		 */
1581 		err = iwk_tx_power_calibration(sc);
1582 		if (err) {
1583 			cmn_err(CE_WARN, "iwk_newstate(): "
1584 			    "failed to set tx power table\n");
			mutex_exit(&sc->sc_glock);
1585 			return (err);
1586 		}
1587 
1588 		/* start automatic rate control */
1589 		mutex_enter(&sc->sc_mt_lock);
1590 		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1591 			sc->sc_flags |= IWK_F_RATE_AUTO_CTL;
1592 			/* set rate to some reasonable initial value */
1593 			i = in->in_rates.ir_nrates - 1;
1594 			while (i > 0 && IEEE80211_RATE(i) > 72)
1595 				i--;
1596 			in->in_txrate = i;
1597 		} else {
1598 			sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
1599 		}
1600 		mutex_exit(&sc->sc_mt_lock);
1601 
1602 		/* set LED on after associated */
1603 		iwk_set_led(sc, 2, 0, 1);
1604 		break;
1605 
1606 	case IEEE80211_S_INIT:
1607 		/* set LED off after init */
1608 		iwk_set_led(sc, 2, 1, 0);
1609 		break;
1610 	case IEEE80211_S_ASSOC:
1611 		break;
1612 	}
1613 
1614 	mutex_exit(&sc->sc_glock);
1615 
1616 	err = sc->sc_newstate(ic, nstate, arg);
1617 
1618 	if (nstate == IEEE80211_S_RUN) {
1619 
1620 		mutex_enter(&sc->sc_glock);
1621 
1622 		/*
1623 		 * initialize the receiver sensitivity
1624 		 * calibration
1625 		 */
1626 		err = iwk_rx_sens_init(sc);
1627 		if (err) {
1628 			cmn_err(CE_WARN, "iwk_newstate(): "
1629 			    "failed to init RX sensitivity\n");
			mutex_exit(&sc->sc_glock);
1630 			return (err);
1631 		}
1632 
1633 		/* initialize the receiver gain balance calibration */
1634 		err = iwk_rxgain_diff_init(sc);
1635 		if (err) {
1636 			cmn_err(CE_WARN, "iwk_newstate(): "
1637 			    "failed to init phy calibration\n");
			mutex_exit(&sc->sc_glock);
1638 			return (err);
1639 		}
1640 
1641 		mutex_exit(&sc->sc_glock);
1642 
1643 	}
1644 
1645 	return (err);
1646 }
1647 
1648 /*ARGSUSED*/
1649 static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
1650     const uint8_t mac[IEEE80211_ADDR_LEN])
1651 {
1652 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1653 	iwk_add_sta_t node;
1654 	int err;
1655 
1656 	switch (k->wk_cipher->ic_cipher) {
1657 	case IEEE80211_CIPHER_WEP:
1658 	case IEEE80211_CIPHER_TKIP:
1659 		return (1); /* software handles these ciphers */
1660 	case IEEE80211_CIPHER_AES_CCM:
1661 		break;
1662 	default:
1663 		return (0);
1664 	}
1665 	sc->sc_config.filter_flags &= ~(RXON_FILTER_DIS_DECRYPT_MSK |
1666 	    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
1667 
1668 	mutex_enter(&sc->sc_glock);
1669 
1670 	/* update ap/multicast node */
1671 	(void) memset(&node, 0, sizeof (node));
1672 	if (IEEE80211_IS_MULTICAST(mac)) {
1673 		(void) memset(node.bssid, 0xff, 6);
1674 		node.id = IWK_BROADCAST_ID;
1675 	} else {
1676 		IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid);
1677 		node.id = IWK_AP_ID;
1678 	}
1679 	if (k->wk_flags & IEEE80211_KEY_XMIT) {
1680 		node.key_flags = 0;
1681 		node.keyp = k->wk_keyix;
1682 	} else {
1683 		node.key_flags = (1 << 14);
1684 		node.keyp = k->wk_keyix + 4;
1685 	}
1686 	(void) memcpy(node.key, k->wk_key, k->wk_keylen);
1687 	node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1688 	node.sta_mask = STA_MODIFY_KEY_MASK;
1689 	node.control = 1;
1690 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
1691 	if (err != IWK_SUCCESS) {
1692 		cmn_err(CE_WARN, "iwk_key_set(): "
1693 		    "failed to update ap node\n");
1694 		mutex_exit(&sc->sc_glock);
1695 		return (0);
1696 	}
1697 	mutex_exit(&sc->sc_glock);
1698 	return (1);
1699 }
1700 
1701 /*
1702  * exclusive access to mac begin.
1703  */
1704 static void
1705 iwk_mac_access_enter(iwk_sc_t *sc)
1706 {
1707 	uint32_t tmp;
1708 	int n;
1709 
1710 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
1711 	IWK_WRITE(sc, CSR_GP_CNTRL,
1712 	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1713 
1714 	/* wait until we succeed */
1715 	for (n = 0; n < 1000; n++) {
1716 		if ((IWK_READ(sc, CSR_GP_CNTRL) &
1717 		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1718 		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1719 		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN)
1720 			break;
1721 		DELAY(10);
1722 	}
1723 	if (n == 1000)
1724 		IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n"));
1725 }
1726 
1727 /*
1728  * exclusive access to mac end.
1729  */
1730 static void
1731 iwk_mac_access_exit(iwk_sc_t *sc)
1732 {
1733 	uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
1734 	IWK_WRITE(sc, CSR_GP_CNTRL,
1735 	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1736 }
1737 
1738 static uint32_t
1739 iwk_mem_read(iwk_sc_t *sc, uint32_t addr)
1740 {
1741 	IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
1742 	return (IWK_READ(sc, HBUS_TARG_MEM_RDAT));
1743 }
1744 
1745 static void
1746 iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1747 {
1748 	IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
1749 	IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
1750 }
1751 
1752 static uint32_t
1753 iwk_reg_read(iwk_sc_t *sc, uint32_t addr)
1754 {
1755 	IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
1756 	return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT));
1757 }
1758 
1759 static void
1760 iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1761 {
1762 	IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
1763 	IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
1764 }
1765 
1766 static void
1767 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr,
1768     uint32_t *data, int wlen)
1769 {
1770 	for (; wlen > 0; wlen--, data++, addr += 4)
1771 		iwk_reg_write(sc, addr, *data);
1772 }
1773 
1774 
1775 /*
1776  * ucode load/initialization steps:
1777  * 1)  load Bootstrap State Machine (BSM) with "bootstrap" uCode image.
1778  * BSM contains a small memory that *always* stays powered up, so it can
1779  * retain the bootstrap program even when the card is in a power-saving
1780  * power-down state.  The BSM loads the small program into ARC processor's
1781  * instruction memory when triggered by power-up.
1782  * 2)  load Initialize image via bootstrap program.
1783  * The Initialize image sets up regulatory and calibration data for the
1784  * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed.
1785  * The 4965 reply contains calibration data for temperature, voltage and tx gain
1786  * correction.
1787  */
1788 static int
1789 iwk_load_firmware(iwk_sc_t *sc)
1790 {
1791 	uint32_t *boot_fw = (uint32_t *)sc->sc_boot;
1792 	uint32_t size = sc->sc_hdr->bootsz;
1793 	int n, err = IWK_SUCCESS;
1794 
1795 	/*
1796 	 * The physical address bit [4-35] of the initialize uCode.
1797 	 * In the initialize alive notify interrupt the physical address of
1798 	 * the runtime ucode will be set for loading.
1799 	 */
1800 	iwk_mac_access_enter(sc);
1801 
1802 	iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
1803 	    sc->sc_dma_fw_init_text.cookie.dmac_address >> 4);
1804 	iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
1805 	    sc->sc_dma_fw_init_data.cookie.dmac_address >> 4);
1806 	iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
1807 	    sc->sc_dma_fw_init_text.cookie.dmac_size);
1808 	iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
1809 	    sc->sc_dma_fw_init_data.cookie.dmac_size);
1810 
1811 	/* load bootstrap code into BSM memory */
1812 	iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw,
1813 	    size / sizeof (uint32_t));
1814 
1815 	iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0);
1816 	iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
1817 	iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t));
1818 
1819 	/*
1820 	 * prepare to load initialize uCode
1821 	 */
1822 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
1823 
1824 	/* wait while the adapter is busy loading the firmware */
1825 	for (n = 0; n < 1000; n++) {
1826 		if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) &
1827 		    BSM_WR_CTRL_REG_BIT_START))
1828 			break;
1829 		DELAY(10);
1830 	}
1831 	if (n == 1000) {
1832 		IWK_DBG((IWK_DEBUG_FW,
1833 		    "timeout transferring firmware\n"));
1834 		err = ETIMEDOUT;
1835 		return (err);
1836 	}
1837 
1838 	/* for future power-save mode use */
1839 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
1840 
1841 	iwk_mac_access_exit(sc);
1842 
1843 	return (err);
1844 }
1845 
1846 /*ARGSUSED*/
1847 static void
1848 iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
1849 {
1850 	ieee80211com_t *ic = &sc->sc_ic;
1851 	iwk_rx_ring_t *ring = &sc->sc_rxq;
1852 	iwk_rx_phy_res_t *stat;
1853 	ieee80211_node_t *in;
1854 	uint32_t *tail;
1855 	struct ieee80211_frame *wh;
1856 	mblk_t *mp;
1857 	uint16_t len, rssi, mrssi, agc;
1858 	int16_t t;
1859 	uint32_t ants, i;
1860 	struct iwk_rx_non_cfg_phy *phyinfo;
1861 
1862 	/* assume no 11n here; 11n will be handled in phase II */
1863 	stat = (iwk_rx_phy_res_t *)(desc + 1);
1864 	if (stat->cfg_phy_cnt > 20) {
1865 		return;
1866 	}
1867 
1868 	phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy;
1869 	agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS;
1870 	mrssi = 0;
1871 	ants = (stat->phy_flags & RX_PHY_FLAGS_ANTENNAE_MASK) >>
1872 	    RX_PHY_FLAGS_ANTENNAE_OFFSET;
1873 	for (i = 0; i < 3; i++) {
1874 		if (ants & (1 << i))
1875 			mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]);
1876 	}
1877 	t = mrssi - agc - 44; /* t is the dBm value */
1878 	/*
1879 	 * convert dBm to percentage ???
1880 	 */
1881 	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t))) /
1882 	    (75 * 75);
1883 	if (rssi > 100)
1884 		rssi = 100;
1885 	if (rssi < 1)
1886 		rssi = 1;
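	/*
	 * The expression above appears to be a heuristic quadratic mapping
	 * of the signal level t (in dBm) onto a 1..100 percentage: t = -20
	 * dBm maps to 100 and weaker signals fall off toward the lower clamp.
	 */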
1887 	len = stat->byte_count;
1888 	tail = (uint32_t *)((uint8_t *)(stat + 1) + stat->cfg_phy_cnt + len);
1889 
1890 	IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d "
1891 	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
1892 	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
1893 	    len, stat->rate.r.s.rate, stat->channel,
1894 	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
1895 	    stat->cfg_phy_cnt, LE_32(*tail)));
1896 
1897 	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
1898 		IWK_DBG((IWK_DEBUG_RX, "rx frame invalid length\n"));
1899 		return;
1900 	}
1901 
1902 	/*
1903 	 * discard Rx frames with bad CRC
1904 	 */
1905 	if ((LE_32(*tail) &
1906 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
1907 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
1908 		IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n",
1909 		    LE_32(*tail)));
1910 		sc->sc_rx_err++;
1911 		return;
1912 	}
1913 
1914 	wh = (struct ieee80211_frame *)
1915 	    ((uint8_t *)(stat + 1)+ stat->cfg_phy_cnt);
1916 	if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) {
1917 		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
1918 		IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n",
1919 		    sc->sc_assoc_id));
1920 	}
1921 #ifdef DEBUG
1922 	if (iwk_dbg_flags & IWK_DEBUG_RX)
1923 		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
1924 #endif
1925 	in = ieee80211_find_rxnode(ic, wh);
1926 	mp = allocb(len, BPRI_MED);
1927 	if (mp) {
1928 		(void) memcpy(mp->b_wptr, wh, len);
1929 		mp->b_wptr += len;
1930 
1931 		/* send the frame to the 802.11 layer */
1932 		(void) ieee80211_input(ic, mp, in, rssi, 0);
1933 	} else {
1934 		sc->sc_rx_nobuf++;
1935 		IWK_DBG((IWK_DEBUG_RX,
1936 		    "iwk_rx_intr(): alloc rx buf failed\n"));
1937 	}
1938 	/* release node reference */
1939 	ieee80211_free_node(in);
1940 }
1941 
1942 /*ARGSUSED*/
1943 static void
1944 iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
1945 {
1946 	ieee80211com_t *ic = &sc->sc_ic;
1947 	iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
1948 	iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1);
1949 	iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss;
1950 
1951 	IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d"
1952 	    " retries=%d frame_count=%x nkill=%d "
1953 	    "rate=%x duration=%d status=%x\n",
1954 	    desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count,
1955 	    stat->bt_kill_count, stat->rate.r.s.rate,
1956 	    LE_32(stat->duration), LE_32(stat->status)));
1957 
1958 	amrr->txcnt++;
1959 	IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt));
1960 	if (stat->ntries > 0) {
1961 		amrr->retrycnt++;
1962 		sc->sc_tx_retries++;
1963 		IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n",
1964 		    sc->sc_tx_retries));
1965 	}
1966 
1967 	sc->sc_tx_timer = 0;
1968 
1969 	mutex_enter(&sc->sc_tx_lock);
1970 	ring->queued--;
1971 	if (ring->queued < 0)
1972 		ring->queued = 0;
1973 	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) {
1974 		sc->sc_need_reschedule = 0;
1975 		mutex_exit(&sc->sc_tx_lock);
1976 		mac_tx_update(ic->ic_mach);
1977 		mutex_enter(&sc->sc_tx_lock);
1978 	}
1979 	mutex_exit(&sc->sc_tx_lock);
1980 }
1981 
1982 static void
1983 iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc)
1984 {
1985 	if ((desc->hdr.qid & 7) != 4) {
1986 		return;
1987 	}
1988 	mutex_enter(&sc->sc_glock);
1989 	sc->sc_flags |= IWK_F_CMD_DONE;
1990 	cv_signal(&sc->sc_cmd_cv);
1991 	mutex_exit(&sc->sc_glock);
1992 	IWK_DBG((IWK_DEBUG_CMD, "rx cmd: "
1993 	    "qid=%x idx=%d flags=%x type=0x%x\n",
1994 	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
1995 	    desc->hdr.type));
1996 }
1997 
1998 static void
1999 iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2000 {
2001 	uint32_t base, i;
2002 	struct iwk_alive_resp *ar =
2003 	    (struct iwk_alive_resp *)(desc + 1);
2004 
2005 	/* the microcontroller is ready */
2006 	IWK_DBG((IWK_DEBUG_FW,
2007 	    "microcode alive notification minor: %x major: %x type:"
2008 	    " %x subtype: %x\n",
2009 	    ar->ucode_minor, ar->ucode_major, ar->ver_type, ar->ver_subtype));
2010 
2011 	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2012 		IWK_DBG((IWK_DEBUG_FW,
2013 		    "microcontroller initialization failed\n"));
2014 	}
2015 	if (ar->ver_subtype == INITIALIZE_SUBTYPE) {
2016 		IWK_DBG((IWK_DEBUG_FW,
2017 		    "initialization alive received.\n"));
2018 		(void) memcpy(&sc->sc_card_alive_init, ar,
2019 		    sizeof (struct iwk_init_alive_resp));
2020 		/* XXX get temperature */
2021 		iwk_mac_access_enter(sc);
2022 		iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
2023 		    sc->sc_dma_fw_text.cookie.dmac_address >> 4);
2024 		iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
2025 		    sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4);
2026 		iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
2027 		    sc->sc_dma_fw_data.cookie.dmac_size);
2028 		iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
2029 		    sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000);
2030 		iwk_mac_access_exit(sc);
2031 	} else {
2032 		IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n"));
2033 		(void) memcpy(&sc->sc_card_alive_run, ar,
2034 		    sizeof (struct iwk_alive_resp));
2035 
2036 		/*
2037 		 * Init SCD related registers to make Tx work. XXX
2038 		 */
2039 		iwk_mac_access_enter(sc);
2040 
2041 		/* read the SRAM address of the scheduler data base */
2042 		sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR);
2043 
2044 		/* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */
2045 		for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0;
2046 		    i < 128; i += 4)
2047 			iwk_mem_write(sc, base + i, 0);
2048 
2049 		/* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */
2050 		for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET;
2051 		    i < 256; i += 4)
2052 			iwk_mem_write(sc, base + i, 0);
2053 
2054 		/* clear and init SCD_TRANSLATE_TBL_OFFSET area. 32 bytes */
2055 		for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET;
2056 		    i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
2057 			iwk_mem_write(sc, base + i, 0);
2058 
2059 		iwk_reg_write(sc, SCD_DRAM_BASE_ADDR,
2060 		    sc->sc_dma_sh.cookie.dmac_address >> 10);
2061 		iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0);
2062 
2063 		/* initialize the tx queues */
2064 		for (i = 0; i < IWK_NUM_QUEUES; i++) {
2065 			iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0);
2066 			IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8));
2067 			iwk_mem_write(sc, sc->sc_scd_base +
2068 			    SCD_CONTEXT_QUEUE_OFFSET(i),
2069 			    (SCD_WIN_SIZE & 0x7f));
2070 			iwk_mem_write(sc, sc->sc_scd_base +
2071 			    SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t),
2072 			    (SCD_FRAME_LIMIT & 0x7f) << 16);
2073 		}
2074 		/* enable interrupts on each of queues 0-7 */
2075 		iwk_reg_write(sc, SCD_INTERRUPT_MASK,
2076 		    (1 << IWK_NUM_QUEUES) - 1);
2077 		/* enable each channel 0-7 */
2078 		iwk_reg_write(sc, SCD_TXFACT,
2079 		    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
2080 		/*
2081 		 * queues 0-7 map to FIFOs 0-7 and all queues work
2082 		 * in FIFO mode (non-scheduler-ack)
2083 		 */
2084 		for (i = 0; i < 7; i++) {
2085 			iwk_reg_write(sc,
2086 			    SCD_QUEUE_STATUS_BITS(i),
2087 			    (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
2088 			    (i << SCD_QUEUE_STTS_REG_POS_TXF)|
2089 			    SCD_QUEUE_STTS_REG_MSK);
2090 		}
2091 		iwk_mac_access_exit(sc);
2092 
2093 		sc->sc_flags |= IWK_F_FW_INIT;
2094 		cv_signal(&sc->sc_fw_cv);
2095 	}
2096 
2097 }
2098 
2099 static uint_t
2100 /* LINTED: argument unused in function: unused */
2101 iwk_rx_softintr(caddr_t arg, caddr_t unused)
2102 {
2103 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2104 	ieee80211com_t *ic = &sc->sc_ic;
2105 	iwk_rx_desc_t *desc;
2106 	iwk_rx_data_t *data;
2107 	uint32_t index;
2108 
2109 	mutex_enter(&sc->sc_glock);
2110 	if (sc->sc_rx_softint_pending != 1) {
2111 		mutex_exit(&sc->sc_glock);
2112 		return (DDI_INTR_UNCLAIMED);
2113 	}
2114 	/* disable interrupts */
2115 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2116 	mutex_exit(&sc->sc_glock);
2117 
2118 	/*
2119 	 * The firmware has advanced the rx queue index (kept in the low 12
2120 	 * bits of the shared status word); read it and process the entries.
2121 	 */
2122 	index = LE_32(sc->sc_shared->val0) & 0xfff;
2123 
2124 	while (sc->sc_rxq.cur != index) {
2125 		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2126 		desc = (iwk_rx_desc_t *)data->dma_data.mem_va;
2127 
2128 		IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d"
2129 		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2130 		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2131 		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2132 
2133 		/* non-rx/tx/calibration replies complete a pending command */
2134 		if (!(desc->hdr.qid & 0x80) &&
2135 		    (desc->hdr.type != REPLY_RX_PHY_CMD) &&
2136 		    (desc->hdr.type != REPLY_TX) &&
2137 		    (desc->hdr.type != REPLY_TX_PWR_TABLE_CMD) &&
2138 		    (desc->hdr.type != REPLY_PHY_CALIBRATION_CMD) &&
2139 		    (desc->hdr.type != SENSITIVITY_CMD))
2140 			iwk_cmd_intr(sc, desc);
2141 
2142 		switch (desc->hdr.type) {
2143 		case REPLY_4965_RX:
2144 			iwk_rx_intr(sc, desc, data);
2145 			break;
2146 
2147 		case REPLY_TX:
2148 			iwk_tx_intr(sc, desc, data);
2149 			break;
2150 
2151 		case REPLY_ALIVE:
2152 			iwk_ucode_alive(sc, desc);
2153 			break;
2154 
2155 		case CARD_STATE_NOTIFICATION:
2156 		{
2157 			uint32_t *status = (uint32_t *)(desc + 1);
2158 
2159 			IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n",
2160 			    LE_32(*status)));
2161 
2162 			if (LE_32(*status) & 1) {
2163 				/*
2164 				 * The RF kill switch has been toggled to
2165 				 * OFF. Treat it as a hw error; iwk_thread()
2166 				 * recovers after the switch is toggled back
2167 				 * to ON.
2168 				 */
2169 				cmn_err(CE_NOTE,
2170 				    "iwk_rx_softintr(): "
2171 				    "Radio transmitter is off\n");
2172 				sc->sc_ostate = sc->sc_ic.ic_state;
2173 				ieee80211_new_state(&sc->sc_ic,
2174 				    IEEE80211_S_INIT, -1);
2175 				sc->sc_flags |=
2176 				    (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF);
2177 			}
2178 			break;
2179 		}
2180 		case SCAN_START_NOTIFICATION:
2181 		{
2182 			iwk_start_scan_t *scan =
2183 			    (iwk_start_scan_t *)(desc + 1);
2184 
2185 			IWK_DBG((IWK_DEBUG_SCAN,
2186 			    "scanning channel %d status %x\n",
2187 			    scan->chan, LE_32(scan->status)));
2188 
2189 			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2190 			break;
2191 		}
2192 		case SCAN_COMPLETE_NOTIFICATION:
2193 			IWK_DBG((IWK_DEBUG_SCAN, "scan finished\n"));
2194 			sc->sc_flags &= ~IWK_F_SCANNING;
2195 			ieee80211_end_scan(ic);
2196 			break;
2197 		case STATISTICS_NOTIFICATION:
2198 		{
2199 			/* handle statistics notification */
2200 			iwk_statistics_notify(sc, desc);
2201 			break;
2202 		}
2203 
2204 		}
2205 
2206 		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2207 	}
2208 
2209 	/*
2210 	 * The driver has processed everything in the rx queue; hand the
2211 	 * (8-aligned) write pointer back to the firmware.
2212 	 */
2213 	index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
2214 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2215 
2216 	mutex_enter(&sc->sc_glock);
2217 	/* re-enable interrupts */
2218 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2219 	sc->sc_rx_softint_pending = 0;
2220 	mutex_exit(&sc->sc_glock);
2221 
2222 	return (DDI_INTR_CLAIMED);
2223 }
2224 
2225 static uint_t
2226 /* LINTED: argument unused in function: unused */
2227 iwk_intr(caddr_t arg, caddr_t unused)
2228 {
2229 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2230 	uint32_t r, rfh;
2231 
2232 	mutex_enter(&sc->sc_glock);
2233 
2234 	if (sc->sc_flags & IWK_F_SUSPEND) {
2235 		mutex_exit(&sc->sc_glock);
2236 		return (DDI_INTR_UNCLAIMED);
2237 	}
2238 
2239 	r = IWK_READ(sc, CSR_INT);
2240 	if (r == 0 || r == 0xffffffff) {
2241 		mutex_exit(&sc->sc_glock);
2242 		return (DDI_INTR_UNCLAIMED);
2243 	}
2244 
2245 	IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));
2246 
2247 	rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
2248 	IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
2249 	/* disable interrupts */
2250 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2251 	/* ack interrupts */
2252 	IWK_WRITE(sc, CSR_INT, r);
2253 	IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2254 
2255 	if (sc->sc_soft_hdl == NULL) {
2256 		mutex_exit(&sc->sc_glock);
2257 		return (DDI_INTR_CLAIMED);
2258 	}
2259 	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2260 		IWK_DBG((IWK_DEBUG_FW, "fatal firmware error\n"));
2261 		mutex_exit(&sc->sc_glock);
2262 #ifdef DEBUG
2263 		/* dump event and error logs to dmesg */
2264 		iwk_write_error_log(sc);
2265 		iwk_write_event_log(sc);
2266 #endif /* DEBUG */
2267 		iwk_stop(sc);
2268 		sc->sc_ostate = sc->sc_ic.ic_state;
2269 		ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2270 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2271 		return (DDI_INTR_CLAIMED);
2272 	}
2273 
2274 	if (r & BIT_INT_RF_KILL) {
2275 		IWK_DBG((IWK_DEBUG_RADIO, "RF kill\n"));
2276 	}
2277 
2278 	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2279 	    (rfh & FH_INT_RX_MASK)) {
2280 		sc->sc_rx_softint_pending = 1;
2281 		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2282 	}
2283 
2284 	if (r & BIT_INT_ALIVE)	{
2285 		IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
2286 	}
2287 
2288 	/* re-enable interrupts */
2289 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2290 	mutex_exit(&sc->sc_glock);
2291 
2292 	return (DDI_INTR_CLAIMED);
2293 }
2294 
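/*
 * Map an 802.11 rate given in 500 kbps units (2 = 1 Mbps ... 108 = 54 Mbps)
 * to the corresponding 4965 PLCP rate code used in rate_n_flags; unknown
 * rates map to 0.
 */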
2295 static uint8_t
2296 iwk_rate_to_plcp(int rate)
2297 {
2298 	uint8_t ret;
2299 
2300 	switch (rate) {
2301 	/* CCK rates */
2302 	case 2:
2303 		ret = 0xa;
2304 		break;
2305 	case 4:
2306 		ret = 0x14;
2307 		break;
2308 	case 11:
2309 		ret = 0x37;
2310 		break;
2311 	case 22:
2312 		ret = 0x6e;
2313 		break;
2314 	/* OFDM rates */
2315 	case 12:
2316 		ret = 0xd;
2317 		break;
2318 	case 18:
2319 		ret = 0xf;
2320 		break;
2321 	case 24:
2322 		ret = 0x5;
2323 		break;
2324 	case 36:
2325 		ret = 0x7;
2326 		break;
2327 	case 48:
2328 		ret = 0x9;
2329 		break;
2330 	case 72:
2331 		ret = 0xb;
2332 		break;
2333 	case 96:
2334 		ret = 0x1;
2335 		break;
2336 	case 108:
2337 		ret = 0x3;
2338 		break;
2339 	default:
2340 		ret = 0;
2341 		break;
2342 	}
2343 	return (ret);
2344 }
2345 
2346 static mblk_t *
2347 iwk_m_tx(void *arg, mblk_t *mp)
2348 {
2349 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2350 	ieee80211com_t	*ic = &sc->sc_ic;
2351 	mblk_t			*next;
2352 
2353 	if (sc->sc_flags & IWK_F_SUSPEND) {
2354 		freemsgchain(mp);
2355 		return (NULL);
2356 	}
2357 
2358 	if (ic->ic_state != IEEE80211_S_RUN) {
2359 		freemsgchain(mp);
2360 		return (NULL);
2361 	}
2362 
2363 	while (mp != NULL) {
2364 		next = mp->b_next;
2365 		mp->b_next = NULL;
2366 		if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2367 			mp->b_next = next;
2368 			break;
2369 		}
2370 		mp = next;
2371 	}
2372 	return (mp);
2373 }
2374 
2375 /* ARGSUSED */
2376 static int
2377 iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2378 {
2379 	iwk_sc_t *sc = (iwk_sc_t *)ic;
2380 	iwk_tx_ring_t *ring;
2381 	iwk_tx_desc_t *desc;
2382 	iwk_tx_data_t *data;
2383 	iwk_cmd_t *cmd;
2384 	iwk_tx_cmd_t *tx;
2385 	ieee80211_node_t *in;
2386 	struct ieee80211_frame *wh;
2387 	struct ieee80211_key *k = NULL;
2388 	mblk_t *m, *m0;
2389 	int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS;
2390 	uint16_t masks = 0;
2391 
2392 	ring = &sc->sc_txq[0];
2393 	data = &ring->data[ring->cur];
2394 	desc = data->desc;
2395 	cmd = data->cmd;
2396 	bzero(desc, sizeof (*desc));
2397 	bzero(cmd, sizeof (*cmd));
2398 
2399 	mutex_enter(&sc->sc_tx_lock);
2400 	if (sc->sc_flags & IWK_F_SUSPEND) {
2401 		mutex_exit(&sc->sc_tx_lock);
2402 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2403 		    IEEE80211_FC0_TYPE_DATA) {
2404 			freemsg(mp);
2405 		}
2406 		err = IWK_FAIL;
2407 		goto exit;
2408 	}
2409 
2410 	if (ring->queued > ring->count - 64) {
2411 		IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n"));
2412 		sc->sc_need_reschedule = 1;
2413 		mutex_exit(&sc->sc_tx_lock);
2414 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2415 		    IEEE80211_FC0_TYPE_DATA) {
2416 			freemsg(mp);
2417 		}
2418 		sc->sc_tx_nobuf++;
2419 		err = IWK_FAIL;
2420 		goto exit;
2421 	}
2422 	mutex_exit(&sc->sc_tx_lock);
2423 
2424 	hdrlen = sizeof (struct ieee80211_frame);
2425 
2426 	m = allocb(msgdsize(mp) + 32, BPRI_MED);
2427 	if (m == NULL) { /* cannot allocate a buffer, drop this packet */
2428 		cmn_err(CE_WARN,
2429 		    "iwk_send(): failed to allocate msgbuf\n");
2430 		freemsg(mp);
2431 		err = IWK_SUCCESS;
2432 		goto exit;
2433 	}
2434 	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
2435 		mblen = MBLKL(m0);
2436 		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
2437 		off += mblen;
2438 	}
2439 	m->b_wptr += off;
2440 	freemsg(mp);
2441 
2442 	wh = (struct ieee80211_frame *)m->b_rptr;
2443 
2444 	in = ieee80211_find_txnode(ic, wh->i_addr1);
2445 	if (in == NULL) {
2446 		cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n");
2447 		freemsg(m);
2448 		sc->sc_tx_err++;
2449 		err = IWK_SUCCESS;
2450 		goto exit;
2451 	}
2452 	(void) ieee80211_encap(ic, m, in);
2453 
2454 	cmd->hdr.type = REPLY_TX;
2455 	cmd->hdr.flags = 0;
2456 	cmd->hdr.qid = ring->qid;
2457 	cmd->hdr.idx = ring->cur;
2458 
2459 	tx = (iwk_tx_cmd_t *)cmd->data;
2460 	tx->tx_flags = 0;
2461 
2462 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2463 		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
2464 	} else {
2465 		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2466 	}
2467 
2468 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2469 		k = ieee80211_crypto_encap(ic, m);
2470 		if (k == NULL) {
2471 			freemsg(m);
2472 			sc->sc_tx_err++;
2473 			err = IWK_SUCCESS;
2474 			goto exit;
2475 		}
2476 
2477 		if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
2478 			tx->sec_ctl = 2; /* for CCMP */
2479 			tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2480 			(void) memcpy(&tx->key, k->wk_key, k->wk_keylen);
2481 		}
2482 
2483 		/* packet header may have moved, reset our local pointer */
2484 		wh = (struct ieee80211_frame *)m->b_rptr;
2485 	}
2486 
2487 	len = msgdsize(m);
2488 
2489 #ifdef DEBUG
2490 	if (iwk_dbg_flags & IWK_DEBUG_TX)
2491 		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
2492 #endif
2493 
2494 	/* pick a rate */
2495 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2496 	    IEEE80211_FC0_TYPE_MGT) {
2497 		/* mgmt frames are sent at the lowest supported rate */
2498 		rate = in->in_rates.ir_rates[0];
2499 	} else {
2500 		/*
2501 		 * Rate control is done in software for now; hardware rate
2502 		 * scaling may be used later.  It might look like the
2503 		 * following.  For a management frame:
2504 		 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1;
2505 		 * for data frame:
2506 		 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK));
2507 		 * rate = in->in_rates.ir_rates[in->in_txrate];
2508 		 * tx->initial_rate_index = 1;
2509 		 *
2510 		 * For now the tx rate is set via the tx cmd flags to the
2511 		 * maximum value: 54M for 11g and 11M for 11b.
2512 		 */
2513 
2514 		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
2515 			rate = ic->ic_fixed_rate;
2516 		} else {
2517 			rate = in->in_rates.ir_rates[in->in_txrate];
2518 		}
2519 	}
2520 	rate &= IEEE80211_RATE_VAL;
2521 	IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x",
2522 	    in->in_txrate, in->in_rates.ir_nrates, rate));
2523 
2524 	tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK));
2525 
2526 	len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4);
2527 	if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen))
2528 		tx->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2529 
2530 	/* retrieve destination node's id */
2531 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2532 		tx->sta_id = IWK_BROADCAST_ID;
2533 	} else {
2534 		if (ic->ic_opmode != IEEE80211_M_IBSS)
2535 			tx->sta_id = IWK_AP_ID;
2536 	}
2537 
2538 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2539 	    IEEE80211_FC0_TYPE_MGT) {
2540 		/* tell h/w to set timestamp in probe responses */
2541 		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2542 		    IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2543 			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
2544 
2545 		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2546 		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
2547 		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2548 		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ))
2549 			tx->timeout.pm_frame_timeout = 3;
2550 		else
2551 			tx->timeout.pm_frame_timeout = 2;
2552 	} else
2553 		tx->timeout.pm_frame_timeout = 0;
2554 	if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
2555 		masks |= RATE_MCS_CCK_MSK;
2556 
2557 	masks |= RATE_MCS_ANT_B_MSK;
2558 	tx->rate.r.rate_n_flags = (iwk_rate_to_plcp(rate) | masks);
2559 
2560 	IWK_DBG((IWK_DEBUG_TX, "tx flag = %x",
2561 	    tx->tx_flags));
2562 
2563 	tx->rts_retry_limit = 60;
2564 	tx->data_retry_limit = 15;
2565 
2566 	tx->stop_time.life_time  = LE_32(0xffffffff);
2567 
2568 	tx->len = LE_16(len);
2569 
2570 	tx->dram_lsb_ptr =
2571 	    data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch);
2572 	tx->dram_msb_ptr = 0;
2573 	tx->driver_txop = 0;
2574 	tx->next_frame_len = 0;
2575 
2576 	(void) memcpy(tx + 1, m->b_rptr, hdrlen);
2577 	m->b_rptr += hdrlen;
2578 	(void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
2579 
2580 	IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d",
2581 	    ring->qid, ring->cur, len));
2582 
2583 	/*
2584 	 * The first segment includes the tx cmd plus the 802.11 header,
2585 	 * the second includes the remainder of the 802.11 frame.
2586 	 */
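	/*
	 * TFD layout as used below (inferred from the field usage here):
	 * bits [24+] of val0 carry the number of buffers (two for a data
	 * frame, one for a command), the 12-bit length of the first buffer
	 * sits in bits [4:15] of val1, and the second buffer's 32-bit DMA
	 * address is split across the high half of val1 and the low half
	 * of val2, with its length in the top 12 bits of val2.
	 */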
2587 	desc->val0 = LE_32(2 << 24);
2588 	desc->pa[0].tb1_addr = LE_32(data->paddr_cmd);
2589 	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
2590 	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
2591 	desc->pa[0].val2 =
2592 	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
2593 	    ((len - hdrlen) << 20);
2594 	IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x "
2595 	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
2596 	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
2597 	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
2598 
2599 	mutex_enter(&sc->sc_tx_lock);
2600 	ring->queued++;
2601 	mutex_exit(&sc->sc_tx_lock);
2602 
2603 	/* kick ring */
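	/*
	 * Record the frame length in the scheduler byte count table; entries
	 * for the first IWK_MAX_WIN_SIZE slots are mirrored past
	 * IWK_QUEUE_SIZE, presumably so the hardware can read a full
	 * scheduler window without wrapping.
	 */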
2604 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2605 	    tfd_offset[ring->cur].val = 8 + len;
2606 	if (ring->cur < IWK_MAX_WIN_SIZE) {
2607 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2608 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len;
2609 	}
2610 
2611 	IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
2612 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
2613 
2614 	ring->cur = (ring->cur + 1) % ring->count;
2615 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2616 	freemsg(m);
2617 	/* release node reference */
2618 	ieee80211_free_node(in);
2619 
2620 	ic->ic_stats.is_tx_bytes += len;
2621 	ic->ic_stats.is_tx_frags++;
2622 
2623 	if (sc->sc_tx_timer == 0)
2624 		sc->sc_tx_timer = 10;
2625 exit:
2626 	return (err);
2627 }
2628 
2629 static void
2630 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
2631 {
2632 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2633 	ieee80211com_t	*ic = &sc->sc_ic;
2634 	int		err;
2635 
2636 	err = ieee80211_ioctl(ic, wq, mp);
2637 
2638 	if (err == ENETRESET) {
2639 		/*
2640 		 * This is special-cased for hidden AP connections.
2641 		 * We make sure the driver issues only one scan for a
2642 		 * 'connect' CLI command, so when connecting to a
2643 		 * hidden AP the scan is only sent over the air once
2644 		 * we know the desired essid of the AP we want to
2645 		 * connect to.
2646 		 */
2647 		if (ic->ic_des_esslen) {
2648 			(void) ieee80211_new_state(ic,
2649 			    IEEE80211_S_SCAN, -1);
2650 		}
2651 	}
2652 }
2653 
2654 /*ARGSUSED*/
2655 static int
2656 iwk_m_stat(void *arg, uint_t stat, uint64_t *val)
2657 {
2658 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2659 	ieee80211com_t	*ic = &sc->sc_ic;
2660 	ieee80211_node_t *in = ic->ic_bss;
2661 	struct ieee80211_rateset *rs = &in->in_rates;
2662 
2663 	mutex_enter(&sc->sc_glock);
2664 	switch (stat) {
2665 	case MAC_STAT_IFSPEED:
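		/* rates are in 500 kbps units; convert to bits per second */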
2666 		*val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ?
2667 		    (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL)
2668 		    : ic->ic_fixed_rate) / 2 * 1000000;
2669 		break;
2670 	case MAC_STAT_NOXMTBUF:
2671 		*val = sc->sc_tx_nobuf;
2672 		break;
2673 	case MAC_STAT_NORCVBUF:
2674 		*val = sc->sc_rx_nobuf;
2675 		break;
2676 	case MAC_STAT_IERRORS:
2677 		*val = sc->sc_rx_err;
2678 		break;
2679 	case MAC_STAT_RBYTES:
2680 		*val = ic->ic_stats.is_rx_bytes;
2681 		break;
2682 	case MAC_STAT_IPACKETS:
2683 		*val = ic->ic_stats.is_rx_frags;
2684 		break;
2685 	case MAC_STAT_OBYTES:
2686 		*val = ic->ic_stats.is_tx_bytes;
2687 		break;
2688 	case MAC_STAT_OPACKETS:
2689 		*val = ic->ic_stats.is_tx_frags;
2690 		break;
2691 	case MAC_STAT_OERRORS:
2692 	case WIFI_STAT_TX_FAILED:
2693 		*val = sc->sc_tx_err;
2694 		break;
2695 	case WIFI_STAT_TX_RETRANS:
2696 		*val = sc->sc_tx_retries;
2697 		break;
2698 	case WIFI_STAT_FCS_ERRORS:
2699 	case WIFI_STAT_WEP_ERRORS:
2700 	case WIFI_STAT_TX_FRAGS:
2701 	case WIFI_STAT_MCAST_TX:
2702 	case WIFI_STAT_RTS_SUCCESS:
2703 	case WIFI_STAT_RTS_FAILURE:
2704 	case WIFI_STAT_ACK_FAILURE:
2705 	case WIFI_STAT_RX_FRAGS:
2706 	case WIFI_STAT_MCAST_RX:
2707 	case WIFI_STAT_RX_DUPS:
2708 		mutex_exit(&sc->sc_glock);
2709 		return (ieee80211_stat(ic, stat, val));
2710 	default:
2711 		mutex_exit(&sc->sc_glock);
2712 		return (ENOTSUP);
2713 	}
2714 	mutex_exit(&sc->sc_glock);
2715 
2716 	return (IWK_SUCCESS);
2717 
2718 }
2719 
2720 static int
2721 iwk_m_start(void *arg)
2722 {
2723 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2724 	ieee80211com_t	*ic = &sc->sc_ic;
2725 	int err;
2726 
2727 	err = iwk_init(sc);
2728 
2729 	if (err != IWK_SUCCESS) {
2730 		/*
2731 		 * The hw init failed (e.g. the RF switch is OFF). Return
2732 		 * success so that 'plumb' succeeds; iwk_thread() retries the
2733 		 * initialization in the background.
2734 		 */
2735 		mutex_enter(&sc->sc_glock);
2736 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2737 		mutex_exit(&sc->sc_glock);
2738 		return (IWK_SUCCESS);
2739 	}
2740 
2741 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2742 
2743 	mutex_enter(&sc->sc_glock);
2744 	sc->sc_flags |= IWK_F_RUNNING;
2745 	mutex_exit(&sc->sc_glock);
2746 
2747 	return (IWK_SUCCESS);
2748 }
2749 
2750 static void
2751 iwk_m_stop(void *arg)
2752 {
2753 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2754 	ieee80211com_t	*ic = &sc->sc_ic;
2755 
2756 	iwk_stop(sc);
2757 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2758 	mutex_enter(&sc->sc_mt_lock);
2759 	sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
2760 	sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
2761 	mutex_exit(&sc->sc_mt_lock);
2762 	mutex_enter(&sc->sc_glock);
2763 	sc->sc_flags &= ~IWK_F_RUNNING;
2764 	mutex_exit(&sc->sc_glock);
2765 }
2766 
2767 /*ARGSUSED*/
2768 static int
2769 iwk_m_unicst(void *arg, const uint8_t *macaddr)
2770 {
2771 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2772 	ieee80211com_t	*ic = &sc->sc_ic;
2773 	int err;
2774 
2775 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
2776 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
2777 		mutex_enter(&sc->sc_glock);
2778 		err = iwk_config(sc);
2779 		mutex_exit(&sc->sc_glock);
2780 		if (err != IWK_SUCCESS) {
2781 			cmn_err(CE_WARN,
2782 			    "iwk_m_unicst(): "
2783 			    "failed to configure device\n");
2784 			goto fail;
2785 		}
2786 	}
2787 	return (IWK_SUCCESS);
2788 fail:
2789 	return (err);
2790 }
2791 
2792 /*ARGSUSED*/
2793 static int
2794 iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m)
2795 {
2796 	return (IWK_SUCCESS);
2797 }
2798 
2799 /*ARGSUSED*/
2800 static int
2801 iwk_m_promisc(void *arg, boolean_t on)
2802 {
2803 	return (IWK_SUCCESS);
2804 }
2805 
2806 static void
2807 iwk_thread(iwk_sc_t *sc)
2808 {
2809 	ieee80211com_t	*ic = &sc->sc_ic;
2810 	clock_t clk;
2811 	int times = 0, err, n = 0, timeout = 0;
2812 	uint32_t tmp;
2813 
2814 	mutex_enter(&sc->sc_mt_lock);
2815 	while (sc->sc_mf_thread_switch) {
2816 		tmp = IWK_READ(sc, CSR_GP_CNTRL);
2817 		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
2818 			sc->sc_flags &= ~IWK_F_RADIO_OFF;
2819 		} else {
2820 			sc->sc_flags |= IWK_F_RADIO_OFF;
2821 		}
2822 		/*
2823 		 * If in SUSPEND or the RF is OFF, do nothing
2824 		 */
2825 		if ((sc->sc_flags & IWK_F_SUSPEND) ||
2826 		    (sc->sc_flags & IWK_F_RADIO_OFF)) {
2827 			mutex_exit(&sc->sc_mt_lock);
2828 			delay(drv_usectohz(100000));
2829 			mutex_enter(&sc->sc_mt_lock);
2830 			continue;
2831 		}
2832 
2833 		/*
2834 		 * recover from a fatal hw error
2835 		 */
2836 		if (ic->ic_mach &&
2837 		    (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) {
2838 
2839 			IWK_DBG((IWK_DEBUG_FW,
2840 			    "iwk_thread(): "
2841 			    "try to recover fatal hw error: %d\n", times++));
2842 
2843 			iwk_stop(sc);
2844 			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2845 
2846 			mutex_exit(&sc->sc_mt_lock);
2847 			delay(drv_usectohz(2000000 + n*500000));
2848 			mutex_enter(&sc->sc_mt_lock);
2849 			err = iwk_init(sc);
2850 			if (err != IWK_SUCCESS) {
2851 				n++;
2852 				if (n < 20)
2853 					continue;
2854 			}
2855 			n = 0;
2856 			if (!err)
2857 				sc->sc_flags |= IWK_F_RUNNING;
2858 			sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
2859 			mutex_exit(&sc->sc_mt_lock);
2860 			delay(drv_usectohz(2000000));
2861 			if (sc->sc_ostate != IEEE80211_S_INIT)
2862 				ieee80211_new_state(ic, IEEE80211_S_SCAN, 0);
2863 			mutex_enter(&sc->sc_mt_lock);
2864 		}
2865 
2866 		/*
2867 		 * rate ctl
2868 		 */
2869 		if (ic->ic_mach &&
2870 		    (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) {
2871 			clk = ddi_get_lbolt();
2872 			if (clk > sc->sc_clk + drv_usectohz(500000)) {
2873 				iwk_amrr_timeout(sc);
2874 			}
2875 		}
2876 
2877 		mutex_exit(&sc->sc_mt_lock);
2878 		delay(drv_usectohz(100000));
2879 		mutex_enter(&sc->sc_mt_lock);
2880 
2881 		if (sc->sc_tx_timer) {
2882 			timeout++;
2883 			if (timeout == 10) {
2884 				sc->sc_tx_timer--;
2885 				if (sc->sc_tx_timer == 0) {
2886 					sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2887 					sc->sc_ostate = IEEE80211_S_RUN;
2888 					IWK_DBG((IWK_DEBUG_FW,
2889 					    "iwk_thread(): try to recover from"
2890 					    " 'send fail'\n"));
2891 				}
2892 				timeout = 0;
2893 			}
2894 		}
2895 
2896 	}
2897 	sc->sc_mf_thread = NULL;
2898 	cv_signal(&sc->sc_mt_cv);
2899 	mutex_exit(&sc->sc_mt_lock);
2900 }
2901 
2902 
2903 /*
2904  * Send a command to the firmware.
2905  */
2906 static int
2907 iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async)
2908 {
2909 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
2910 	iwk_tx_desc_t *desc;
2911 	iwk_cmd_t *cmd;
2912 	clock_t clk;
2913 
2914 	ASSERT(size <= sizeof (cmd->data));
2915 	ASSERT(mutex_owned(&sc->sc_glock));
2916 
2917 	IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code));
2918 	desc = ring->data[ring->cur].desc;
2919 	cmd = ring->data[ring->cur].cmd;
2920 
2921 	cmd->hdr.type = (uint8_t)code;
2922 	cmd->hdr.flags = 0;
2923 	cmd->hdr.qid = ring->qid;
2924 	cmd->hdr.idx = ring->cur;
2925 	(void) memcpy(cmd->data, buf, size);
2926 	(void) memset(desc, 0, sizeof (*desc));
2927 
2928 	desc->val0 = LE_32(1 << 24);
2929 	desc->pa[0].tb1_addr =
2930 	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
2931 	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
2932 
2933 	/* kick cmd ring XXX */
2934 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2935 	    tfd_offset[ring->cur].val = 8;
2936 	if (ring->cur < IWK_MAX_WIN_SIZE) {
2937 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2938 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
2939 	}
2940 	ring->cur = (ring->cur + 1) % ring->count;
2941 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2942 
2943 	if (async)
2944 		return (IWK_SUCCESS);
2945 	else {
2946 		sc->sc_flags &= ~IWK_F_CMD_DONE;
2947 		clk = ddi_get_lbolt() + drv_usectohz(2000000);
2948 		while (!(sc->sc_flags & IWK_F_CMD_DONE)) {
2949 			if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk) <
2950 			    0)
2951 				break;
2952 		}
2953 		if (sc->sc_flags & IWK_F_CMD_DONE)
2954 			return (IWK_SUCCESS);
2955 		else
2956 			return (IWK_FAIL);
2957 	}
2958 }
2959 
2960 static void
2961 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
2962 {
2963 	iwk_led_cmd_t led;
2964 
2965 	led.interval = LE_32(100000);	/* unit: 100ms */
2966 	led.id = id;
2967 	led.off = off;
2968 	led.on = on;
2969 
2970 	(void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
2971 }
2972 
2973 static int
2974 iwk_hw_set_before_auth(iwk_sc_t *sc)
2975 {
2976 	ieee80211com_t *ic = &sc->sc_ic;
2977 	ieee80211_node_t *in = ic->ic_bss;
2978 	iwk_add_sta_t node;
2979 	iwk_link_quality_cmd_t link_quality;
2980 	struct ieee80211_rateset rs;
2981 	uint16_t masks = 0, rate;
2982 	int i, err;
2983 
2984 	/* update adapter's configuration according to the target AP's info */
2985 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
2986 	sc->sc_config.chan = ieee80211_chan2ieee(ic, in->in_chan);
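	/*
	 * cck_basic_rates/ofdm_basic_rates are bitmaps of basic rates; the
	 * bit order is assumed to follow the hardware rate table, so e.g.
	 * 0x03 selects only the two lowest CCK rates (1/2 Mbps) while 0x0f
	 * selects all four (1/2/5.5/11 Mbps).
	 */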
2987 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
2988 		sc->sc_config.cck_basic_rates  = 0x03;
2989 		sc->sc_config.ofdm_basic_rates = 0;
2990 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
2991 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
2992 		sc->sc_config.cck_basic_rates  = 0;
2993 		sc->sc_config.ofdm_basic_rates = 0x15;
2994 	} else { /* assume 802.11b/g */
2995 		sc->sc_config.cck_basic_rates  = 0x0f;
2996 		sc->sc_config.ofdm_basic_rates = 0xff;
2997 	}
2998 
2999 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3000 	    RXON_FLG_SHORT_SLOT_MSK);
3001 
3002 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
3003 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3004 	else
3005 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3006 
3007 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
3008 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3009 	else
3010 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3011 
3012 	IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x "
3013 	    "filter_flags %x  cck %x ofdm %x"
3014 	    " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
3015 	    sc->sc_config.chan, sc->sc_config.flags,
3016 	    sc->sc_config.filter_flags,
3017 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3018 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3019 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3020 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3021 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3022 	    sizeof (iwk_rxon_cmd_t), 1);
3023 	if (err != IWK_SUCCESS) {
3024 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3025 		    " failed to config chan%d\n",
3026 		    sc->sc_config.chan);
3027 		return (err);
3028 	}
3029 
3030 	/* obtain current temperature of chipset */
3031 	sc->sc_tempera = iwk_curr_tempera(sc);
3032 
3033 	/* perform Tx power calibration to determine the DSP and radio gains */
3034 	err = iwk_tx_power_calibration(sc);
3035 	if (err) {
3036 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3037 		    "failed to set tx power table\n");
3038 		return (err);
3039 	}
3040 
3041 	/* add default AP node */
3042 	(void) memset(&node, 0, sizeof (node));
3043 	IEEE80211_ADDR_COPY(node.bssid, in->in_bssid);
3044 	node.id = IWK_AP_ID;
3045 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
3046 	if (err != IWK_SUCCESS) {
3047 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3048 		    "failed to add BSS node\n");
3049 		return (err);
3050 	}
3051 
3052 	/* TX_LINK_QUALITY cmd ? */
3053 	(void) memset(&link_quality, 0, sizeof (link_quality));
3054 	rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)];
3055 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3056 		if (i < rs.ir_nrates)
3057 			rate = rs.ir_rates[rs.ir_nrates - i];
3058 		else
3059 			rate = 2;
3060 		if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
3061 			masks |= RATE_MCS_CCK_MSK;
3062 		masks |= RATE_MCS_ANT_B_MSK;
3063 		masks &= ~RATE_MCS_ANT_A_MSK;
3064 		link_quality.rate_n_flags[i] =
3065 		    iwk_rate_to_plcp(rate) | masks;
3066 	}
3067 
3068 	link_quality.general_params.single_stream_ant_msk = 2;
3069 	link_quality.general_params.dual_stream_ant_msk = 3;
3070 	link_quality.agg_params.agg_dis_start_th = 3;
3071 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3072 	link_quality.sta_id = IWK_AP_ID;
3073 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3074 	    sizeof (link_quality), 1);
3075 	if (err != IWK_SUCCESS) {
3076 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3077 		    "failed to config link quality table\n");
3078 		return (err);
3079 	}
3080 
3081 	return (IWK_SUCCESS);
3082 }
3083 
3084 /*
3085  * Send a scan request (assemble the scan command) to the firmware.
3086  */
3087 static int
3088 iwk_scan(iwk_sc_t *sc)
3089 {
3090 	ieee80211com_t *ic = &sc->sc_ic;
3091 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3092 	iwk_tx_desc_t *desc;
3093 	iwk_tx_data_t *data;
3094 	iwk_cmd_t *cmd;
3095 	iwk_scan_hdr_t *hdr;
3096 	iwk_scan_chan_t *chan;
3097 	struct ieee80211_frame *wh;
3098 	ieee80211_node_t *in = ic->ic_bss;
3099 	struct ieee80211_rateset *rs;
3100 	enum ieee80211_phymode mode;
3101 	uint8_t *frm;
3102 	int i, pktlen, nrates;
3103 
3104 	sc->sc_flags |= IWK_F_SCANNING;
3105 
3106 	data = &ring->data[ring->cur];
3107 	desc = data->desc;
3108 	cmd = (iwk_cmd_t *)data->dma_data.mem_va;
3109 
3110 	cmd->hdr.type = REPLY_SCAN_CMD;
3111 	cmd->hdr.flags = 0;
3112 	cmd->hdr.qid = ring->qid;
3113 	cmd->hdr.idx = ring->cur | 0x40;
3114 
3115 	hdr = (iwk_scan_hdr_t *)cmd->data;
3116 	(void) memset(hdr, 0, sizeof (iwk_scan_hdr_t));
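	/*
	 * Scan the 2.4 GHz band only: eleven per-channel entries (channels
	 * 1-11) are appended after the probe request frame built below.
	 */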
3117 	hdr->nchan = 11;
3118 	hdr->quiet_time = LE_16(5);
3119 	hdr->quiet_plcp_th = LE_16(1);
3120 
3121 	hdr->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
3122 	hdr->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3123 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3124 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3125 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3126 
3127 	hdr->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
3128 	hdr->tx_cmd.sta_id = IWK_BROADCAST_ID;
3129 	hdr->tx_cmd.stop_time.life_time = 0xffffffff;
3130 	hdr->tx_cmd.tx_flags |= (0x200);
3131 	hdr->tx_cmd.rate.r.rate_n_flags = iwk_rate_to_plcp(2);
3132 	hdr->tx_cmd.rate.r.rate_n_flags |=
3133 	    (RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
3134 	hdr->direct_scan[0].len = ic->ic_des_esslen;
3135 	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;
3136 
3137 	if (ic->ic_des_esslen)
3138 		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
3139 		    ic->ic_des_esslen);
3140 	else
3141 		bzero(hdr->direct_scan[0].ssid,
3142 		    sizeof (hdr->direct_scan[0].ssid));
3143 	/*
3144 	 * a probe request frame is required after the REPLY_SCAN_CMD
3145 	 */
3146 	wh = (struct ieee80211_frame *)(hdr + 1);
3147 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
3148 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
3149 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
3150 	(void) memset(wh->i_addr1, 0xff, 6);
3151 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
3152 	(void) memset(wh->i_addr3, 0xff, 6);
3153 	*(uint16_t *)&wh->i_dur[0] = 0;
3154 	*(uint16_t *)&wh->i_seq[0] = 0;
3155 
3156 	frm = (uint8_t *)(wh + 1);
3157 
3158 	/* essid IE */
3159 	*frm++ = IEEE80211_ELEMID_SSID;
3160 	*frm++ = in->in_esslen;
3161 	(void) memcpy(frm, in->in_essid, in->in_esslen);
3162 	frm += in->in_esslen;
3163 
3164 	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
3165 	rs = &ic->ic_sup_rates[mode];
3166 
3167 	/* supported rates IE */
3168 	*frm++ = IEEE80211_ELEMID_RATES;
3169 	nrates = rs->ir_nrates;
3170 	if (nrates > IEEE80211_RATE_SIZE)
3171 		nrates = IEEE80211_RATE_SIZE;
3172 	*frm++ = (uint8_t)nrates;
3173 	(void) memcpy(frm, rs->ir_rates, nrates);
3174 	frm += nrates;
3175 
3176 	/* supported xrates IE */
3177 	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
3178 		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
3179 		*frm++ = IEEE80211_ELEMID_XRATES;
3180 		*frm++ = (uint8_t)nrates;
3181 		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
3182 		frm += nrates;
3183 	}
3184 
3185 	/* optional IE (usually for WPA) */
3186 	if (ic->ic_opt_ie != NULL) {
3187 		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
3188 		frm += ic->ic_opt_ie_len;
3189 	}
3190 
3191 	/* set up the length of the probe request */
3192 	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
3193 	hdr->len = hdr->nchan * sizeof (iwk_scan_chan_t) +
3194 	    hdr->tx_cmd.len + sizeof (iwk_scan_hdr_t);
3195 
3196 	/*
3197 	 * the attributes of the scan channels follow the probe
3198 	 * request frame
3199 	 */
3200 	chan = (iwk_scan_chan_t *)frm;
3201 	for (i = 1; i <= hdr->nchan; i++, chan++) {
3202 		chan->type = 3;
3203 		chan->chan = (uint8_t)i;
3204 		chan->tpc.tx_gain = 0x3f;
3205 		chan->tpc.dsp_atten = 110;
3206 		chan->active_dwell = LE_16(20);
3207 		chan->passive_dwell = LE_16(120);
3208 
3209 		frm += sizeof (iwk_scan_chan_t);
3210 	}
3211 
3212 	pktlen = _PTRDIFF(frm, cmd);
3213 
3214 	(void) memset(desc, 0, sizeof (*desc));
3215 	desc->val0 = LE_32(1 << 24);
3216 	desc->pa[0].tb1_addr =
3217 	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
3218 	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
3219 
3220 	/*
3221 	 * filling the byte count table may not be necessary for a command,
3222 	 * but we fill it here anyway.
3223 	 */
3224 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3225 	    tfd_offset[ring->cur].val = 8;
3226 	if (ring->cur < IWK_MAX_WIN_SIZE) {
3227 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3228 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3229 	}
3230 
3231 	/* kick cmd ring */
3232 	ring->cur = (ring->cur + 1) % ring->count;
3233 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3234 
3235 	return (IWK_SUCCESS);
3236 }
3237 
3238 static int
3239 iwk_config(iwk_sc_t *sc)
3240 {
3241 	ieee80211com_t *ic = &sc->sc_ic;
3242 	iwk_powertable_cmd_t powertable;
3243 	iwk_bt_cmd_t bt;
3244 	iwk_add_sta_t node;
3245 	iwk_link_quality_cmd_t link_quality;
3246 	int i, err;
3247 	uint16_t masks = 0;
3248 
3249 	/*
3250 	 * set power mode. Power management is disabled for now; do it later
3251 	 */
3252 	(void) memset(&powertable, 0, sizeof (powertable));
3253 	powertable.flags = LE_16(0x8);
3254 	err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable,
3255 	    sizeof (powertable), 0);
3256 	if (err != IWK_SUCCESS) {
3257 		cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n");
3258 		return (err);
3259 	}
3260 
3261 	/* configure bt coexistence */
3262 	(void) memset(&bt, 0, sizeof (bt));
3263 	bt.flags = 3;
3264 	bt.lead_time = 0xaa;
3265 	bt.max_kill = 1;
3266 	err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt,
3267 	    sizeof (bt), 0);
3268 	if (err != IWK_SUCCESS) {
3269 		cmn_err(CE_WARN,
3270 		    "iwk_config(): "
3271 		    "failed to configure bt coexistence\n");
3272 		return (err);
3273 	}
3274 
3275 	/* configure rxon */
3276 	(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
3277 	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
3278 	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
3279 	sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3280 	sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK |
3281 	    RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_BAND_24G_MSK);
3282 	sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
3283 	switch (ic->ic_opmode) {
3284 	case IEEE80211_M_STA:
3285 		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
3286 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3287 		    RXON_FILTER_DIS_DECRYPT_MSK |
3288 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3289 		break;
3290 	case IEEE80211_M_AHDEMO:
3291 		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
3292 		sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3293 		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3294 		    RXON_FILTER_DIS_DECRYPT_MSK |
3295 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3296 		break;
3297 	case IEEE80211_M_HOSTAP:
3298 		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
3299 		break;
3300 	case IEEE80211_M_MONITOR:
3301 		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
3302 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3303 		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
3304 		break;
3305 	}
3306 	sc->sc_config.cck_basic_rates  = 0x0f;
3307 	sc->sc_config.ofdm_basic_rates = 0xff;
3308 
3309 	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
3310 	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
3311 
3312 	/* set antenna */
3313 
3314 	sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3315 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3316 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3317 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3318 
3319 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3320 	    sizeof (iwk_rxon_cmd_t), 0);
3321 	if (err != IWK_SUCCESS) {
3322 		cmn_err(CE_WARN, "iwk_config(): "
3323 		    "failed to set configure command\n");
3324 		return (err);
3325 	}
3326 	/* obtain current temperature of chipset */
3327 	sc->sc_tempera = iwk_curr_tempera(sc);
3328 
3329 	/* perform Tx power calibration to determine the DSP and radio gains */
3330 	err = iwk_tx_power_calibration(sc);
3331 	if (err) {
3332 		cmn_err(CE_WARN, "iwk_config(): "
3333 		    "failed to set tx power table\n");
3334 		return (err);
3335 	}
3336 
3337 	/* add broadcast node so that we can send broadcast frames */
3338 	(void) memset(&node, 0, sizeof (node));
3339 	(void) memset(node.bssid, 0xff, 6);
3340 	node.id = IWK_BROADCAST_ID;
3341 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
3342 	if (err != IWK_SUCCESS) {
3343 		cmn_err(CE_WARN, "iwk_config(): "
3344 		    "failed to add broadcast node\n");
3345 		return (err);
3346 	}
3347 
3348 	/* TX_LINK_QUALITY cmd ? */
3349 	(void) memset(&link_quality, 0, sizeof (link_quality));
3350 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3351 		masks |= RATE_MCS_CCK_MSK;
3352 		masks |= RATE_MCS_ANT_B_MSK;
3353 		masks &= ~RATE_MCS_ANT_A_MSK;
3354 		link_quality.rate_n_flags[i] = iwk_rate_to_plcp(2) | masks;
3355 	}
3356 
3357 	link_quality.general_params.single_stream_ant_msk = 2;
3358 	link_quality.general_params.dual_stream_ant_msk = 3;
3359 	link_quality.agg_params.agg_dis_start_th = 3;
3360 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3361 	link_quality.sta_id = IWK_BROADCAST_ID;
3362 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3363 	    sizeof (link_quality), 0);
3364 	if (err != IWK_SUCCESS) {
3365 		cmn_err(CE_WARN, "iwk_config(): "
3366 		    "failed to config link quality table\n");
3367 		return (err);
3368 	}
3369 
3370 	return (IWK_SUCCESS);
3371 }
3372 
3373 static void
3374 iwk_stop_master(iwk_sc_t *sc)
3375 {
3376 	uint32_t tmp;
3377 	int n;
3378 
3379 	tmp = IWK_READ(sc, CSR_RESET);
3380 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
3381 
3382 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3383 	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
3384 	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE)
3385 		return;
3386 
3387 	for (n = 0; n < 2000; n++) {
3388 		if (IWK_READ(sc, CSR_RESET) &
3389 		    CSR_RESET_REG_FLAG_MASTER_DISABLED)
3390 			break;
3391 		DELAY(1000);
3392 	}
3393 	if (n == 2000)
3394 		IWK_DBG((IWK_DEBUG_HW,
3395 		    "timeout waiting for master stop\n"));
3396 }
3397 
3398 static int
3399 iwk_power_up(iwk_sc_t *sc)
3400 {
3401 	uint32_t tmp;
3402 
3403 	iwk_mac_access_enter(sc);
3404 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3405 	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
3406 	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
3407 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3408 	iwk_mac_access_exit(sc);
3409 
3410 	DELAY(5000);
3411 	return (IWK_SUCCESS);
3412 }
3413 
3414 static int
3415 iwk_preinit(iwk_sc_t *sc)
3416 {
3417 	uint32_t tmp;
3418 	int n;
3419 	uint8_t vlink;
3420 
3421 	/* clear any pending interrupts */
3422 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
3423 
3424 	tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS);
3425 	IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS,
3426 	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
3427 
3428 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3429 	IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
3430 
3431 	/* wait for clock ready */
3432 	for (n = 0; n < 1000; n++) {
3433 		if (IWK_READ(sc, CSR_GP_CNTRL) &
3434 		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY)
3435 			break;
3436 		DELAY(10);
3437 	}
3438 	if (n == 1000) {
3439 		return (ETIMEDOUT);
3440 	}
3441 	iwk_mac_access_enter(sc);
3442 	tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG);
3443 	iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp |
3444 	    APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT);
3445 
3446 	DELAY(20);
3447 	tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT);
3448 	iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
3449 	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
3450 	iwk_mac_access_exit(sc);
3451 
3452 	IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? */
3453 
3454 	(void) iwk_power_up(sc);
3455 
3456 	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
3457 		tmp = ddi_get32(sc->sc_cfg_handle,
3458 		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
3459 		ddi_put32(sc->sc_cfg_handle,
3460 		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
3461 		    tmp & ~(1 << 11));
3462 	}
3463 
3464 
3465 	vlink = ddi_get8(sc->sc_cfg_handle,
3466 	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
3467 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
3468 	    vlink & ~2);
3469 
3470 	tmp = IWK_READ(sc, CSR_SW_VER);
3471 	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
3472 	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
3473 	    CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R;
3474 	IWK_WRITE(sc, CSR_SW_VER, tmp);
3475 
3476 	/* make sure power is supplied to every part of the hardware */
3477 	iwk_mac_access_enter(sc);
3478 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3479 	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3480 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3481 	DELAY(5);
3482 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3483 	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3484 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3485 	iwk_mac_access_exit(sc);
3486 	return (IWK_SUCCESS);
3487 }
3488 
3489 /*
3490  * set up the semaphore flag to own the EEPROM
3491  */
3492 static int iwk_eep_sem_down(iwk_sc_t *sc)
3493 {
3494 	int count1, count2;
3495 	uint32_t tmp;
3496 
3497 	for (count1 = 0; count1 < 1000; count1++) {
3498 		tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3499 		IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3500 		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
3501 
3502 		for (count2 = 0; count2 < 2; count2++) {
3503 			if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) &
3504 			    CSR_HW_IF_CONFIG_REG_EEP_SEM)
3505 				return (IWK_SUCCESS);
3506 			DELAY(10000);
3507 		}
3508 	}
3509 	return (IWK_FAIL);
3510 }
3511 
3512 /*
3513  * reset the semaphore flag to release the EEPROM
3514  */
3515 static void iwk_eep_sem_up(iwk_sc_t *sc)
3516 {
3517 	uint32_t tmp;
3518 
3519 	tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3520 	IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3521 	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
3522 }
3523 
3524 /*
3525  * This function loads all information from the EEPROM into the iwk_eep
3526  * structure embedded in the iwk_sc_t structure
3527  */
3528 static int iwk_eep_load(iwk_sc_t *sc)
3529 {
3530 	int i, rr;
3531 	uint32_t rv, tmp, eep_gp;
3532 	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
3533 	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
3534 
3535 	/* read eeprom gp register in CSR */
3536 	eep_gp = IWK_READ(sc, CSR_EEPROM_GP);
3537 	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
3538 	    CSR_EEPROM_GP_BAD_SIGNATURE) {
3539 		IWK_DBG((IWK_DEBUG_EEPROM, "EEPROM not found\n"));
3540 		return (IWK_FAIL);
3541 	}
3542 
3543 	rr = iwk_eep_sem_down(sc);
3544 	if (rr != 0) {
3545 		IWK_DBG((IWK_DEBUG_EEPROM, "driver failed to own EEPROM\n"));
3546 		return (IWK_FAIL);
3547 	}
3548 
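	/*
	 * Read the EEPROM one 16-bit word at a time: write the word's byte
	 * offset into the address field of CSR_EEPROM_REG, poll bit 0 for
	 * the read-done indication, then take the data from the upper 16
	 * bits of the register.
	 */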
3549 	for (addr = 0; addr < eep_sz; addr += 2) {
3550 		IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1);
3551 		tmp = IWK_READ(sc, CSR_EEPROM_REG);
3552 		IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
3553 
3554 		for (i = 0; i < 10; i++) {
3555 			rv = IWK_READ(sc, CSR_EEPROM_REG);
3556 			if (rv & 1)
3557 				break;
3558 			DELAY(10);
3559 		}
3560 
3561 		if (!(rv & 1)) {
3562 			IWK_DBG((IWK_DEBUG_EEPROM,
3563 			    "timeout reading EEPROM\n"));
3564 			iwk_eep_sem_up(sc);
3565 			return (IWK_FAIL);
3566 		}
3567 
3568 		eep_p[addr/2] = rv >> 16;
3569 	}
3570 
3571 	iwk_eep_sem_up(sc);
3572 	return (IWK_SUCCESS);
3573 }
3574 
3575 /*
3576  * init mac address in ieee80211com_t struct
3577  */
3578 static void iwk_get_mac_from_eep(iwk_sc_t *sc)
3579 {
3580 	ieee80211com_t *ic = &sc->sc_ic;
3581 	struct iwk_eep *ep = &sc->sc_eep_map;
3582 
3583 	IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address);
3584 
3585 	IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
3586 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
3587 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
3588 }
3589 
3590 static int
3591 iwk_init(iwk_sc_t *sc)
3592 {
3593 	int qid, n, err;
3594 	clock_t clk;
3595 	uint32_t tmp;
3596 
3597 	mutex_enter(&sc->sc_glock);
3598 	sc->sc_flags &= ~IWK_F_FW_INIT;
3599 
3600 	(void) iwk_preinit(sc);
3601 
3602 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3603 	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
3604 		cmn_err(CE_WARN, "iwk_init(): Radio transmitter is off\n");
3605 		goto fail1;
3606 	}
3607 
3608 	/* init Rx ring */
3609 	iwk_mac_access_enter(sc);
3610 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
3611 
3612 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
3613 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
3614 	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);
3615 
3616 	IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
3617 	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
3618 	    offsetof(struct iwk_shared, val0)) >> 4));
3619 
3620 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
3621 	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
3622 	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
3623 	    IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
3624 	    (RX_QUEUE_SIZE_LOG <<
3625 	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
3626 	iwk_mac_access_exit(sc);
3627 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
3628 	    (RX_QUEUE_SIZE - 1) & ~0x7);
3629 
3630 	/* init Tx rings */
3631 	iwk_mac_access_enter(sc);
3632 	iwk_reg_write(sc, SCD_TXFACT, 0);
3633 
3634 	/* keep warm page */
3635 	iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG,
3636 	    sc->sc_dma_kw.cookie.dmac_address >> 4);
3637 
3638 	for (qid = 0; qid < IWK_NUM_QUEUES; qid++) {
3639 		IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
3640 		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
3641 		IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
3642 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
3643 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
3644 	}
3645 	iwk_mac_access_exit(sc);
3646 
3647 	/* clear "radio off" and "disable command" bits */
3648 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3649 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
3650 	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3651 
3652 	/* clear any pending interrupts */
3653 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
3654 
3655 	/* enable interrupts */
3656 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
3657 
3658 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3659 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3660 
3661 	/*
3662 	 * backup ucode data part for future use.
3663 	 */
3664 	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
3665 	    sc->sc_dma_fw_data.mem_va,
3666 	    sc->sc_dma_fw_data.alength);
3667 
3668 	for (n = 0; n < 2; n++) {
3669 		/* load firmware init segment into NIC */
3670 		err = iwk_load_firmware(sc);
3671 		if (err != IWK_SUCCESS) {
3672 			cmn_err(CE_WARN, "iwk_init(): "
3673 			    "failed to setup boot firmware\n");
3674 			continue;
3675 		}
3676 
3677 		/* now press "execute" to start it running */
3678 		IWK_WRITE(sc, CSR_RESET, 0);
3679 		break;
3680 	}
3681 	if (n == 2) {
3682 		cmn_err(CE_WARN, "iwk_init(): failed to load firmware\n");
3683 		goto fail1;
3684 	}
3685 	/* ..and wait at most two seconds for the adapter to initialize */
3686 	clk = ddi_get_lbolt() + drv_usectohz(2000000);
3687 	while (!(sc->sc_flags & IWK_F_FW_INIT)) {
3688 		if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0)
3689 			break;
3690 	}
3691 	if (!(sc->sc_flags & IWK_F_FW_INIT)) {
3692 		cmn_err(CE_WARN,
3693 		    "iwk_init(): timeout waiting for firmware init\n");
3694 		goto fail1;
3695 	}
3696 
3697 	/*
3698 	 * at this point the firmware is loaded successfully; now configure the
3699 	 * hardware through the uCode API, including rxon, txpower, etc.
3700 	 */
3701 	err = iwk_config(sc);
3702 	if (err) {
3703 		cmn_err(CE_WARN, "iwk_init(): failed to configure device\n");
3704 		goto fail1;
3705 	}
3706 
3707 	/* at this point, hardware may receive beacons :) */
3708 	mutex_exit(&sc->sc_glock);
3709 	return (IWK_SUCCESS);
3710 
3711 fail1:
3712 	err = IWK_FAIL;
3713 	mutex_exit(&sc->sc_glock);
3714 	return (err);
3715 }
3716 
3717 static void
3718 iwk_stop(iwk_sc_t *sc)
3719 {
3720 	uint32_t tmp;
3721 	int i;
3722 
3723 
3724 	mutex_enter(&sc->sc_glock);
3725 
3726 	IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3727 	/* disable interrupts */
3728 	IWK_WRITE(sc, CSR_INT_MASK, 0);
3729 	IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
3730 	IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
3731 
3732 	/* reset all Tx rings */
3733 	for (i = 0; i < IWK_NUM_QUEUES; i++)
3734 		iwk_reset_tx_ring(sc, &sc->sc_txq[i]);
3735 
3736 	/* reset Rx ring */
3737 	iwk_reset_rx_ring(sc);
3738 
3739 	iwk_mac_access_enter(sc);
3740 	iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
3741 	iwk_mac_access_exit(sc);
3742 
3743 	DELAY(5);
3744 
3745 	iwk_stop_master(sc);
3746 
3747 	sc->sc_tx_timer = 0;
3748 	tmp = IWK_READ(sc, CSR_RESET);
3749 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
3750 	mutex_exit(&sc->sc_glock);
3751 }
3752 
3753 /*
3754  * Naive implementation of the Adaptive Multi Rate Retry algorithm:
3755  * "IEEE 802.11 Rate Adaptation: A Practical Approach"
3756  * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
3757  * INRIA Sophia - Projet Planete
3758  * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
3759  */
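/*
 * In outline (see the macros below): a sampling window counts as a
 * "success" when fewer than 10% of the transmitted frames needed a
 * retry, and as a "failure" when more than a third did.  Statistics
 * are only evaluated once at least 100 frames have been sent; after a
 * failed rate increase the success threshold grows from 1 toward 15
 * before another increase is attempted.
 */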
3760 #define	is_success(amrr)	\
3761 	((amrr)->retrycnt < (amrr)->txcnt / 10)
3762 #define	is_failure(amrr)	\
3763 	((amrr)->retrycnt > (amrr)->txcnt / 3)
3764 #define	is_enough(amrr)		\
3765 	((amrr)->txcnt > 100)
3766 #define	is_min_rate(in)		\
3767 	((in)->in_txrate == 0)
3768 #define	is_max_rate(in)		\
3769 	((in)->in_txrate == (in)->in_rates.ir_nrates - 1)
3770 #define	increase_rate(in)	\
3771 	((in)->in_txrate++)
3772 #define	decrease_rate(in)	\
3773 	((in)->in_txrate--)
3774 #define	reset_cnt(amrr)		\
3775 	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }
3776 
3777 #define	IWK_AMRR_MIN_SUCCESS_THRESHOLD	 1
3778 #define	IWK_AMRR_MAX_SUCCESS_THRESHOLD	15
3779 
3780 static void
3781 iwk_amrr_init(iwk_amrr_t *amrr)
3782 {
3783 	amrr->success = 0;
3784 	amrr->recovery = 0;
3785 	amrr->txcnt = amrr->retrycnt = 0;
3786 	amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD;
3787 }
3788 
3789 static void
3790 iwk_amrr_timeout(iwk_sc_t *sc)
3791 {
3792 	ieee80211com_t *ic = &sc->sc_ic;
3793 
3794 	IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n"));
3795 	if (ic->ic_opmode == IEEE80211_M_STA)
3796 		iwk_amrr_ratectl(NULL, ic->ic_bss);
3797 	else
3798 		ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL);
3799 	sc->sc_clk = ddi_get_lbolt();
3800 }
3801 
3802 /* ARGSUSED */
3803 static void
3804 iwk_amrr_ratectl(void *arg, ieee80211_node_t *in)
3805 {
3806 	iwk_amrr_t *amrr = (iwk_amrr_t *)in;
3807 	int need_change = 0;
3808 
3809 	if (is_success(amrr) && is_enough(amrr)) {
3810 		amrr->success++;
3811 		if (amrr->success >= amrr->success_threshold &&
3812 		    !is_max_rate(in)) {
3813 			amrr->recovery = 1;
3814 			amrr->success = 0;
3815 			increase_rate(in);
3816 			IWK_DBG((IWK_DEBUG_RATECTL,
3817 			    "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n",
3818 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
3819 			need_change = 1;
3820 		} else {
3821 			amrr->recovery = 0;
3822 		}
3823 	} else if (is_failure(amrr)) {
3824 		amrr->success = 0;
3825 		if (!is_min_rate(in)) {
3826 			if (amrr->recovery) {
3827 				amrr->success_threshold++;
3828 				if (amrr->success_threshold >
3829 				    IWK_AMRR_MAX_SUCCESS_THRESHOLD)
3830 					amrr->success_threshold =
3831 					    IWK_AMRR_MAX_SUCCESS_THRESHOLD;
3832 			} else {
3833 				amrr->success_threshold =
3834 				    IWK_AMRR_MIN_SUCCESS_THRESHOLD;
3835 			}
3836 			decrease_rate(in);
3837 			IWK_DBG((IWK_DEBUG_RATECTL,
3838 			    "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n",
3839 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
3840 			need_change = 1;
3841 		}
3842 		amrr->recovery = 0;	/* paper is incorrect */
3843 	}
3844 
3845 	if (is_enough(amrr) || need_change)
3846 		reset_cnt(amrr);
3847 }
3848 
3849 /*
3850  * calculate 4965 chipset's kelvin temperature according to
3851  * the data in the init alive and statistics notifications.
3852  * The details are described in the iwk_calibration.h file
3853  */
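/*
 * Roughly, with R1-R3 taken from the "initialize alive" notification
 * and R4 from the latest statistics notification when available
 * (sign-extended from 24 bits):
 *
 *	T(Kelvin) = (TEMPERATURE_CALIB_A_VAL * (R4 - R2) / (R3 - R1))
 *	    * 97 / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET
 */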
3854 static int32_t iwk_curr_tempera(iwk_sc_t *sc)
3855 {
3856 	int32_t  tempera;
3857 	int32_t  r1, r2, r3;
3858 	uint32_t  r4_u;
3859 	int32_t   r4_s;
3860 
3861 	if (iwk_is_fat_channel(sc)) {
3862 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[1]);
3863 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[1]);
3864 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[1]);
3865 		r4_u = sc->sc_card_alive_init.therm_r4[1];
3866 	} else {
3867 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[0]);
3868 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[0]);
3869 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[0]);
3870 		r4_u = sc->sc_card_alive_init.therm_r4[0];
3871 	}
3872 
3873 	if (sc->sc_flags & IWK_F_STATISTICS) {
3874 		r4_s = (int32_t)(sc->sc_statistics.general.temperature <<
3875 		    (31-23)) >> (31-23);
3876 	} else {
3877 		r4_s = (int32_t)(r4_u << (31-23)) >> (31-23);
3878 	}
3879 
3880 	IWK_DBG((IWK_DEBUG_CALIBRATION, "temperature R[1-4]: %d %d %d %d\n",
3881 	    r1, r2, r3, r4_s));
3882 
3883 	if (r3 == r1) {
3884 		cmn_err(CE_WARN, "iwk_curr_tempera(): "
3885 		    "failed to calculate temperature "
3886 		    "because r3 = r1\n");
3887 		return (DDI_FAILURE);
3888 	}
3889 
3890 	tempera = TEMPERATURE_CALIB_A_VAL * (r4_s - r2);
3891 	tempera /= (r3 - r1);
3892 	tempera = (tempera*97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
3893 
3894 	IWK_DBG((IWK_DEBUG_CALIBRATION, "calculated temperature: %dK, %dC\n",
3895 	    tempera, KELVIN_TO_CELSIUS(tempera)));
3896 
3897 	return (tempera);
3898 }
3899 
3900 /* Determine whether 4965 is using 2.4 GHz band */
3901 static inline int iwk_is_24G_band(iwk_sc_t *sc)
3902 {
3903 	return (sc->sc_config.flags & RXON_FLG_BAND_24G_MSK);
3904 }
3905 
3906 /* Determine whether 4965 is using fat channel */
3907 static inline int iwk_is_fat_channel(iwk_sc_t *sc)
3908 {
3909 	return ((sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
3910 	    (sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK));
3911 }
3912 
3913 /*
3914  * In MIMO mode, determine which group 4965's current channel belongs to.
3915  * For more information about "channel group",
3916  * please refer to iwk_calibration.h file
3917  */
3918 static int iwk_txpower_grp(uint16_t channel)
3919 {
3920 	if (channel >= CALIB_IWK_TX_ATTEN_GR5_FCH &&
3921 	    channel <= CALIB_IWK_TX_ATTEN_GR5_LCH) {
3922 		return (CALIB_CH_GROUP_5);
3923 	}
3924 
3925 	if (channel >= CALIB_IWK_TX_ATTEN_GR1_FCH &&
3926 	    channel <= CALIB_IWK_TX_ATTEN_GR1_LCH) {
3927 		return (CALIB_CH_GROUP_1);
3928 	}
3929 
3930 	if (channel >= CALIB_IWK_TX_ATTEN_GR2_FCH &&
3931 	    channel <= CALIB_IWK_TX_ATTEN_GR2_LCH) {
3932 		return (CALIB_CH_GROUP_2);
3933 	}
3934 
3935 	if (channel >= CALIB_IWK_TX_ATTEN_GR3_FCH &&
3936 	    channel <= CALIB_IWK_TX_ATTEN_GR3_LCH) {
3937 		return (CALIB_CH_GROUP_3);
3938 	}
3939 
3940 	if (channel >= CALIB_IWK_TX_ATTEN_GR4_FCH &&
3941 	    channel <= CALIB_IWK_TX_ATTEN_GR4_LCH) {
3942 		return (CALIB_CH_GROUP_4);
3943 	}
3944 
3945 	cmn_err(CE_WARN, "iwk_txpower_grp(): "
3946 	    "can't find txpower group for channel %d.\n", channel);
3947 
3948 	return (DDI_FAILURE);
3949 }
3950 
3951 /* 2.4 GHz */
3952 static uint16_t iwk_eep_band_1[14] = {
3953 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
3954 };
3955 
3956 /* 5.2 GHz bands */
3957 static uint16_t iwk_eep_band_2[13] = {
3958 	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
3959 };
3960 
3961 static uint16_t iwk_eep_band_3[12] = {
3962 	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
3963 };
3964 
3965 static uint16_t iwk_eep_band_4[11] = {
3966 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
3967 };
3968 
3969 static uint16_t iwk_eep_band_5[6] = {
3970 	145, 149, 153, 157, 161, 165
3971 };
3972 
3973 static uint16_t iwk_eep_band_6[7] = {
3974 	1, 2, 3, 4, 5, 6, 7
3975 };
3976 
3977 static uint16_t iwk_eep_band_7[11] = {
3978 	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
3979 };
3980 
3981 /* Get regulatory data from eeprom for a given channel */
3982 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
3983     uint16_t channel,
3984     int is_24G, int is_fat, int is_hi_chan)
3985 {
3986 	int32_t i;
3987 	uint16_t chan;
3988 
3989 	if (is_fat) {  /* 11n mode */
3990 
3991 		if (is_hi_chan) {
3992 			chan = channel - 4;
3993 		} else {
3994 			chan = channel;
3995 		}
3996 
3997 		for (i = 0; i < 7; i++) {
3998 			if (iwk_eep_band_6[i] == chan) {
3999 				return (&sc->sc_eep_map.band_24_channels[i]);
4000 			}
4001 		}
4002 		for (i = 0; i < 11; i++) {
4003 			if (iwk_eep_band_7[i] == chan) {
4004 				return (&sc->sc_eep_map.band_52_channels[i]);
4005 			}
4006 		}
4007 	} else if (is_24G) {  /* 2.4 GHz band */
4008 		for (i = 0; i < 14; i++) {
4009 			if (iwk_eep_band_1[i] == channel) {
4010 				return (&sc->sc_eep_map.band_1_channels[i]);
4011 			}
4012 		}
4013 	} else {  /* 5 GHz band */
4014 		for (i = 0; i < 13; i++) {
4015 			if (iwk_eep_band_2[i] == channel) {
4016 				return (&sc->sc_eep_map.band_2_channels[i]);
4017 			}
4018 		}
4019 		for (i = 0; i < 12; i++) {
4020 			if (iwk_eep_band_3[i] == channel) {
4021 				return (&sc->sc_eep_map.band_3_channels[i]);
4022 			}
4023 		}
4024 		for (i = 0; i < 11; i++) {
4025 			if (iwk_eep_band_4[i] == channel) {
4026 				return (&sc->sc_eep_map.band_4_channels[i]);
4027 			}
4028 		}
4029 		for (i = 0; i < 6; i++) {
4030 			if (iwk_eep_band_5[i] == channel) {
4031 				return (&sc->sc_eep_map.band_5_channels[i]);
4032 			}
4033 		}
4034 	}
4035 
4036 	return (NULL);
4037 }
4038 
4039 /*
4040  * Determine which subband a given channel belongs
4041  * to in 2.4 GHz or 5 GHz band
4042  */
4043 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel)
4044 {
4045 	int32_t b_n = -1;
4046 
4047 	for (b_n = 0; b_n < EEP_TX_POWER_BANDS; b_n++) {
4048 		if (0 == sc->sc_eep_map.calib_info.band_info_tbl[b_n].ch_from) {
4049 			continue;
4050 		}
4051 
4052 		if ((channel >=
4053 		    (uint16_t)sc->sc_eep_map.calib_info.
4054 		    band_info_tbl[b_n].ch_from) &&
4055 		    (channel <=
4056 		    (uint16_t)sc->sc_eep_map.calib_info.
4057 		    band_info_tbl[b_n].ch_to)) {
4058 			break;
4059 		}
4060 	}
4061 
4062 	return (b_n);
4063 }
4064 
4065 /* Rounded division helper used by the interpolation code */
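/*
 * The result is rounded to the nearest integer, with halves rounded
 * away from zero.  For example (illustrative values only): 7/2 yields
 * 4, 5/4 yields 1 and -7/2 yields -4.
 */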
4066 static int iwk_division(int32_t num, int32_t denom, int32_t *res)
4067 {
4068 	int32_t sign = 1;
4069 
4070 	if (num < 0) {
4071 		sign = -sign;
4072 		num = -num;
4073 	}
4074 
4075 	if (denom < 0) {
4076 		sign = -sign;
4077 		denom = -denom;
4078 	}
4079 
4080 	*res = ((num*2 + denom) / (denom*2)) * sign;
4081 
4082 	return (IWK_SUCCESS);
4083 }
4084 
4085 /* Perform linear interpolation between two calibration points */
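/*
 * y is evaluated at x on the line through (x1, y1) and (x2, y2) using
 * the rounded division above:
 *
 *	y = y2 + round((x2 - x) * (y1 - y2) / (x2 - x1))
 *
 * For example (illustrative values only), x1 = 36, y1 = 100, x2 = 48,
 * y2 = 88 and x = 40 give y = 96.
 */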
4086 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
4087     int32_t x2, int32_t y2)
4088 {
4089 	int32_t val;
4090 
4091 	if (x2 == x1) {
4092 		return (y1);
4093 	} else {
4094 		(void) iwk_division((x2-x)*(y1-y2), (x2-x1), &val);
4095 		return (val + y2);
4096 	}
4097 }
4098 
4099 /* Get interpolation measurement data of a given channel for all chains. */
4100 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
4101     struct iwk_eep_calib_channel_info *chan_info)
4102 {
4103 	int32_t ban_n;
4104 	uint32_t ch1_n, ch2_n;
4105 	int32_t c, m;
4106 	struct iwk_eep_calib_measure *m1_p, *m2_p, *m_p;
4107 
4108 	/* determine subband number */
4109 	ban_n = iwk_band_number(sc, channel);
4110 	if (ban_n >= EEP_TX_POWER_BANDS) {
4111 		return (DDI_FAILURE);
4112 	}
4113 
4114 	ch1_n =
4115 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch1.ch_num;
4116 	ch2_n =
4117 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch2.ch_num;
4118 
4119 	chan_info->ch_num = (uint8_t)channel;  /* given channel number */
4120 
4121 	/*
4122 	 * go through all chains on chipset
4123 	 */
4124 	for (c = 0; c < EEP_TX_POWER_TX_CHAINS; c++) {
4125 		/*
4126 		 * go through all factory measurements
4127 		 */
4128 		for (m = 0; m < EEP_TX_POWER_MEASUREMENTS; m++) {
4129 			m1_p =
4130 			    &(sc->sc_eep_map.calib_info.
4131 			    band_info_tbl[ban_n].ch1.measure[c][m]);
4132 			m2_p =
4133 			    &(sc->sc_eep_map.calib_info.band_info_tbl[ban_n].
4134 			    ch2.measure[c][m]);
4135 			m_p = &(chan_info->measure[c][m]);
4136 
4137 			/*
4138 			 * make interpolation to get actual
4139 			 * Tx power for given channel
4140 			 */
4141 			m_p->actual_pow = iwk_interpolate_value(channel,
4142 			    ch1_n, m1_p->actual_pow,
4143 			    ch2_n, m2_p->actual_pow);
4144 
4145 			/* make interpolation to get index into gain table */
4146 			m_p->gain_idx = iwk_interpolate_value(channel,
4147 			    ch1_n, m1_p->gain_idx,
4148 			    ch2_n, m2_p->gain_idx);
4149 
4150 			/* make interpolation to get chipset temperature */
4151 			m_p->temperature = iwk_interpolate_value(channel,
4152 			    ch1_n, m1_p->temperature,
4153 			    ch2_n, m2_p->temperature);
4154 
4155 			/*
4156 			 * make interpolation to get power
4157 			 * amp detector level
4158 			 */
4159 			m_p->pa_det = iwk_interpolate_value(channel, ch1_n,
4160 			    m1_p->pa_det,
4161 			    ch2_n, m2_p->pa_det);
4162 		}
4163 	}
4164 
4165 	return (IWK_SUCCESS);
4166 }
4167 
4168 /*
4169  * Calculate voltage compensation for Tx power. For more information,
4170  * please refer to iwk_calibration.h file
4171  */
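/*
 * In short: the compensation is the voltage difference between the
 * current reading and the eeprom value, expressed in units of
 * TX_POWER_IWK_VOLTAGE_CODES_PER_03V and doubled when the current
 * voltage is the higher one; results outside [-2, 2] are discarded.
 */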
4172 static int32_t iwk_voltage_compensation(int32_t eep_voltage,
4173     int32_t curr_voltage)
4174 {
4175 	int32_t vol_comp = 0;
4176 
4177 	if ((TX_POWER_IWK_ILLEGAL_VOLTAGE == eep_voltage) ||
4178 	    (TX_POWER_IWK_ILLEGAL_VOLTAGE == curr_voltage)) {
4179 		return (vol_comp);
4180 	}
4181 
4182 	(void) iwk_division(curr_voltage-eep_voltage,
4183 	    TX_POWER_IWK_VOLTAGE_CODES_PER_03V, &vol_comp);
4184 
4185 	if (curr_voltage > eep_voltage) {
4186 		vol_comp *= 2;
4187 	}
4188 	if ((vol_comp < -2) || (vol_comp > 2)) {
4189 		vol_comp = 0;
4190 	}
4191 
4192 	return (vol_comp);
4193 }
4194 
4195 /*
4196  * Thermal compensation values for txpower for various frequency ranges ...
4197  * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust
4198  */
4199 static struct iwk_txpower_tempera_comp {
4200 	int32_t degrees_per_05db_a;
4201 	int32_t degrees_per_05db_a_denom;
4202 } txpower_tempera_comp_table[CALIB_CH_GROUP_MAX] = {
4203 	{9, 2},			/* group 0 5.2, ch  34-43 */
4204 	{4, 1},			/* group 1 5.2, ch  44-70 */
4205 	{4, 1},			/* group 2 5.2, ch  71-124 */
4206 	{4, 1},			/* group 3 5.2, ch 125-200 */
4207 	{3, 1}			/* group 4 2.4, ch   all */
4208 };
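/*
 * Each entry is used as a ratio: one half-dB gain step per
 * (degrees_per_05db_a / degrees_per_05db_a_denom) degrees Celsius of
 * difference between the current and the factory-measured temperature,
 * e.g. 4.5 degrees per step for group 0 and 3 degrees per step for
 * the 2.4 GHz group.
 */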
4209 
4210 /*
4211  * bit-rate-dependent table to prevent Tx distortion, in half-dB units,
4212  * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
4213  */
4214 static int32_t back_off_table[] = {
4215 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
4216 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
4217 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
4218 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
4219 	10			/* CCK */
4220 };
4221 
4222 /* determine minimum Tx power index in gain table */
4223 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G)
4224 {
4225 	if ((!is_24G) && ((rate_pow_idx & 7) <= 4)) {
4226 		return (MIN_TX_GAIN_INDEX_52GHZ_EXT);
4227 	}
4228 
4229 	return (MIN_TX_GAIN_INDEX);
4230 }
4231 
4232 /*
4233  * Determine DSP and radio gain according to temperature and other factors.
4234  * This function performs the bulk of the Tx power calibration.
4235  */
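/*
 * For every rate entry and Tx chain, the index into the gain table is
 * computed roughly as
 *
 *	gain_idx = interpolated_gain_idx
 *	    - (target_power - interpolated_actual_power)
 *	    - temperature_compensation - voltage_compensation
 *	    + mimo_attenuation
 *
 * and is then clamped to the valid range (with extra offsets for the
 * 5 GHz band and the CCK entry) before being translated into DSP and
 * radio gain values through gains_table[].
 */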
4236 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc,
4237     struct iwk_tx_power_db *tp_db)
4238 {
4239 	int is_24G, is_fat, is_high_chan = 0, is_mimo;
4240 	int c, r;
4241 	int32_t target_power;
4242 	int32_t tx_grp = CALIB_CH_GROUP_MAX;
4243 	uint16_t channel;
4244 	uint8_t saturation_power;
4245 	int32_t regu_power;
4246 	int32_t curr_regu_power;
4247 	struct iwk_eep_channel *eep_chan_p;
4248 	struct iwk_eep_calib_channel_info eep_chan_calib;
4249 	int32_t eep_voltage, init_voltage;
4250 	int32_t voltage_compensation;
4251 	int32_t temperature;
4252 	int32_t degrees_per_05db_num;
4253 	int32_t degrees_per_05db_denom;
4254 	struct iwk_eep_calib_measure *measure_p;
4255 	int32_t interpo_temp;
4256 	int32_t power_limit;
4257 	int32_t atten_value;
4258 	int32_t tempera_comp[2];
4259 	int32_t interpo_gain_idx[2];
4260 	int32_t interpo_actual_pow[2];
4261 	union iwk_tx_power_dual_stream txpower_gains;
4262 	int32_t txpower_gains_idx;
4263 
4264 	channel = sc->sc_config.chan;
4265 
4266 	/* 2.4 GHz or 5 GHz band */
4267 	is_24G = iwk_is_24G_band(sc);
4268 
4269 	/* fat channel or not */
4270 	is_fat = iwk_is_fat_channel(sc);
4271 
4272 	/*
4273 	 * determine whether the fat channel uses the low or the high
4274 	 * half of the channel pair
4275 	 */
4276 	if (is_fat && (sc->sc_config.flags &
4277 	    RXON_FLG_CONTROL_CHANNEL_LOC_HIGH_MSK)) {
4278 		is_high_chan = 1;
4279 	}
4280 
4281 	if ((channel > 0) && (channel < 200)) {
4282 		/* get regulatory channel data from eeprom */
4283 		eep_chan_p = iwk_get_eep_channel(sc, channel, is_24G,
4284 		    is_fat, is_high_chan);
4285 		if (NULL == eep_chan_p) {
4286 			cmn_err(CE_WARN,
4287 			    "iwk_txpower_table_cmd_init(): "
4288 			    "can't get channel information\n");
4289 			return (DDI_FAILURE);
4290 		}
4291 	} else {
4292 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4293 		    "channel(%d) isn't in proper range\n",
4294 		    channel);
4295 		return (DDI_FAILURE);
4296 	}
4297 
4298 	/* initial value of Tx power */
4299 	sc->sc_user_txpower = (int32_t)eep_chan_p->max_power_avg;
4300 	if (sc->sc_user_txpower < IWK_TX_POWER_TARGET_POWER_MIN) {
4301 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4302 		    "user TX power is too weak\n");
4303 		return (DDI_FAILURE);
4304 	} else if (sc->sc_user_txpower > IWK_TX_POWER_TARGET_POWER_MAX) {
4305 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4306 		    "user TX power is too strong\n");
4307 		return (DDI_FAILURE);
4308 	}
4309 
4310 	target_power = 2 * sc->sc_user_txpower;
4311 
4312 	/* determine which group current channel belongs to */
4313 	tx_grp = iwk_txpower_grp(channel);
4314 	if (tx_grp < 0) {
4315 		return (tx_grp);
4316 	}
4317 
4318 
4319 	if (is_fat) {
4320 		if (is_high_chan) {
4321 			channel -= 2;
4322 		} else {
4323 			channel += 2;
4324 		}
4325 	}
4326 
4327 	/* determine saturation power */
4328 	if (is_24G) {
4329 		saturation_power =
4330 		    sc->sc_eep_map.calib_info.saturation_power24;
4331 	} else {
4332 		saturation_power =
4333 		    sc->sc_eep_map.calib_info.saturation_power52;
4334 	}
4335 
4336 	if (saturation_power < IWK_TX_POWER_SATURATION_MIN ||
4337 	    saturation_power > IWK_TX_POWER_SATURATION_MAX) {
4338 		if (is_24G) {
4339 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_24;
4340 		} else {
4341 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_52;
4342 		}
4343 	}
4344 
4345 	/* determine regulatory power */
4346 	regu_power = (int32_t)eep_chan_p->max_power_avg * 2;
4347 	if ((regu_power < IWK_TX_POWER_REGULATORY_MIN) ||
4348 	    (regu_power > IWK_TX_POWER_REGULATORY_MAX)) {
4349 		if (is_24G) {
4350 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_24;
4351 		} else {
4352 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_52;
4353 		}
4354 	}
4355 
4356 	/*
4357 	 * get measurement data for current channel
4358 	 * such as temperature, index into the gain table, and actual Tx power
4359 	 */
4360 	(void) iwk_channel_interpolate(sc, channel, &eep_chan_calib);
4361 
4362 	eep_voltage = (int32_t)sc->sc_eep_map.calib_info.voltage;
4363 	init_voltage = (int32_t)sc->sc_card_alive_init.voltage;
4364 
4365 	/* calculate voltage compensation to Tx power */
4366 	voltage_compensation =
4367 	    iwk_voltage_compensation(eep_voltage, init_voltage);
4368 
4369 	/* clamp current chip temperature into the supported range */
4370 	if (sc->sc_tempera >= IWK_TX_POWER_TEMPERATURE_MIN) {
4371 		temperature = sc->sc_tempera;
4372 	} else {
4373 		temperature = IWK_TX_POWER_TEMPERATURE_MIN;
4374 	}
4375 	if (temperature > IWK_TX_POWER_TEMPERATURE_MAX) {
4376 		temperature = IWK_TX_POWER_TEMPERATURE_MAX;
4377 	}
4379 	temperature = KELVIN_TO_CELSIUS(temperature);
4380 
4381 	degrees_per_05db_num =
4382 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a;
4383 	degrees_per_05db_denom =
4384 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a_denom;
4385 
4386 	for (c = 0; c < 2; c++) {  /* go through all chains */
4387 		measure_p = &eep_chan_calib.measure[c][1];
4388 		interpo_temp = measure_p->temperature;
4389 
4390 		/* determine temperature compensation to Tx power */
4391 		(void) iwk_division(
4392 		    (temperature-interpo_temp)*degrees_per_05db_denom,
4393 		    degrees_per_05db_num, &tempera_comp[c]);
4394 
4395 		interpo_gain_idx[c] = measure_p->gain_idx;
4396 		interpo_actual_pow[c] = measure_p->actual_pow;
4397 	}
4398 
4399 	/*
4400 	 * go through all rate entries in Tx power table
4401 	 */
4402 	for (r = 0; r < POWER_TABLE_NUM_ENTRIES; r++) {
4403 		if (r & 0x8) {
4404 			/* need to lower regulatory power for MIMO mode */
4405 			curr_regu_power = regu_power -
4406 			    IWK_TX_POWER_MIMO_REGULATORY_COMPENSATION;
4407 			is_mimo = 1;
4408 		} else {
4409 			curr_regu_power = regu_power;
4410 			is_mimo = 0;
4411 		}
4412 
4413 		power_limit = saturation_power - back_off_table[r];
4414 		if (power_limit > curr_regu_power) {
4415 			/* final Tx power limit */
4416 			power_limit = curr_regu_power;
4417 		}
4418 
4419 		if (target_power > power_limit) {
4420 			target_power = power_limit; /* final target Tx power */
4421 		}
4422 
4423 		for (c = 0; c < 2; c++) {	  /* go through all Tx chains */
4424 			if (is_mimo) {
4425 				atten_value =
4426 				    sc->sc_card_alive_init.tx_atten[tx_grp][c];
4427 			} else {
4428 				atten_value = 0;
4429 			}
4430 
4431 			/*
4432 			 * calculate index in gain table
4433 			 * this step is very important
4434 			 */
4435 			txpower_gains_idx = interpo_gain_idx[c] -
4436 			    (target_power - interpo_actual_pow[c]) -
4437 			    tempera_comp[c] - voltage_compensation +
4438 			    atten_value;
4439 
4440 			if (txpower_gains_idx <
4441 			    iwk_min_power_index(r, is_24G)) {
4442 				txpower_gains_idx =
4443 				    iwk_min_power_index(r, is_24G);
4444 			}
4445 
4446 			if (!is_24G) {
4447 				/*
4448 				 * support negative index for 5 GHz
4449 				 * band
4450 				 */
4451 				txpower_gains_idx += 9;
4452 			}
4453 
4454 			if (POWER_TABLE_CCK_ENTRY == r) {
4455 				/* for CCK mode, apply necessary attenuation */
4456 				txpower_gains_idx +=
4457 				    IWK_TX_POWER_CCK_COMPENSATION_C_STEP;
4458 			}
4459 
4460 			if (txpower_gains_idx > 107) {
4461 				txpower_gains_idx = 107;
4462 			} else if (txpower_gains_idx < 0) {
4463 				txpower_gains_idx = 0;
4464 			}
4465 
4466 			/* search DSP and radio gains in gain table */
4467 			txpower_gains.s.radio_tx_gain[c] =
4468 			    gains_table[is_24G][txpower_gains_idx].radio;
4469 			txpower_gains.s.dsp_predis_atten[c] =
4470 			    gains_table[is_24G][txpower_gains_idx].dsp;
4471 
4472 			IWK_DBG((IWK_DEBUG_CALIBRATION,
4473 			    "rate_index: %d, "
4474 			    "gain_index %d, c: %d,is_mimo: %d\n",
4475 			    r, txpower_gains_idx, c, is_mimo));
4476 		}
4477 
4478 		/* initialize Tx power table */
4479 		if (r < POWER_TABLE_NUM_HT_OFDM_ENTRIES) {
4480 			tp_db->ht_ofdm_power[r].dw = txpower_gains.dw;
4481 		} else {
4482 			tp_db->legacy_cck_power.dw = txpower_gains.dw;
4483 		}
4484 	}
4485 
4486 	return (IWK_SUCCESS);
4487 }
4488 
4489 /*
4490  * Perform Tx power calibration to adjust Tx power.
4491  * This is done by sending the Tx power table command.
4492  */
4493 static int iwk_tx_power_calibration(iwk_sc_t *sc)
4494 {
4495 	iwk_tx_power_table_cmd_t cmd;
4496 	int rv;
4497 
4498 	if (sc->sc_flags & IWK_F_SCANNING) {
4499 		return (IWK_SUCCESS);
4500 	}
4501 
4502 	/* necessary initialization to Tx power table command */
4503 	cmd.band = (uint8_t)iwk_is_24G_band(sc);
4504 	cmd.channel = sc->sc_config.chan;
4505 	cmd.channel_normal_width = 0;
4506 
4507 	/* initialize Tx power table */
4508 	rv = iwk_txpower_table_cmd_init(sc, &cmd.tx_power);
4509 	if (rv) {
4510 		cmn_err(CE_NOTE, "iwk_tx_power_calibration(): rv = %d\n", rv);
4511 		return (rv);
4512 	}
4513 
4514 	/* send out Tx power table command */
4515 	rv = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &cmd, sizeof (cmd), 1);
4516 	if (rv) {
4517 		return (rv);
4518 	}
4519 
4520 	/* record current temperature */
4521 	sc->sc_last_tempera = sc->sc_tempera;
4522 
4523 	return (IWK_SUCCESS);
4524 }
4525 
4526 /* This function is the handler of statistics notification from uCode */
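/*
 * In outline: the notification is copied into sc_statistics; unless a
 * scan is in progress, the Rx gain balance and Rx sensitivity
 * calibrations are run; and when the reported temperature or FAT-mode
 * flag differs from the previous notification, the chip temperature is
 * recomputed and a drift of 3 degrees or more triggers a new Tx power
 * calibration.
 */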
4527 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc)
4528 {
4529 	int is_diff;
4530 	struct iwk_notif_statistics *statistics_p =
4531 	    (struct iwk_notif_statistics *)(desc + 1);
4532 
4533 	mutex_enter(&sc->sc_glock);
4534 
4535 	is_diff = (sc->sc_statistics.general.temperature !=
4536 	    statistics_p->general.temperature) ||
4537 	    ((sc->sc_statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
4538 	    (statistics_p->flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK));
4539 
4540 	/* update statistics data */
4541 	(void) memcpy(&sc->sc_statistics, statistics_p,
4542 	    sizeof (struct iwk_notif_statistics));
4543 
4544 	sc->sc_flags |= IWK_F_STATISTICS;
4545 
4546 	if (!(sc->sc_flags & IWK_F_SCANNING)) {
4547 		/* make Receiver gain balance calibration */
4548 		(void) iwk_rxgain_diff(sc);
4549 
4550 		/* make Receiver sensitivity calibration */
4551 		(void) iwk_rx_sens(sc);
4552 	}
4553 
4554 
4555 	if (!is_diff) {
4556 		mutex_exit(&sc->sc_glock);
4557 		return;
4558 	}
4559 
4560 	/* calibrate current temperature of the 4965 chipset */
4561 	sc->sc_tempera = iwk_curr_tempera(sc);
4562 
4563 	/* distinct temperature change will trigger Tx power calibration */
4564 	if (((sc->sc_tempera - sc->sc_last_tempera) >= 3) ||
4565 	    ((sc->sc_last_tempera - sc->sc_tempera) >= 3)) {
4566 		/* make Tx power calibration */
4567 		(void) iwk_tx_power_calibration(sc);
4568 	}
4569 
4570 	mutex_exit(&sc->sc_glock);
4571 }
4572 
4573 /* Determine whether this station is associated */
4574 static int iwk_is_associated(iwk_sc_t *sc)
4575 {
4576 	return (sc->sc_config.filter_flags & RXON_FILTER_ASSOC_MSK);
4577 }
4578 
4579 /* Make necessary preparation for Receiver gain balance calibration */
4580 static int iwk_rxgain_diff_init(iwk_sc_t *sc)
4581 {
4582 	int i, rv;
4583 	struct iwk_calibration_cmd cmd;
4584 	struct iwk_rx_gain_diff *gain_diff_p;
4585 
4586 	gain_diff_p = &sc->sc_rxgain_diff;
4587 
4588 	(void) memset(gain_diff_p, 0, sizeof (struct iwk_rx_gain_diff));
4589 	(void) memset(&cmd, 0, sizeof (struct iwk_calibration_cmd));
4590 
4591 	for (i = 0; i < RX_CHAINS_NUM; i++) {
4592 		gain_diff_p->gain_diff_chain[i] = CHAIN_GAIN_DIFF_INIT_VAL;
4593 	}
4594 
4595 	if (iwk_is_associated(sc)) {
4596 		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
4597 		cmd.diff_gain_a = 0;
4598 		cmd.diff_gain_b = 0;
4599 		cmd.diff_gain_c = 0;
4600 
4601 		/* assume the gain of every Rx chain can be balanced */
4602 		rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &cmd,
4603 		    sizeof (cmd), 1);
4604 		if (rv) {
4605 			return (rv);
4606 		}
4607 
4608 		gain_diff_p->state = IWK_GAIN_DIFF_ACCUMULATE;
4609 	}
4610 
4611 	return (IWK_SUCCESS);
4612 }
4613 
4614 /*
4615  * Perform Receiver gain balance calibration to balance the Rx gain
4616  * among the Rx chains and determine which chain is disconnected
4617  */
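/*
 * In outline: beacon and silence (noise) RSSI values are accumulated
 * per chain; after BEACON_NUM_20 beacons the averages are taken, the
 * chain with the strongest average beacon becomes the reference,
 * chains more than MAX_ALLOWED_DIFF below it are marked disconnected,
 * and each remaining chain gets a gain delta of roughly (noise
 * difference to the quietest chain) * 10 / 15, capped at 3.  The
 * deltas are then sent once via PHY_CALIBRATE_DIFF_GAIN_CMD.
 */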
4618 static int iwk_rxgain_diff(iwk_sc_t *sc)
4619 {
4620 	int i, is_24G, rv;
4621 	int max_beacon_chain_n;
4622 	int min_noise_chain_n;
4623 	uint16_t channel_n;
4624 	int32_t beacon_diff;
4625 	int32_t noise_diff;
4626 	uint32_t noise_chain_a, noise_chain_b, noise_chain_c;
4627 	uint32_t beacon_chain_a, beacon_chain_b, beacon_chain_c;
4628 	struct iwk_calibration_cmd cmd;
4629 	uint32_t beacon_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
4630 	uint32_t noise_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
4631 	struct statistics_rx_non_phy *rx_general_p =
4632 	    &sc->sc_statistics.rx.general;
4633 	struct iwk_rx_gain_diff *gain_diff_p = &sc->sc_rxgain_diff;
4634 
4635 	if (INTERFERENCE_DATA_AVAILABLE !=
4636 	    rx_general_p->interference_data_flag) {
4637 		return (IWK_SUCCESS);
4638 	}
4639 
4640 	if (IWK_GAIN_DIFF_ACCUMULATE != gain_diff_p->state) {
4641 		return (IWK_SUCCESS);
4642 	}
4643 
4644 	is_24G = iwk_is_24G_band(sc);
4645 	channel_n = sc->sc_config.chan;	 /* channel number */
4646 
4647 	if ((channel_n != (sc->sc_statistics.flag >> 16)) ||
4648 	    ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
4649 	    (sc->sc_statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) &&
4650 	    !is_24G)) {
4651 		return (IWK_SUCCESS);
4652 	}
4653 
4654 	/* Rx chain's noise strength from statistics notification */
4655 	noise_chain_a = rx_general_p->beacon_silence_rssi_a & 0xFF;
4656 	noise_chain_b = rx_general_p->beacon_silence_rssi_b & 0xFF;
4657 	noise_chain_c = rx_general_p->beacon_silence_rssi_c & 0xFF;
4658 
4659 	/* Rx chain's beacon strength from statistics notification */
4660 	beacon_chain_a = rx_general_p->beacon_rssi_a & 0xFF;
4661 	beacon_chain_b = rx_general_p->beacon_rssi_b & 0xFF;
4662 	beacon_chain_c = rx_general_p->beacon_rssi_c & 0xFF;
4663 
4664 	gain_diff_p->beacon_count++;
4665 
4666 	/* accumulate chain's noise strength */
4667 	gain_diff_p->noise_stren_a += noise_chain_a;
4668 	gain_diff_p->noise_stren_b += noise_chain_b;
4669 	gain_diff_p->noise_stren_c += noise_chain_c;
4670 
4671 	/* accumulate chain's beacon strength */
4672 	gain_diff_p->beacon_stren_a += beacon_chain_a;
4673 	gain_diff_p->beacon_stren_b += beacon_chain_b;
4674 	gain_diff_p->beacon_stren_c += beacon_chain_c;
4675 
4676 	if (BEACON_NUM_20 == gain_diff_p->beacon_count) {
4677 		/* calculate average beacon strength */
4678 		beacon_aver[0] = (gain_diff_p->beacon_stren_a) / BEACON_NUM_20;
4679 		beacon_aver[1] = (gain_diff_p->beacon_stren_b) / BEACON_NUM_20;
4680 		beacon_aver[2] = (gain_diff_p->beacon_stren_c) / BEACON_NUM_20;
4681 
4682 		/* calculate average noise strength */
4683 		noise_aver[0] = (gain_diff_p->noise_stren_a) / BEACON_NUM_20;
4684 		noise_aver[1] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20;
4685 		noise_aver[2] = (gain_diff_p->noise_stren_c) / BEACON_NUM_20;
4686 
4687 		/* determine maximum beacon strength among 3 chains */
4688 		if ((beacon_aver[0] >= beacon_aver[1]) &&
4689 		    (beacon_aver[0] >= beacon_aver[2])) {
4690 			max_beacon_chain_n = 0;
4691 			gain_diff_p->connected_chains = 1 << 0;
4692 		} else if (beacon_aver[1] >= beacon_aver[2]) {
4693 			max_beacon_chain_n = 1;
4694 			gain_diff_p->connected_chains = 1 << 1;
4695 		} else {
4696 			max_beacon_chain_n = 2;
4697 			gain_diff_p->connected_chains = 1 << 2;
4698 		}
4699 
4700 		/* determine which chain is disconnected */
4701 		for (i = 0; i < RX_CHAINS_NUM; i++) {
4702 			if (i != max_beacon_chain_n) {
4703 				beacon_diff = beacon_aver[max_beacon_chain_n] -
4704 				    beacon_aver[i];
4705 				if (beacon_diff > MAX_ALLOWED_DIFF) {
4706 					gain_diff_p->disconnect_chain[i] = 1;
4707 				} else {
4708 					gain_diff_p->connected_chains |=
4709 					    (1 << i);
4710 				}
4711 			}
4712 		}
4713 
4714 		/*
4715 		 * if chain A and B are both disconnected,
4716 		 * assume the stronger in beacon strength is connected
4717 		 */
4718 		if (gain_diff_p->disconnect_chain[0] &&
4719 		    gain_diff_p->disconnect_chain[1]) {
4720 			if (beacon_aver[0] >= beacon_aver[1]) {
4721 				gain_diff_p->disconnect_chain[0] = 0;
4722 				gain_diff_p->connected_chains |= (1 << 0);
4723 			} else {
4724 				gain_diff_p->disconnect_chain[1] = 0;
4725 				gain_diff_p->connected_chains |= (1 << 1);
4726 			}
4727 		}
4728 
4729 		/* determine minimum noise strength among 3 chains */
4730 		if (!gain_diff_p->disconnect_chain[0]) {
4731 			min_noise_chain_n = 0;
4732 
4733 			for (i = 0; i < RX_CHAINS_NUM; i++) {
4734 				if (!gain_diff_p->disconnect_chain[i] &&
4735 				    (noise_aver[i] <=
4736 				    noise_aver[min_noise_chain_n])) {
4737 					min_noise_chain_n = i;
4738 				}
4739 
4740 			}
4741 		} else {
4742 			min_noise_chain_n = 1;
4743 
4744 			for (i = 0; i < RX_CHAINS_NUM; i++) {
4745 				if (!gain_diff_p->disconnect_chain[i] &&
4746 				    (noise_aver[i] <=
4747 				    noise_aver[min_noise_chain_n])) {
4748 					min_noise_chain_n = i;
4749 				}
4750 			}
4751 		}
4752 
4753 		gain_diff_p->gain_diff_chain[min_noise_chain_n] = 0;
4754 
4755 		/* determine gain difference between chains */
4756 		for (i = 0; i < RX_CHAINS_NUM; i++) {
4757 			if (!gain_diff_p->disconnect_chain[i] &&
4758 			    (CHAIN_GAIN_DIFF_INIT_VAL ==
4759 			    gain_diff_p->gain_diff_chain[i])) {
4760 
4761 				noise_diff = noise_aver[i] -
4762 				    noise_aver[min_noise_chain_n];
4763 				gain_diff_p->gain_diff_chain[i] =
4764 				    (uint8_t)((noise_diff * 10) / 15);
4765 
4766 				if (gain_diff_p->gain_diff_chain[i] > 3) {
4767 					gain_diff_p->gain_diff_chain[i] = 3;
4768 				}
4769 
4770 				gain_diff_p->gain_diff_chain[i] |= (1 << 2);
4771 			} else {
4772 				gain_diff_p->gain_diff_chain[i] = 0;
4773 			}
4774 		}
4775 
4776 		if (!gain_diff_p->gain_diff_send) {
4777 			gain_diff_p->gain_diff_send = 1;
4778 
4779 			(void) memset(&cmd, 0, sizeof (cmd));
4780 
4781 			cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
4782 			cmd.diff_gain_a = gain_diff_p->gain_diff_chain[0];
4783 			cmd.diff_gain_b = gain_diff_p->gain_diff_chain[1];
4784 			cmd.diff_gain_c = gain_diff_p->gain_diff_chain[2];
4785 
4786 			/*
4787 			 * send out PHY calibration command to
4788 			 * adjust every chain's Rx gain
4789 			 */
4790 			rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
4791 			    &cmd, sizeof (cmd), 1);
4792 			if (rv) {
4793 				return (rv);
4794 			}
4795 
4796 			gain_diff_p->state = IWK_GAIN_DIFF_CALIBRATED;
4797 		}
4798 
4799 		gain_diff_p->beacon_stren_a = 0;
4800 		gain_diff_p->beacon_stren_b = 0;
4801 		gain_diff_p->beacon_stren_c = 0;
4802 
4803 		gain_diff_p->noise_stren_a = 0;
4804 		gain_diff_p->noise_stren_b = 0;
4805 		gain_diff_p->noise_stren_c = 0;
4806 	}
4807 
4808 	return (IWK_SUCCESS);
4809 }
4810 
4811 /* Make necessary preparation for Receiver sensitivity calibration */
4812 static int iwk_rx_sens_init(iwk_sc_t *sc)
4813 {
4814 	int i, rv;
4815 	struct iwk_rx_sensitivity_cmd cmd;
4816 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
4817 
4818 	(void) memset(&cmd, 0, sizeof (struct iwk_rx_sensitivity_cmd));
4819 	(void) memset(rx_sens_p, 0, sizeof (struct iwk_rx_sensitivity));
4820 
4821 	rx_sens_p->auto_corr_ofdm_x4 = 90;
4822 	rx_sens_p->auto_corr_mrc_ofdm_x4 = 170;
4823 	rx_sens_p->auto_corr_ofdm_x1 = 105;
4824 	rx_sens_p->auto_corr_mrc_ofdm_x1 = 220;
4825 
4826 	rx_sens_p->auto_corr_cck_x4 = 125;
4827 	rx_sens_p->auto_corr_mrc_cck_x4 = 200;
4828 	rx_sens_p->min_energy_det_cck = 100;
4829 
4830 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CALIB_ALLOW_MSK);
4831 	rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
4832 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
4833 
4834 	rx_sens_p->last_bad_plcp_cnt_ofdm = 0;
4835 	rx_sens_p->last_false_alarm_cnt_ofdm = 0;
4836 	rx_sens_p->last_bad_plcp_cnt_cck = 0;
4837 	rx_sens_p->last_false_alarm_cnt_cck = 0;
4838 
4839 	rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
4840 	rx_sens_p->cck_prev_state = IWK_TOO_MANY_FALSE_ALARM;
4841 	rx_sens_p->cck_no_false_alarm_num = 0;
4842 	rx_sens_p->cck_beacon_idx = 0;
4843 
4844 	for (i = 0; i < 10; i++) {
4845 		rx_sens_p->cck_beacon_min[i] = 0;
4846 	}
4847 
4848 	rx_sens_p->cck_noise_idx = 0;
4849 	rx_sens_p->cck_noise_ref = 0;
4850 
4851 	for (i = 0; i < 20; i++) {
4852 		rx_sens_p->cck_noise_max[i] = 0;
4853 	}
4854 
4855 	rx_sens_p->cck_noise_diff = 0;
4856 	rx_sens_p->cck_no_false_alarm_num = 0;
4857 
4858 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
4859 
4860 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
4861 	    rx_sens_p->auto_corr_ofdm_x4;
4862 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
4863 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
4864 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
4865 	    rx_sens_p->auto_corr_ofdm_x1;
4866 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
4867 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
4868 
4869 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
4870 	    rx_sens_p->auto_corr_cck_x4;
4871 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
4872 	    rx_sens_p->auto_corr_mrc_cck_x4;
4873 	cmd.table[MIN_ENERGY_CCK_DET_IDX] = rx_sens_p->min_energy_det_cck;
4874 
4875 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
4876 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
4877 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
4878 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
4879 
4880 	/* at first, set up Rx to maximum sensitivity */
4881 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
4882 	if (rv) {
4883 		cmn_err(CE_WARN, "iwk_rx_sens_init(): "
4884 		    "in the process of initialization, "
4885 		    "failed to send rx sensitivity command\n");
4886 		return (rv);
4887 	}
4888 
4889 	rx_sens_p->flags |= IWK_SENSITIVITY_CALIB_ALLOW_MSK;
4890 
4891 	return (IWK_SUCCESS);
4892 }
4893 
4894 /*
4895  * make Receiver sensitivity calibration to adjust every chain's Rx sensitivity.
4896  * For more information, please refer to the iwk_calibration.h file
4897  */
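/*
 * In outline: the calibration requires a prior iwk_rx_sens_init() and
 * valid interference statistics, uses the reported channel_load as the
 * actual rx time, runs the OFDM and CCK sub-calibrations below, and
 * only sends a new SENSITIVITY_CMD when either of them has updated
 * its thresholds.
 */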
4898 static int iwk_rx_sens(iwk_sc_t *sc)
4899 {
4900 	int rv;
4901 	uint32_t actual_rx_time;
4902 	struct statistics_rx_non_phy *rx_general_p =
4903 	    &sc->sc_statistics.rx.general;
4904 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
4905 	struct iwk_rx_sensitivity_cmd cmd;
4906 
4907 	if (!(rx_sens_p->flags & IWK_SENSITIVITY_CALIB_ALLOW_MSK)) {
4908 		cmn_err(CE_WARN, "iwk_rx_sens(): "
4909 		    "sensitivity initialization has not finished.\n");
4910 		return (DDI_FAILURE);
4911 	}
4912 
4913 	if (INTERFERENCE_DATA_AVAILABLE !=
4914 	    rx_general_p->interference_data_flag) {
4915 		cmn_err(CE_WARN, "iwk_rx_sens(): "
4916 		    "can't make rx sensitivity calibration, "
4917 		    "because of invalid statistics\n");
4918 		return (DDI_FAILURE);
4919 	}
4920 
4921 	actual_rx_time = rx_general_p->channel_load;
4922 	if (!actual_rx_time) {
4923 		cmn_err(CE_WARN, "iwk_rx_sens(): "
4924 		    "can't make rx sensitivity calibration "
4925 		    "because there is not enough rx time\n");
4926 		return (DDI_FAILURE);
4927 	}
4928 
4929 	/* make Rx sensitivity calibration for OFDM mode */
4930 	rv = iwk_ofdm_sens(sc, actual_rx_time);
4931 	if (rv) {
4932 		return (rv);
4933 	}
4934 
4935 	/* make Rx sensitivity calibration for CCK mode */
4936 	rv = iwk_cck_sens(sc, actual_rx_time);
4937 	if (rv) {
4938 		return (rv);
4939 	}
4940 
4941 	/*
4942 	 * if neither OFDM nor CCK thresholds were updated, nothing is done
4943 	 */
4944 	if ((!(rx_sens_p->flags & IWK_SENSITIVITY_OFDM_UPDATE_MSK)) &&
4945 	    (!(rx_sens_p->flags & IWK_SENSITIVITY_CCK_UPDATE_MSK))) {
4946 		return (IWK_SUCCESS);
4947 	}
4948 
4949 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
4950 
4951 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
4952 	    rx_sens_p->auto_corr_ofdm_x4;
4953 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
4954 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
4955 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
4956 	    rx_sens_p->auto_corr_ofdm_x1;
4957 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
4958 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
4959 
4960 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
4961 	    rx_sens_p->auto_corr_cck_x4;
4962 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
4963 	    rx_sens_p->auto_corr_mrc_cck_x4;
4964 	cmd.table[MIN_ENERGY_CCK_DET_IDX] =
4965 	    rx_sens_p->min_energy_det_cck;
4966 
4967 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
4968 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
4969 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
4970 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
4971 
4972 	/*
4973 	 * send sensitivity command to complete actual sensitivity calibration
4974 	 */
4975 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
4976 	if (rv) {
4977 		cmn_err(CE_WARN, "iwk_rx_sens(): "
4978 		    "fail to send rx sensitivity command\n");
4979 		return (rv);
4980 	}
4981 
4982 	return (IWK_SUCCESS);
4983 
4984 }
4985 
4986 /*
4987  * Perform Rx sensitivity calibration for CCK mode.
4988  * This prepares the parameters for the Sensitivity command.
4989  */
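/*
 * The false-alarm and bad-PLCP counters are accumulated and scaled by
 * 200 * 1024, so comparing the sum against 50 * actual_rx_time and
 * 5 * actual_rx_time effectively tests the false-alarm rate: too many
 * alarms raise the auto-correlation thresholds (less sensitive), too
 * few lower them (more sensitive), and a rate in between leaves them
 * unchanged.  min_energy_det_cck additionally tracks recent beacon
 * energy.
 */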
4990 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
4991 {
4992 	int i;
4993 	uint8_t noise_a, noise_b, noise_c;
4994 	uint8_t max_noise_abc, max_noise_20;
4995 	uint32_t beacon_a, beacon_b, beacon_c;
4996 	uint32_t min_beacon_abc, max_beacon_10;
4997 	uint32_t cck_fa, cck_bp;
4998 	uint32_t cck_sum_fa_bp;
4999 	uint32_t temp;
5000 	struct statistics_rx_non_phy *rx_general_p =
5001 	    &sc->sc_statistics.rx.general;
5002 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5003 
5004 	cck_fa = sc->sc_statistics.rx.cck.false_alarm_cnt;
5005 	cck_bp = sc->sc_statistics.rx.cck.plcp_err;
5006 
5007 	/* accumulate false alarm */
5008 	if (rx_sens_p->last_false_alarm_cnt_cck > cck_fa) {
5009 		temp = rx_sens_p->last_false_alarm_cnt_cck;
5010 		rx_sens_p->last_false_alarm_cnt_cck = cck_fa;
5011 		cck_fa += (0xFFFFFFFF - temp);
5012 	} else {
5013 		cck_fa -= rx_sens_p->last_false_alarm_cnt_cck;
5014 		rx_sens_p->last_false_alarm_cnt_cck += cck_fa;
5015 	}
5016 
5017 	/* accumulate bad plcp */
5018 	if (rx_sens_p->last_bad_plcp_cnt_cck > cck_bp) {
5019 		temp = rx_sens_p->last_bad_plcp_cnt_cck;
5020 		rx_sens_p->last_bad_plcp_cnt_cck = cck_bp;
5021 		cck_bp += (0xFFFFFFFF - temp);
5022 	} else {
5023 		cck_bp -= rx_sens_p->last_bad_plcp_cnt_cck;
5024 		rx_sens_p->last_bad_plcp_cnt_cck += cck_bp;
5025 	}
5026 
5027 	/*
5028 	 * calculate relative value
5029 	 */
5030 	cck_sum_fa_bp = (cck_fa + cck_bp) * 200 * 1024;
5031 	rx_sens_p->cck_noise_diff = 0;
5032 
5033 	noise_a =
5034 	    (uint8_t)((rx_general_p->beacon_silence_rssi_a & 0xFF00) >> 8);
5035 	noise_b =
5036 	    (uint8_t)((rx_general_p->beacon_silence_rssi_b & 0xFF00) >> 8);
5037 	noise_c =
5038 	    (uint8_t)((rx_general_p->beacon_silence_rssi_c & 0xFF00) >> 8);
5039 
5040 	beacon_a = rx_general_p->beacon_energy_a;
5041 	beacon_b = rx_general_p->beacon_energy_b;
5042 	beacon_c = rx_general_p->beacon_energy_c;
5043 
5044 	/* determine maximum noise among 3 chains */
5045 	if ((noise_a >= noise_b) && (noise_a >= noise_c)) {
5046 		max_noise_abc = noise_a;
5047 	} else if (noise_b >= noise_c) {
5048 		max_noise_abc = noise_b;
5049 	} else {
5050 		max_noise_abc = noise_c;
5051 	}
5052 
5053 	/* record maximum noise among 3 chains */
5054 	rx_sens_p->cck_noise_max[rx_sens_p->cck_noise_idx] = max_noise_abc;
5055 	rx_sens_p->cck_noise_idx++;
5056 	if (rx_sens_p->cck_noise_idx >= 20) {
5057 		rx_sens_p->cck_noise_idx = 0;
5058 	}
5059 
5060 	/* determine the maximum among the last 20 recorded noise maxima */
5061 	max_noise_20 = rx_sens_p->cck_noise_max[0];
5062 	for (i = 0; i < 20; i++) {
5063 		if (rx_sens_p->cck_noise_max[i] >= max_noise_20) {
5064 			max_noise_20 = rx_sens_p->cck_noise_max[i];
5065 		}
5066 	}
5067 
5068 	/* determine minimum beacon among 3 chains */
5069 	if ((beacon_a <= beacon_b) && (beacon_a <= beacon_c)) {
5070 		min_beacon_abc = beacon_a;
5071 	} else if (beacon_b <= beacon_c) {
5072 		min_beacon_abc = beacon_b;
5073 	} else {
5074 		min_beacon_abc = beacon_c;
5075 	}
5076 
5077 	/* record minimum beacon among 3 chains */
5078 	rx_sens_p->cck_beacon_min[rx_sens_p->cck_beacon_idx] = min_beacon_abc;
5079 	rx_sens_p->cck_beacon_idx++;
5080 	if (rx_sens_p->cck_beacon_idx >= 10) {
5081 		rx_sens_p->cck_beacon_idx = 0;
5082 	}
5083 
5084 	/* determine the maximum among the last 10 recorded minimum beacons */
5085 	max_beacon_10 = rx_sens_p->cck_beacon_min[0];
5086 	for (i = 0; i < 10; i++) {
5087 		if (rx_sens_p->cck_beacon_min[i] >= max_beacon_10) {
5088 			max_beacon_10 = rx_sens_p->cck_beacon_min[i];
5089 		}
5090 	}
5091 
5092 	/* add a little margin */
5093 	max_beacon_10 += 6;
5094 
5095 	/* record the count of having no false alarms */
5096 	if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5097 		rx_sens_p->cck_no_false_alarm_num++;
5098 	} else {
5099 		rx_sens_p->cck_no_false_alarm_num = 0;
5100 	}
5101 
5102 	/*
5103 	 * adjust parameters in sensitivity command
5104 	 * according to different status.
5105 	 * For more information, please refer to the iwk_calibration.h file
5106 	 */
5107 	if (cck_sum_fa_bp > (50 * actual_rx_time)) {
5108 		rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5109 
5110 		if (rx_sens_p->auto_corr_cck_x4 > 160) {
5111 			rx_sens_p->cck_noise_ref = max_noise_20;
5112 
5113 			if (rx_sens_p->min_energy_det_cck > 2) {
5114 				rx_sens_p->min_energy_det_cck -= 2;
5115 			}
5116 		}
5117 
5118 		if (rx_sens_p->auto_corr_cck_x4 < 160) {
5119 			rx_sens_p->auto_corr_cck_x4 = 160 + 1;
5120 		} else {
5121 			if ((rx_sens_p->auto_corr_cck_x4 + 3) < 200) {
5122 				rx_sens_p->auto_corr_cck_x4 += 3;
5123 			} else {
5124 				rx_sens_p->auto_corr_cck_x4 = 200;
5125 			}
5126 		}
5127 
5128 		if ((rx_sens_p->auto_corr_mrc_cck_x4 + 3) < 400) {
5129 			rx_sens_p->auto_corr_mrc_cck_x4 += 3;
5130 		} else {
5131 			rx_sens_p->auto_corr_mrc_cck_x4 = 400;
5132 		}
5133 
5134 		rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5135 
5136 	} else if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5137 		rx_sens_p->cck_curr_state = IWK_TOO_FEW_FALSE_ALARM;
5138 
5139 		rx_sens_p->cck_noise_diff = (int32_t)rx_sens_p->cck_noise_ref -
5140 		    (int32_t)max_noise_20;
5141 
5142 		if ((rx_sens_p->cck_prev_state != IWK_TOO_MANY_FALSE_ALARM) &&
5143 		    ((rx_sens_p->cck_noise_diff > 2) ||
5144 		    (rx_sens_p->cck_no_false_alarm_num > 100))) {
5145 			if ((rx_sens_p->min_energy_det_cck + 2) < 97) {
5146 				rx_sens_p->min_energy_det_cck += 2;
5147 			} else {
5148 				rx_sens_p->min_energy_det_cck = 97;
5149 			}
5150 
5151 			if ((rx_sens_p->auto_corr_cck_x4 - 3) > 125) {
5152 				rx_sens_p->auto_corr_cck_x4 -= 3;
5153 			} else {
5154 				rx_sens_p->auto_corr_cck_x4 = 125;
5155 			}
5156 
5157 			if ((rx_sens_p->auto_corr_mrc_cck_x4 - 3) > 200) {
5158 				rx_sens_p->auto_corr_mrc_cck_x4 -= 3;
5159 			} else {
5160 				rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5161 			}
5162 
5163 			rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5164 		} else {
5165 			rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5166 		}
5167 	} else {
5168 		rx_sens_p->cck_curr_state = IWK_GOOD_RANGE_FALSE_ALARM;
5169 
5170 		rx_sens_p->cck_noise_ref = max_noise_20;
5171 
5172 		if (IWK_TOO_MANY_FALSE_ALARM == rx_sens_p->cck_prev_state) {
5173 			rx_sens_p->min_energy_det_cck -= 8;
5174 		}
5175 
5176 		rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5177 	}
5178 
5179 	if (rx_sens_p->min_energy_det_cck < max_beacon_10) {
5180 		rx_sens_p->min_energy_det_cck = (uint16_t)max_beacon_10;
5181 	}
5182 
5183 	rx_sens_p->cck_prev_state = rx_sens_p->cck_curr_state;
5184 
5185 	return (IWK_SUCCESS);
5186 }
5187 
5188 /*
5189  * Perform Rx sensitivity calibration for OFDM mode.
5190  * This prepares the parameters for the Sensitivity command.
5191  */
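/*
 * Same idea as the CCK case: when the false-alarm rate is too high
 * each OFDM auto-correlation threshold is raised by one (capped at
 * 120/210/140/270), when it is too low each is lowered by one
 * (floored at 85/170/105/220), otherwise nothing is updated.
 */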
5192 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5193 {
5194 	uint32_t temp;
5195 	uint16_t temp1;
5196 	uint32_t ofdm_fa, ofdm_bp;
5197 	uint32_t ofdm_sum_fa_bp;
5198 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5199 
5200 	ofdm_fa = sc->sc_statistics.rx.ofdm.false_alarm_cnt;
5201 	ofdm_bp = sc->sc_statistics.rx.ofdm.plcp_err;
5202 
5203 	/* accumulate false alarm */
5204 	if (rx_sens_p->last_false_alarm_cnt_ofdm > ofdm_fa) {
5205 		temp = rx_sens_p->last_false_alarm_cnt_ofdm;
5206 		rx_sens_p->last_false_alarm_cnt_ofdm = ofdm_fa;
5207 		ofdm_fa += (0xFFFFFFFF - temp);
5208 	} else {
5209 		ofdm_fa -= rx_sens_p->last_false_alarm_cnt_ofdm;
5210 		rx_sens_p->last_false_alarm_cnt_ofdm += ofdm_fa;
5211 	}
5212 
5213 	/* accumulate bad plcp */
5214 	if (rx_sens_p->last_bad_plcp_cnt_ofdm > ofdm_bp) {
5215 		temp = rx_sens_p->last_bad_plcp_cnt_ofdm;
5216 		rx_sens_p->last_bad_plcp_cnt_ofdm = ofdm_bp;
5217 		ofdm_bp += (0xFFFFFFFF - temp);
5218 	} else {
5219 		ofdm_bp -= rx_sens_p->last_bad_plcp_cnt_ofdm;
5220 		rx_sens_p->last_bad_plcp_cnt_ofdm += ofdm_bp;
5221 	}
5222 
5223 	ofdm_sum_fa_bp = (ofdm_fa + ofdm_bp) * 200 * 1024; /* relative value */
5224 
5225 	/*
5226 	 * adjust parameters in the sensitivity command according to status
5227 	 */
5228 	if (ofdm_sum_fa_bp > (50 * actual_rx_time)) {
5229 		temp1 = rx_sens_p->auto_corr_ofdm_x4 + 1;
5230 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 <= 120) ? temp1 : 120;
5231 
5232 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 + 1;
5233 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5234 		    (temp1 <= 210) ? temp1 : 210;
5235 
5236 		temp1 = rx_sens_p->auto_corr_ofdm_x1 + 1;
5237 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 <= 140) ? temp1 : 140;
5238 
5239 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 + 1;
5240 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5241 		    (temp1 <= 270) ? temp1 : 270;
5242 
5243 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5244 
5245 	} else if (ofdm_sum_fa_bp < (5 * actual_rx_time)) {
5246 		temp1 = rx_sens_p->auto_corr_ofdm_x4 - 1;
5247 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 >= 85) ? temp1 : 85;
5248 
5249 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 - 1;
5250 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5251 		    (temp1 >= 170) ? temp1 : 170;
5252 
5253 		temp1 = rx_sens_p->auto_corr_ofdm_x1 - 1;
5254 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 >= 105) ? temp1 : 105;
5255 
5256 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 - 1;
5257 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5258 		    (temp1 >= 220) ? temp1 : 220;
5259 
5260 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5261 
5262 	} else {
5263 		rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5264 	}
5265 
5266 	return (IWK_SUCCESS);
5267 }
5268 
5269 /*
5270  * 1)  log_event_table_ptr indicates base of the event log.  This traces
5271  *     a 256-entry history of uCode execution within a circular buffer.
5272  *     Its header format is:
5273  *
5274  *	uint32_t log_size;	log capacity (in number of entries)
5275  *	uint32_t type;	(1) timestamp with each entry, (0) no timestamp
5276  *	uint32_t wraps;	# times uCode has wrapped to top of circular buffer
5277  *      uint32_t write_index;	next circular buffer entry that uCode would fill
5278  *
5279  *     The header is followed by the circular buffer of log entries.  Entries
5280  *     with timestamps have the following format:
5281  *
5282  *	uint32_t event_id;     range 0 - 1500
5283  *	uint32_t timestamp;    low 32 bits of TSF (of network, if associated)
5284  *	uint32_t data;         event_id-specific data value
5285  *
5286  *     Entries without timestamps contain only event_id and data.
5287  */
5288 
5289 /*
5290  * iwk_write_event_log - Write event log to dmesg
5291  */
5292 static void iwk_write_event_log(iwk_sc_t *sc)
5293 {
5294 	uint32_t log_event_table_ptr;	/* Start address of event table */
5295 	uint32_t startptr;	/* Start address of log data */
5296 	uint32_t logptr;	/* address of log data entry */
5297 	uint32_t i, n, num_events;
5298 	uint32_t event_id, data1, data2; /* log data */
5299 
5300 	uint32_t log_size;   /* log capacity (in number of entries) */
5301 	uint32_t type;	/* (1) timestamp with each entry, (0) no timestamp */
5302 	uint32_t wraps;	/* # times uCode has wrapped to */
5303 			/* the top of circular buffer */
5304 	uint32_t idx; /* index of entry to be filled in next */
5305 
5306 	log_event_table_ptr = sc->sc_card_alive_run.log_event_table_ptr;
5307 	if (!(log_event_table_ptr)) {
5308 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL event table pointer\n"));
5309 		return;
5310 	}
5311 
5312 	iwk_mac_access_enter(sc);
5313 
5314 	/* Read log header */
5315 	log_size = iwk_mem_read(sc, log_event_table_ptr);
5316 	log_event_table_ptr += sizeof (uint32_t); /* addr of "type" */
5317 	type = iwk_mem_read(sc, log_event_table_ptr);
5318 	log_event_table_ptr += sizeof (uint32_t); /* addr of "wraps" */
5319 	wraps = iwk_mem_read(sc, log_event_table_ptr);
5320 	log_event_table_ptr += sizeof (uint32_t); /* addr of "idx" */
5321 	idx = iwk_mem_read(sc, log_event_table_ptr);
5322 	startptr = log_event_table_ptr +
5323 	    sizeof (uint32_t); /* addr of start of log data */
5324 	if (!log_size && !wraps) {
5325 		IWK_DBG((IWK_DEBUG_EEPROM, "Empty log\n"));
5326 		iwk_mac_access_exit(sc);
5327 		return;
5328 	}
5329 
5330 	if (!wraps) {
5331 		num_events = idx;
5332 		logptr = startptr;
5333 	} else {
5334 		num_events = log_size - idx;
5335 		n = type ? 3 : 2;	/* 3 words with timestamp, else 2 */
5336 		logptr = startptr + (idx * n * sizeof (uint32_t));
5337 	}
5338 
5339 	for (i = 0; i < num_events; i++) {
5340 		event_id = iwk_mem_read(sc, logptr);
5341 		logptr += sizeof (uint32_t);
5342 		data1 = iwk_mem_read(sc, logptr);
5343 		logptr += sizeof (uint32_t);
5344 		if (type == 0) { /* no timestamp */
5345 			IWK_DBG((IWK_DEBUG_EEPROM, "Event ID=%d, Data=0x%x\n",
5346 			    event_id, data1));
5347 		} else { /* timestamp */
5348 			data2 = iwk_mem_read(sc, logptr);
5349 			printf("Time=%d, Event ID=%d, Data=0x%x\n",
5350 			    data1, event_id, data2);
5351 			IWK_DBG((IWK_DEBUG_EEPROM,
5352 			    "Time=%d, Event ID=%d, Data=0x%x\n",
5353 			    data1, event_id, data2));
5354 			logptr += sizeof (uint32_t);
5355 		}
5356 	}
5357 
5358 	/*
5359 	 * Print the wrapped around entries, if any
5360 	 */
5361 	if (wraps) {
5362 		logptr = startptr;
5363 		for (i = 0; i < idx; i++) {
5364 			event_id = iwk_mem_read(sc, logptr);
5365 			logptr += sizeof (uint32_t);
5366 			data1 = iwk_mem_read(sc, logptr);
5367 			logptr += sizeof (uint32_t);
5368 			if (type == 0) { /* no timestamp */
5369 				IWK_DBG((IWK_DEBUG_EEPROM,
5370 				    "Event ID=%d, Data=0x%x\n", event_id, data1));
5371 			} else { /* timestamp */
5372 				data2 = iwk_mem_read(sc, logptr);
5373 				IWK_DBG((IWK_DEBUG_EEPROM,
5374 				    "Time = %d, Event ID=%d, Data=0x%x\n",
5375 				    data1, event_id, data2));
5376 				logptr += sizeof (uint32_t);
5377 			}
5378 		}
5379 	}
5380 
5381 	iwk_mac_access_exit(sc);
5382 }
5383 
5384 /*
5385  * error_event_table_ptr indicates base of the error log.  This contains
5386  * information about any uCode error that occurs.  For 4965, the format is:
5387  *
5388  * uint32_t valid;        (nonzero) valid, (0) log is empty
5389  * uint32_t error_id;     type of error
5390  * uint32_t pc;           program counter
5391  * uint32_t blink1;       branch link
5392  * uint32_t blink2;       branch link
5393  * uint32_t ilink1;       interrupt link
5394  * uint32_t ilink2;       interrupt link
5395  * uint32_t data1;        error-specific data
5396  * uint32_t data2;        error-specific data
5397  * uint32_t line;         source code line of error
5398  * uint32_t bcon_time;    beacon timer
5399  * uint32_t tsf_low;      network timestamp function timer
5400  * uint32_t tsf_hi;       network timestamp function timer
5401  */
5402 /*
5403  * iwk_write_error_log - Write error log to dmesg
5404  */
5405 static void iwk_write_error_log(iwk_sc_t *sc)
5406 {
5407 	uint32_t err_ptr;	/* Start address of error log */
5408 	uint32_t valid;		/* is error log valid */
5409 
5410 	err_ptr = sc->sc_card_alive_run.error_event_table_ptr;
5411 	if (!(err_ptr)) {
5412 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL error table pointer\n"));
5413 		return;
5414 	}
5415 
5416 	iwk_mac_access_enter(sc);
5417 
5418 	valid = iwk_mem_read(sc, err_ptr);
5419 	if (!(valid)) {
5420 		IWK_DBG((IWK_DEBUG_EEPROM, "Error data not valid\n"));
5421 		iwk_mac_access_exit(sc);
5422 		return;
5423 	}
5424 	err_ptr += sizeof (uint32_t);
5425 	IWK_DBG((IWK_DEBUG_EEPROM, "err=%d ", iwk_mem_read(sc, err_ptr)));
5426 	err_ptr += sizeof (uint32_t);
5427 	IWK_DBG((IWK_DEBUG_EEPROM, "pc=0x%X ", iwk_mem_read(sc, err_ptr)));
5428 	err_ptr += sizeof (uint32_t);
5429 	IWK_DBG((IWK_DEBUG_EEPROM,
5430 	    "branch link1=0x%X ", iwk_mem_read(sc, err_ptr)));
5431 	err_ptr += sizeof (uint32_t);
5432 	IWK_DBG((IWK_DEBUG_EEPROM,
5433 	    "branch link2=0x%X ", iwk_mem_read(sc, err_ptr)));
5434 	err_ptr += sizeof (uint32_t);
5435 	IWK_DBG((IWK_DEBUG_EEPROM,
5436 	    "interrupt link1=0x%X ", iwk_mem_read(sc, err_ptr)));
5437 	err_ptr += sizeof (uint32_t);
5438 	IWK_DBG((IWK_DEBUG_EEPROM,
5439 	    "interrupt link2=0x%X ", iwk_mem_read(sc, err_ptr)));
5440 	err_ptr += sizeof (uint32_t);
5441 	IWK_DBG((IWK_DEBUG_EEPROM, "data1=0x%X ", iwk_mem_read(sc, err_ptr)));
5442 	err_ptr += sizeof (uint32_t);
5443 	IWK_DBG((IWK_DEBUG_EEPROM, "data2=0x%X ", iwk_mem_read(sc, err_ptr)));
5444 	err_ptr += sizeof (uint32_t);
5445 	IWK_DBG((IWK_DEBUG_EEPROM, "line=%d ", iwk_mem_read(sc, err_ptr)));
5446 	err_ptr += sizeof (uint32_t);
5447 	IWK_DBG((IWK_DEBUG_EEPROM, "bcon_time=%d ", iwk_mem_read(sc, err_ptr)));
5448 	err_ptr += sizeof (uint32_t);
5449 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_low=%d ", iwk_mem_read(sc, err_ptr)));
5450 	err_ptr += sizeof (uint32_t);
5451 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_hi=%d\n", iwk_mem_read(sc, err_ptr)));
5452 
5453 	iwk_mac_access_exit(sc);
5454 }
5455