xref: /titanic_50/usr/src/uts/common/io/iwk/iwk2.c (revision eabd0450c0ea06b7993daac8f9545c7061ae7cae)
1 /*
2  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2007, Intel Corporation
8  * All rights reserved.
9  */
10 
11 /*
12  * Copyright (c) 2006
13  * Copyright (c) 2007
14  *	Damien Bergamini <damien.bergamini@free.fr>
15  *
16  * Permission to use, copy, modify, and distribute this software for any
17  * purpose with or without fee is hereby granted, provided that the above
18  * copyright notice and this permission notice appear in all copies.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27  */
28 
29 #pragma ident	"%Z%%M%	%I%	%E% SMI"
30 
31 /*
32  * Driver for Intel PRO/Wireless 4965AGN(kedron) 802.11 network adapters.
33  */
34 
35 #include <sys/types.h>
36 #include <sys/byteorder.h>
37 #include <sys/conf.h>
38 #include <sys/cmn_err.h>
39 #include <sys/stat.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/strsubr.h>
43 #include <sys/ethernet.h>
44 #include <inet/common.h>
45 #include <inet/nd.h>
46 #include <inet/mi.h>
47 #include <sys/note.h>
48 #include <sys/stream.h>
49 #include <sys/strsun.h>
50 #include <sys/modctl.h>
51 #include <sys/devops.h>
52 #include <sys/dlpi.h>
53 #include <sys/mac.h>
54 #include <sys/mac_wifi.h>
55 #include <sys/net80211.h>
56 #include <sys/net80211_proto.h>
57 #include <sys/varargs.h>
58 #include <sys/policy.h>
59 #include <sys/pci.h>
60 
61 #include "iwk_hw.h"
62 #include "iwk_eeprom.h"
63 #include "iwk2_var.h"
64 #include <inet/wifi_ioctl.h>
65 
66 #ifdef DEBUG
67 #define	IWK_DEBUG_80211		(1 << 0)
68 #define	IWK_DEBUG_CMD		(1 << 1)
69 #define	IWK_DEBUG_DMA		(1 << 2)
70 #define	IWK_DEBUG_EEPROM	(1 << 3)
71 #define	IWK_DEBUG_FW		(1 << 4)
72 #define	IWK_DEBUG_HW		(1 << 5)
73 #define	IWK_DEBUG_INTR		(1 << 6)
74 #define	IWK_DEBUG_MRR		(1 << 7)
75 #define	IWK_DEBUG_PIO		(1 << 8)
76 #define	IWK_DEBUG_RX		(1 << 9)
77 #define	IWK_DEBUG_SCAN		(1 << 10)
78 #define	IWK_DEBUG_TX		(1 << 11)
79 #define	IWK_DEBUG_RATECTL	(1 << 12)
80 #define	IWK_DEBUG_RADIO		(1 << 13)
81 uint32_t iwk_dbg_flags = 0;
82 #define	IWK_DBG(x) \
83 	iwk_dbg x
84 #else
85 #define	IWK_DBG(x)
86 #endif
87 
88 static void	*iwk_soft_state_p = NULL;
89 static uint8_t iwk_fw_bin [] = {
90 #include "fw-iw/iw4965.ucode.hex"
91 };
92 
/*
 * DMA attributes for a shared page.
 * NOTE(review): every attribute set below requests exactly one DMA
 * segment (sgllen == 1), so each allocation must be physically
 * contiguous; the alignment field doubles as the hardware's boundary
 * requirement for that object.
 */
static ddi_dma_attr_t sh_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a keep warm DRAM descriptor (4K aligned) */
static ddi_dma_attr_t kw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a ring descriptor (256-byte aligned) */
static ddi_dma_attr_t ring_desc_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a cmd (only 4-byte alignment needed) */
static ddi_dma_attr_t cmd_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a rx buffer (256-byte aligned) */
static ddi_dma_attr_t rx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a tx buffer.
 * the maximum number of segments is 4 for the hardware.
 * now all the wifi drivers put the whole frame in a single
 * descriptor, so we define the maximum  number of segments 1,
 * just the same as the rx_buffer. we consider leverage the HW
 * ability in the future, that is why we don't define rx and tx
 * buffer_dma_attr as the same.
 */
static ddi_dma_attr_t tx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for text and data part in the firmware */
static ddi_dma_attr_t fw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0x7fffffff,	/* maximum DMAable byte count */
	0x10,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
212 

/*
 * regs access attributes: device registers are little-endian and
 * accesses must not be reordered.
 */
static ddi_device_acc_attr_t iwk_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes: DMA memory is accessed with no byte swapping;
 * endianness is handled explicitly (LE_32 etc.) by the driver.
 */
static ddi_device_acc_attr_t iwk_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
229 
230 static int	iwk_ring_init(iwk_sc_t *);
231 static void	iwk_ring_free(iwk_sc_t *);
232 static int	iwk_alloc_shared(iwk_sc_t *);
233 static void	iwk_free_shared(iwk_sc_t *);
234 static int	iwk_alloc_kw(iwk_sc_t *);
235 static void	iwk_free_kw(iwk_sc_t *);
236 static int	iwk_alloc_fw_dma(iwk_sc_t *);
237 static void	iwk_free_fw_dma(iwk_sc_t *);
238 static int	iwk_alloc_rx_ring(iwk_sc_t *);
239 static void	iwk_reset_rx_ring(iwk_sc_t *);
240 static void	iwk_free_rx_ring(iwk_sc_t *);
241 static int	iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *,
242     int, int);
243 static void	iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
244 static void	iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
245 
246 static int	iwk_newstate(ieee80211com_t *, enum ieee80211_state, int);
247 static int	iwk_key_set(ieee80211com_t *, const struct ieee80211_key *,
248     const uint8_t mac[IEEE80211_ADDR_LEN]);
249 static void	iwk_mac_access_enter(iwk_sc_t *);
250 static void	iwk_mac_access_exit(iwk_sc_t *);
251 static uint32_t	iwk_reg_read(iwk_sc_t *, uint32_t);
252 static void	iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t);
253 static void	iwk_reg_write_region_4(iwk_sc_t *, uint32_t,
254 		    uint32_t *, int);
255 static int	iwk_load_firmware(iwk_sc_t *);
256 static void	iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *,
257 		    iwk_rx_data_t *);
258 static void	iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *,
259 		    iwk_rx_data_t *);
260 static void	iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *);
261 static uint_t	iwk_intr(caddr_t);
262 static int	iwk_eep_load(iwk_sc_t *sc);
263 static void	iwk_get_mac_from_eep(iwk_sc_t *sc);
264 static int	iwk_eep_sem_down(iwk_sc_t *sc);
265 static void	iwk_eep_sem_up(iwk_sc_t *sc);
266 static uint_t	iwk_rx_softintr(caddr_t);
267 static uint8_t	iwk_rate_to_plcp(int);
268 static int	iwk_cmd(iwk_sc_t *, int, const void *, int, int);
269 static void	iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t);
270 static int	iwk_hw_set_before_auth(iwk_sc_t *);
271 static int	iwk_scan(iwk_sc_t *);
272 static int	iwk_config(iwk_sc_t *);
273 static void	iwk_stop_master(iwk_sc_t *);
274 static int	iwk_power_up(iwk_sc_t *);
275 static int	iwk_preinit(iwk_sc_t *);
276 static int	iwk_init(iwk_sc_t *);
277 static void	iwk_stop(iwk_sc_t *);
278 
279 static int iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
280 static int iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
281 
282 /*
283  * GLD specific operations
284  */
285 static int	iwk_m_stat(void *arg, uint_t stat, uint64_t *val);
286 static int	iwk_m_start(void *arg);
287 static void	iwk_m_stop(void *arg);
288 static int	iwk_m_unicst(void *arg, const uint8_t *macaddr);
289 static int	iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m);
290 static int	iwk_m_promisc(void *arg, boolean_t on);
291 static mblk_t  *iwk_m_tx(void *arg, mblk_t *mp);
292 static void	iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
293 
294 static void	iwk_destroy_locks(iwk_sc_t *sc);
295 static int	iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type);
296 static void	iwk_thread(iwk_sc_t *sc);
297 
/*
 * Supported rates for 802.11b/g modes (in 500Kbps unit).
 * 11a and 11n support will be added later.
 */
static const struct ieee80211_rateset iwk_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

static const struct ieee80211_rateset iwk_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };

/*
 * For mfthread only: priority at which the monitor/fatal-error thread
 * created in iwk_attach() runs.
 */
extern pri_t minclsyspri;

#define	DRV_NAME_4965	"iwk"

/*
 * Module Loading Data & Entry Points
 */
DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach,
    iwk_detach, nodev, NULL, D_MP, NULL);

/* driver linkage for a device driver module */
static struct modldrv iwk_modldrv = {
	&mod_driverops,
	"Intel(R) 4965AGN driver(N)",
	&iwk_devops
};

/* single-module linkage handed to mod_install()/mod_remove() */
static struct modlinkage iwk_modlinkage = {
	MODREV_1,
	&iwk_modldrv,
	NULL
};
332 
333 int
334 _init(void)
335 {
336 	int	status;
337 
338 	status = ddi_soft_state_init(&iwk_soft_state_p,
339 	    sizeof (iwk_sc_t), 1);
340 	if (status != DDI_SUCCESS)
341 		return (status);
342 
343 	mac_init_ops(&iwk_devops, DRV_NAME_4965);
344 	status = mod_install(&iwk_modlinkage);
345 	if (status != DDI_SUCCESS) {
346 		mac_fini_ops(&iwk_devops);
347 		ddi_soft_state_fini(&iwk_soft_state_p);
348 	}
349 
350 	return (status);
351 }
352 
353 int
354 _fini(void)
355 {
356 	int status;
357 
358 	status = mod_remove(&iwk_modlinkage);
359 	if (status == DDI_SUCCESS) {
360 		mac_fini_ops(&iwk_devops);
361 		ddi_soft_state_fini(&iwk_soft_state_p);
362 	}
363 
364 	return (status);
365 }
366 
/* Report module information to modinfo(1M). */
int
_info(struct modinfo *mip)
{
	return (mod_info(&iwk_modlinkage, mip));
}
372 
373 /*
374  * Mac Call Back entries
375  */
mac_callbacks_t	iwk_m_callbacks = {
	MC_IOCTL,	/* mc_callbacks: only the optional ioctl entry is set */
	iwk_m_stat,	/* mc_getstat */
	iwk_m_start,	/* mc_start */
	iwk_m_stop,	/* mc_stop */
	iwk_m_promisc,	/* mc_setpromisc */
	iwk_m_multicst,	/* mc_multicst */
	iwk_m_unicst,	/* mc_unicst */
	iwk_m_tx,	/* mc_tx */
	NULL,		/* mc_resources (unused) */
	iwk_m_ioctl	/* mc_ioctl */
};
388 
#ifdef DEBUG
/*
 * Debug printer behind the IWK_DBG() macro: forward the message to
 * vcmn_err() when any of the given flag bits is enabled in the global
 * iwk_dbg_flags mask.
 */
void
iwk_dbg(uint32_t flags, const char *fmt, ...)
{
	va_list	args;

	if (!(flags & iwk_dbg_flags))
		return;

	va_start(args, fmt);
	vcmn_err(CE_NOTE, fmt, args);
	va_end(args);
}
#endif
402 
403 /*
404  * device operations
405  */
406 int
407 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
408 {
409 	iwk_sc_t		*sc;
410 	ieee80211com_t	*ic;
411 	int			instance, err, i;
412 	char			strbuf[32];
413 	wifi_data_t		wd = { 0 };
414 	mac_register_t		*macp;
415 
416 	if (cmd != DDI_ATTACH) {
417 		err = DDI_FAILURE;
418 		goto attach_fail1;
419 	}
420 
421 	instance = ddi_get_instance(dip);
422 	err = ddi_soft_state_zalloc(iwk_soft_state_p, instance);
423 	if (err != DDI_SUCCESS) {
424 		cmn_err(CE_WARN,
425 		    "iwk_attach(): failed to allocate soft state\n");
426 		goto attach_fail1;
427 	}
428 	sc = ddi_get_soft_state(iwk_soft_state_p, instance);
429 	sc->sc_dip = dip;
430 
431 	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
432 	    &iwk_reg_accattr, &sc->sc_cfg_handle);
433 	if (err != DDI_SUCCESS) {
434 		cmn_err(CE_WARN,
435 		    "iwk_attach(): failed to map config spaces regs\n");
436 		goto attach_fail2;
437 	}
438 	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
439 	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
440 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0);
441 	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
442 	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
443 	if (!sc->sc_clsz)
444 		sc->sc_clsz = 16;
445 	sc->sc_clsz = (sc->sc_clsz << 2);
446 	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
447 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
448 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
449 	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);
450 	/*
451 	 * Map operating registers
452 	 */
453 	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
454 	    0, 0, &iwk_reg_accattr, &sc->sc_handle);
455 	if (err != DDI_SUCCESS) {
456 		cmn_err(CE_WARN,
457 		    "iwk_attach(): failed to map device regs\n");
458 		goto attach_fail2a;
459 	}
460 
461 	/*
462 	 * Initialize mutexs and condvars
463 	 */
464 	err = ddi_get_iblock_cookie(dip, 0, &sc->sc_iblk);
465 	if (err != DDI_SUCCESS) {
466 		cmn_err(CE_WARN,
467 		    "iwk_attach(): failed to do ddi_get_iblock_cookie()\n");
468 		goto attach_fail2b;
469 	}
470 	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER, sc->sc_iblk);
471 	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER, sc->sc_iblk);
472 	cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL);
473 	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
474 	cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL);
475 	/*
476 	 * initialize the mfthread
477 	 */
478 	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
479 	    (void *) sc->sc_iblk);
480 	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
481 	sc->sc_mf_thread = NULL;
482 	sc->sc_mf_thread_switch = 0;
483 
484 	/*
485 	 * Allocate shared page.
486 	 */
487 	err = iwk_alloc_shared(sc);
488 	if (err != DDI_SUCCESS) {
489 		cmn_err(CE_WARN, "failed to allocate shared page\n");
490 		goto attach_fail3;
491 	}
492 
493 	/*
494 	 * Allocate keep warm page.
495 	 */
496 	err = iwk_alloc_kw(sc);
497 	if (err != DDI_SUCCESS) {
498 		cmn_err(CE_WARN, "failed to allocate keep warm page\n");
499 		goto attach_fail3a;
500 	}
501 
502 	/*
503 	 * Do some necessary hardware initializations.
504 	 */
505 	err = iwk_preinit(sc);
506 	if (err != DDI_SUCCESS) {
507 		cmn_err(CE_WARN, "failed to init hardware\n");
508 		goto attach_fail4;
509 	}
510 
511 	/* initialize EEPROM */
512 	err = iwk_eep_load(sc);  /* get hardware configurations from eeprom */
513 	if (err != 0) {
514 		cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n");
515 		goto attach_fail4;
516 	}
517 
518 	if (sc->sc_eep_map.calib_version < EEP_TX_POWER_VERSION_NEW) {
519 		IWK_DBG((IWK_DEBUG_EEPROM, "older EEPROM detected"));
520 		goto attach_fail4;
521 	}
522 
523 	iwk_get_mac_from_eep(sc);
524 
525 	err = iwk_ring_init(sc);
526 	if (err != DDI_SUCCESS) {
527 		cmn_err(CE_WARN, "iwk_attach(): "
528 		    "failed to allocate and initialize ring\n");
529 		goto attach_fail4;
530 	}
531 
532 	sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin;
533 
534 	err = iwk_alloc_fw_dma(sc);
535 	if (err != DDI_SUCCESS) {
536 		cmn_err(CE_WARN, "iwk_attach(): "
537 		    "failed to allocate firmware dma\n");
538 		goto attach_fail5;
539 	}
540 
541 	/*
542 	 * Initialize the wifi part, which will be used by
543 	 * generic layer
544 	 */
545 	ic = &sc->sc_ic;
546 	ic->ic_phytype  = IEEE80211_T_OFDM;
547 	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
548 	ic->ic_state    = IEEE80211_S_INIT;
549 	ic->ic_maxrssi  = 100; /* experimental number */
550 	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
551 	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
552 	/*
553 	 * use software WEP and TKIP, hardware CCMP;
554 	 */
555 	ic->ic_caps |= IEEE80211_C_AES_CCM;
556 	/*
557 	 * Support WPA/WPA2
558 	 */
559 	ic->ic_caps |= IEEE80211_C_WPA;
560 	/* set supported .11b and .11g rates */
561 	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b;
562 	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g;
563 
564 	/* set supported .11b and .11g channels (1 through 14) */
565 	for (i = 1; i <= 14; i++) {
566 		ic->ic_sup_channels[i].ich_freq =
567 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
568 		ic->ic_sup_channels[i].ich_flags =
569 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
570 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
571 	}
572 	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
573 	ic->ic_xmit = iwk_send;
574 	/*
575 	 * init Wifi layer
576 	 */
577 	ieee80211_attach(ic);
578 
579 	/*
580 	 * different instance has different WPA door
581 	 */
582 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
583 	    ddi_driver_name(dip),
584 	    ddi_get_instance(dip));
585 
586 	/*
587 	 * Override 80211 default routines
588 	 */
589 	sc->sc_newstate = ic->ic_newstate;
590 	ic->ic_newstate = iwk_newstate;
591 	ic->ic_crypto.cs_key_set = iwk_key_set;
592 	ieee80211_media_init(ic);
593 	/*
594 	 * initialize default tx key
595 	 */
596 	ic->ic_def_txkey = 0;
597 
598 	err = ddi_add_softintr(dip, DDI_SOFTINT_LOW,
599 	    &sc->sc_rx_softint_id, &sc->sc_iblk, NULL, iwk_rx_softintr,
600 	    (caddr_t)sc);
601 	if (err != DDI_SUCCESS) {
602 		cmn_err(CE_WARN,
603 		    "iwk_attach(): failed to do ddi_add_softintr()\n");
604 		goto attach_fail7;
605 	}
606 
607 	/*
608 	 * Add the interrupt handler
609 	 */
610 	err = ddi_add_intr(dip, 0, &sc->sc_iblk, NULL,
611 	    iwk_intr, (caddr_t)sc);
612 	if (err != DDI_SUCCESS) {
613 		cmn_err(CE_WARN,
614 		    "iwk_attach(): failed to do ddi_add_intr()\n");
615 		goto attach_fail8;
616 	}
617 
618 	/*
619 	 * Initialize pointer to device specific functions
620 	 */
621 	wd.wd_secalloc = WIFI_SEC_NONE;
622 	wd.wd_opmode = ic->ic_opmode;
623 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
624 
625 	macp = mac_alloc(MAC_VERSION);
626 	if (err != DDI_SUCCESS) {
627 		cmn_err(CE_WARN,
628 		    "iwk_attach(): failed to do mac_alloc()\n");
629 		goto attach_fail9;
630 	}
631 
632 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
633 	macp->m_driver		= sc;
634 	macp->m_dip		= dip;
635 	macp->m_src_addr	= ic->ic_macaddr;
636 	macp->m_callbacks	= &iwk_m_callbacks;
637 	macp->m_min_sdu		= 0;
638 	macp->m_max_sdu		= IEEE80211_MTU;
639 	macp->m_pdata		= &wd;
640 	macp->m_pdata_size	= sizeof (wd);
641 
642 	/*
643 	 * Register the macp to mac
644 	 */
645 	err = mac_register(macp, &ic->ic_mach);
646 	mac_free(macp);
647 	if (err != DDI_SUCCESS) {
648 		cmn_err(CE_WARN,
649 		    "iwk_attach(): failed to do mac_register()\n");
650 		goto attach_fail9;
651 	}
652 
653 	/*
654 	 * Create minor node of type DDI_NT_NET_WIFI
655 	 */
656 	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
657 	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
658 	    instance + 1, DDI_NT_NET_WIFI, 0);
659 	if (err != DDI_SUCCESS)
660 		cmn_err(CE_WARN,
661 		    "iwk_attach(): failed to do ddi_create_minor_node()\n");
662 
663 	/*
664 	 * Notify link is down now
665 	 */
666 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
667 
668 	/*
669 	 * create the mf thread to handle the link status,
670 	 * recovery fatal error, etc.
671 	 */
672 
673 	sc->sc_mf_thread_switch = 1;
674 	if (sc->sc_mf_thread == NULL)
675 		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
676 		    iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri);
677 
678 	sc->sc_flags |= IWK_F_ATTACHED;
679 
680 	return (DDI_SUCCESS);
681 attach_fail9:
682 	ddi_remove_intr(dip, 0, sc->sc_iblk);
683 attach_fail8:
684 	ddi_remove_softintr(sc->sc_rx_softint_id);
685 	sc->sc_rx_softint_id = NULL;
686 attach_fail7:
687 	ieee80211_detach(ic);
688 attach_fail6:
689 	iwk_free_fw_dma(sc);
690 attach_fail5:
691 	iwk_ring_free(sc);
692 attach_fail4:
693 	iwk_free_kw(sc);
694 attach_fail3a:
695 	iwk_free_shared(sc);
696 attach_fail3:
697 	iwk_destroy_locks(sc);
698 attach_fail2b:
699 	ddi_regs_map_free(&sc->sc_handle);
700 attach_fail2a:
701 	ddi_regs_map_free(&sc->sc_cfg_handle);
702 attach_fail2:
703 	ddi_soft_state_free(iwk_soft_state_p, instance);
704 attach_fail1:
705 	return (err);
706 }
707 
/*
 * Device detach entry point: stop the monitor thread and the hardware,
 * unregister from the MAC layer, then release all resources in the
 * reverse order of iwk_attach().  Only DDI_DETACH is supported.
 */
int
iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	iwk_sc_t	*sc;
	int err;

	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
	ASSERT(sc != NULL);

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);
	/* refuse to detach an instance whose attach did not complete */
	if (!(sc->sc_flags & IWK_F_ATTACHED))
		return (DDI_FAILURE);

	/*
	 * Destroy the mf_thread: clear its run switch, then wait for it
	 * to signal its own exit (sc_mf_thread becomes NULL).
	 */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_mf_thread_switch = 0;
	while (sc->sc_mf_thread != NULL) {
		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0)
			break;
	}
	mutex_exit(&sc->sc_mt_lock);

	iwk_stop(sc);
	/* give the hardware time to quiesce before tearing down DMA */
	DELAY(500000);

	/*
	 * Unregister from the MAC layer subsystem; a failure here (e.g.
	 * the link is still held open) aborts the detach.
	 */
	err = mac_unregister(sc->sc_ic.ic_mach);
	if (err != DDI_SUCCESS)
		return (err);

	mutex_enter(&sc->sc_glock);
	iwk_free_fw_dma(sc);
	iwk_ring_free(sc);
	iwk_free_kw(sc);
	iwk_free_shared(sc);
	mutex_exit(&sc->sc_glock);

	ddi_remove_intr(dip, 0, sc->sc_iblk);
	ddi_remove_softintr(sc->sc_rx_softint_id);
	sc->sc_rx_softint_id = NULL;

	/*
	 * detach ieee80211
	 */
	ieee80211_detach(&sc->sc_ic);

	iwk_destroy_locks(sc);

	ddi_regs_map_free(&sc->sc_handle);
	ddi_regs_map_free(&sc->sc_cfg_handle);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}
768 
/*
 * Destroy all mutexes and condvars created in iwk_attach(), in the
 * reverse order of their initialization.
 */
static void
iwk_destroy_locks(iwk_sc_t *sc)
{
	cv_destroy(&sc->sc_mt_cv);
	mutex_destroy(&sc->sc_mt_lock);
	cv_destroy(&sc->sc_tx_cv);
	cv_destroy(&sc->sc_cmd_cv);
	cv_destroy(&sc->sc_fw_cv);
	mutex_destroy(&sc->sc_tx_lock);
	mutex_destroy(&sc->sc_glock);
}
780 
781 /*
782  * Allocate an area of memory and a DMA handle for accessing it
783  */
static int
iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize,
    ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
    uint_t dma_flags, iwk_dma_t *dma_p)
{
	caddr_t vaddr;
	int err;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS) {
		/* mark as unallocated so iwk_free_dma_mem() is a no-op */
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory; only the consistency bits of dma_flags are
	 * relevant to ddi_dma_mem_alloc().
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->acc_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Bind the two together; the attrs passed in request a single
	 * segment, so on success exactly one cookie describes the area.
	 */
	dma_p->mem_va = vaddr;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
	    &dma_p->cookie, &dma_p->ncookies);
	if (err != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/* sentinel values; callers of this helper do not use these fields */
	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}
836 
837 /*
838  * Free one allocated area of DMAable memory
839  */
/*
 * Free one allocated area of DMAable memory.  Safe to call on a
 * descriptor that was never (or only partially) allocated: each step
 * is guarded by the corresponding handle being non-NULL.
 */
static void
iwk_free_dma_mem(iwk_dma_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		/* unbind before releasing the handle */
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}
857 
858 /*
859  *
860  */
861 static int
862 iwk_alloc_fw_dma(iwk_sc_t *sc)
863 {
864 	int err = DDI_SUCCESS;
865 	iwk_dma_t *dma_p;
866 	char *t;
867 
868 	/*
869 	 * firmware image layout:
870 	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
871 	 */
872 	t = (char *)(sc->sc_hdr + 1);
873 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
874 	    &fw_dma_attr, &iwk_dma_accattr,
875 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
876 	    &sc->sc_dma_fw_text);
877 	dma_p = &sc->sc_dma_fw_text;
878 	IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n",
879 	    dma_p->ncookies, dma_p->cookie.dmac_address,
880 	    dma_p->cookie.dmac_size));
881 	if (err != DDI_SUCCESS) {
882 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
883 		    " text dma memory");
884 		goto fail;
885 	}
886 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
887 
888 	t += LE_32(sc->sc_hdr->textsz);
889 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
890 	    &fw_dma_attr, &iwk_dma_accattr,
891 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
892 	    &sc->sc_dma_fw_data);
893 	dma_p = &sc->sc_dma_fw_data;
894 	IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
895 	    dma_p->ncookies, dma_p->cookie.dmac_address,
896 	    dma_p->cookie.dmac_size));
897 	if (err != DDI_SUCCESS) {
898 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
899 		    " data dma memory");
900 		goto fail;
901 	}
902 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
903 
904 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
905 	    &fw_dma_attr, &iwk_dma_accattr,
906 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
907 	    &sc->sc_dma_fw_data_bak);
908 	dma_p = &sc->sc_dma_fw_data_bak;
909 	IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
910 	    "size:%lx]\n",
911 	    dma_p->ncookies, dma_p->cookie.dmac_address,
912 	    dma_p->cookie.dmac_size));
913 	if (err != DDI_SUCCESS) {
914 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
915 		    " data bakeup dma memory");
916 		goto fail;
917 	}
918 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
919 
920 	t += LE_32(sc->sc_hdr->datasz);
921 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
922 	    &fw_dma_attr, &iwk_dma_accattr,
923 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
924 	    &sc->sc_dma_fw_init_text);
925 	dma_p = &sc->sc_dma_fw_init_text;
926 	IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
927 	    "size:%lx]\n",
928 	    dma_p->ncookies, dma_p->cookie.dmac_address,
929 	    dma_p->cookie.dmac_size));
930 	if (err != DDI_SUCCESS) {
931 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
932 		    "init text dma memory");
933 		goto fail;
934 	}
935 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
936 
937 	t += LE_32(sc->sc_hdr->init_textsz);
938 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
939 	    &fw_dma_attr, &iwk_dma_accattr,
940 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
941 	    &sc->sc_dma_fw_init_data);
942 	dma_p = &sc->sc_dma_fw_init_data;
943 	IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
944 	    "size:%lx]\n",
945 	    dma_p->ncookies, dma_p->cookie.dmac_address,
946 	    dma_p->cookie.dmac_size));
947 	if (err != DDI_SUCCESS) {
948 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
949 		    "init data dma memory");
950 		goto fail;
951 	}
952 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
953 
954 	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
955 fail:
956 	return (err);
957 }
958 
/*
 * Free all firmware DMA regions.  iwk_free_dma_mem() is a no-op for
 * regions that were never allocated, so this is safe after a partial
 * iwk_alloc_fw_dma() failure.
 */
static void
iwk_free_fw_dma(iwk_sc_t *sc)
{
	iwk_free_dma_mem(&sc->sc_dma_fw_text);
	iwk_free_dma_mem(&sc->sc_dma_fw_data);
	iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
	iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
	iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
}
968 
969 /*
970  * Allocate a shared page between host and NIC.
971  */
972 static int
973 iwk_alloc_shared(iwk_sc_t *sc)
974 {
975 	iwk_dma_t *dma_p;
976 	int err = DDI_SUCCESS;
977 
978 	/* must be aligned on a 4K-page boundary */
979 	err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
980 	    &sh_dma_attr, &iwk_dma_accattr,
981 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
982 	    &sc->sc_dma_sh);
983 	if (err != DDI_SUCCESS)
984 		goto fail;
985 	sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;
986 
987 	dma_p = &sc->sc_dma_sh;
988 	IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
989 	    dma_p->ncookies, dma_p->cookie.dmac_address,
990 	    dma_p->cookie.dmac_size));
991 
992 	return (err);
993 fail:
994 	iwk_free_shared(sc);
995 	return (err);
996 }
997 
/* Release the host/NIC shared page allocated by iwk_alloc_shared(). */
static void
iwk_free_shared(iwk_sc_t *sc)
{
	iwk_free_dma_mem(&sc->sc_dma_sh);
}
1003 
1004 /*
1005  * Allocate a keep warm page.
1006  */
1007 static int
1008 iwk_alloc_kw(iwk_sc_t *sc)
1009 {
1010 	iwk_dma_t *dma_p;
1011 	int err = DDI_SUCCESS;
1012 
1013 	/* must be aligned on a 4K-page boundary */
1014 	err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE,
1015 	    &kw_dma_attr, &iwk_dma_accattr,
1016 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1017 	    &sc->sc_dma_kw);
1018 	if (err != DDI_SUCCESS)
1019 		goto fail;
1020 
1021 	dma_p = &sc->sc_dma_kw;
1022 	IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n",
1023 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1024 	    dma_p->cookie.dmac_size));
1025 
1026 	return (err);
1027 fail:
1028 	iwk_free_kw(sc);
1029 	return (err);
1030 }
1031 
/* Release the keep-warm page allocated by iwk_alloc_kw(). */
static void
iwk_free_kw(iwk_sc_t *sc)
{
	iwk_free_dma_mem(&sc->sc_dma_kw);
}
1037 
/*
 * Allocate the receive ring: one descriptor table of RX_QUEUE_SIZE
 * 32-bit entries plus one DMA buffer per slot.  Each descriptor holds
 * the buffer's physical address shifted right by 8.  On any failure
 * everything allocated so far is torn down via iwk_free_rx_ring().
 */
static int
iwk_alloc_rx_ring(iwk_sc_t *sc)
{
	iwk_rx_ring_t *ring;
	iwk_rx_data_t *data;
	iwk_dma_t *dma_p;
	int i, err = DDI_SUCCESS;

	ring = &sc->sc_rxq;
	ring->cur = 0;

	/* descriptor table: one 32-bit entry per rx slot */
	err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
	    &ring_desc_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWK_DBG((IWK_DEBUG_DMA, "dma alloc rx ring desc "
		    "failed\n"));
		goto fail;
	}
	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
	dma_p = &ring->dma_desc;
	IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/*
	 * Allocate Rx buffers.
	 */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		data = &ring->data[i];
		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &rx_buffer_dma_attr, &iwk_dma_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWK_DBG((IWK_DEBUG_DMA, "dma alloc rx ring "
			    "buf[%d] failed\n", i));
			goto fail;
		}
		/*
		 * the physical address bit [8-36] are used,
		 * instead of bit [0-31] in 3945.
		 */
		ring->desc[i] = LE_32((uint32_t)
		    (data->dma_data.cookie.dmac_address >> 8));
	}
	dma_p = &ring->data[0].dma_data;
	IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/* push the descriptor table to the device before use */
	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	return (err);

fail:
	iwk_free_rx_ring(sc);
	return (err);
}
1099 
/*
 * Stop the Rx DMA channel and reset the software read index.
 * Channel 0 is disabled, then we poll (up to ~2 seconds) for the
 * idle indication (bit 24) in the Rx status register.  A timeout is
 * only logged under DEBUG; we continue regardless.
 */
static void
iwk_reset_rx_ring(iwk_sc_t *sc)
{
	int n;

	iwk_mac_access_enter(sc);
	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	for (n = 0; n < 2000; n++) {
		if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24))
			break;
		DELAY(1000);
	}
#ifdef DEBUG
	if (n == 2000)
		IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n"));
#endif
	iwk_mac_access_exit(sc);

	sc->sc_rxq.cur = 0;
}
1120 
/*
 * Release all Rx ring resources: the per-slot DMA buffers and the
 * descriptor array.  The dma_hdl checks make this safe to call on a
 * partially allocated ring (as the iwk_alloc_rx_ring() fail path
 * does).
 */
static void
iwk_free_rx_ring(iwk_sc_t *sc)
{
	int i;

	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		if (sc->sc_rxq.data[i].dma_data.dma_hdl)
			IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
			    DDI_DMA_SYNC_FORCPU);
		iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
	}

	if (sc->sc_rxq.dma_desc.dma_hdl)
		IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
	iwk_free_dma_mem(&sc->sc_rxq.dma_desc);
}
1137 
1138 static int
1139 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring,
1140     int slots, int qid)
1141 {
1142 	iwk_tx_data_t *data;
1143 	iwk_tx_desc_t *desc_h;
1144 	uint32_t paddr_desc_h;
1145 	iwk_cmd_t *cmd_h;
1146 	uint32_t paddr_cmd_h;
1147 	iwk_dma_t *dma_p;
1148 	int i, err = DDI_SUCCESS;
1149 
1150 	ring->qid = qid;
1151 	ring->count = TFD_QUEUE_SIZE_MAX;
1152 	ring->window = slots;
1153 	ring->queued = 0;
1154 	ring->cur = 0;
1155 
1156 	err = iwk_alloc_dma_mem(sc,
1157 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t),
1158 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1159 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1160 	    &ring->dma_desc);
1161 	if (err != DDI_SUCCESS) {
1162 		IWK_DBG((IWK_DEBUG_DMA, "dma alloc tx ring desc[%d]"
1163 		    " failed\n", qid));
1164 		goto fail;
1165 	}
1166 	dma_p = &ring->dma_desc;
1167 	IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1168 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1169 	    dma_p->cookie.dmac_size));
1170 
1171 	desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va;
1172 	paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1173 
1174 	err = iwk_alloc_dma_mem(sc,
1175 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t),
1176 	    &cmd_dma_attr, &iwk_dma_accattr,
1177 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1178 	    &ring->dma_cmd);
1179 	if (err != DDI_SUCCESS) {
1180 		IWK_DBG((IWK_DEBUG_DMA, "dma alloc tx ring cmd[%d]"
1181 		    " failed\n", qid));
1182 		goto fail;
1183 	}
1184 	dma_p = &ring->dma_cmd;
1185 	IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1186 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1187 	    dma_p->cookie.dmac_size));
1188 
1189 	cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va;
1190 	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1191 
1192 	/*
1193 	 * Allocate Tx buffers.
1194 	 */
1195 	ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1196 	    KM_NOSLEEP);
1197 	if (ring->data == NULL) {
1198 		IWK_DBG((IWK_DEBUG_DMA, "could not allocate "
1199 		    "tx data slots\n"));
1200 		goto fail;
1201 	}
1202 
1203 	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1204 		data = &ring->data[i];
1205 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1206 		    &tx_buffer_dma_attr, &iwk_dma_accattr,
1207 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
1208 		    &data->dma_data);
1209 		if (err != DDI_SUCCESS) {
1210 			IWK_DBG((IWK_DEBUG_DMA, "dma alloc tx "
1211 			    "ring buf[%d] failed\n", i));
1212 			goto fail;
1213 		}
1214 
1215 		data->desc = desc_h + i;
1216 		data->paddr_desc = paddr_desc_h +
1217 		    ((caddr_t)data->desc - (caddr_t)desc_h);
1218 		data->cmd = cmd_h +  i; /* (i % slots); */
1219 		data->paddr_cmd = paddr_cmd_h +
1220 		    ((caddr_t)data->cmd - (caddr_t)cmd_h);
1221 		    /* ((i % slots) * sizeof (iwk_cmd_t)); */
1222 	}
1223 	dma_p = &ring->data[0].dma_data;
1224 	IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx "
1225 	    "size:%lx]\n",
1226 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1227 	    dma_p->cookie.dmac_size));
1228 
1229 	return (err);
1230 
1231 fail:
1232 	if (ring->data)
1233 		kmem_free(ring->data,
1234 		    sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX);
1235 	iwk_free_tx_ring(sc, ring);
1236 	return (err);
1237 }
1238 
/*
 * Quiesce one Tx DMA channel: disable it and poll (up to 2ms) for
 * the channel-idle status bit, then sync all the ring's data buffers
 * and reset the software queue counters.  A poll timeout is only
 * logged under DEBUG.
 */
static void
iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
{
	iwk_tx_data_t *data;
	int i, n;

	iwk_mac_access_enter(sc);

	IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
	for (n = 0; n < 200; n++) {
		if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) &
		    IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid))
			break;
		DELAY(10);
	}
#ifdef DEBUG
	if (n == 200 && iwk_dbg_flags > 0) {
		IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n",
		    ring->qid));
	}
#endif
	iwk_mac_access_exit(sc);

	for (i = 0; i < ring->count; i++) {
		data = &ring->data[i];
		IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
	}

	ring->queued = 0;
	ring->cur = 0;
}
1270 
/*
 * Release every resource of a Tx ring: the descriptor array, the
 * command array, the per-slot DMA buffers and the slot array itself.
 * Safe to call on a partially constructed ring thanks to the
 * dma_hdl/NULL checks.
 */
/*ARGSUSED*/
static void
iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
{
	int i;

	if (ring->dma_desc.dma_hdl != NULL)
		IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
	iwk_free_dma_mem(&ring->dma_desc);

	if (ring->dma_cmd.dma_hdl != NULL)
		IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
	iwk_free_dma_mem(&ring->dma_cmd);

	if (ring->data != NULL) {
		for (i = 0; i < ring->count; i++) {
			if (ring->data[i].dma_data.dma_hdl)
				IWK_DMA_SYNC(ring->data[i].dma_data,
				    DDI_DMA_SYNC_FORDEV);
			iwk_free_dma_mem(&ring->data[i].dma_data);
		}
		kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t));
	}
}
1295 
1296 static int
1297 iwk_ring_init(iwk_sc_t *sc)
1298 {
1299 	int i, err = DDI_SUCCESS;
1300 
1301 	for (i = 0; i < IWK_NUM_QUEUES; i++) {
1302 		if (i == IWK_CMD_QUEUE_NUM)
1303 			continue;
1304 		err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1305 		    i);
1306 		if (err != DDI_SUCCESS)
1307 			goto fail;
1308 	}
1309 	err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM],
1310 	    TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM);
1311 	if (err != DDI_SUCCESS)
1312 		goto fail;
1313 	err = iwk_alloc_rx_ring(sc);
1314 	if (err != DDI_SUCCESS)
1315 		goto fail;
1316 	return (err);
1317 
1318 fail:
1319 	return (err);
1320 }
1321 
1322 static void
1323 iwk_ring_free(iwk_sc_t *sc)
1324 {
1325 	int i = IWK_NUM_QUEUES;
1326 
1327 	iwk_free_rx_ring(sc);
1328 	while (--i >= 0) {
1329 		iwk_free_tx_ring(sc, &sc->sc_txq[i]);
1330 	}
1331 }
1332 
1333 /*ARGSUSED*/
1334 static int
1335 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1336 {
1337 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1338 	iwk_tx_power_table_cmd_t txpower;
1339 	enum ieee80211_state ostate = ic->ic_state;
1340 	int i, err = IWK_SUCCESS;
1341 
1342 	mutex_enter(&sc->sc_glock);
1343 	switch (nstate) {
1344 	case IEEE80211_S_SCAN:
1345 		if (ostate == IEEE80211_S_INIT) {
1346 			ic->ic_flags |= IEEE80211_F_SCAN | IEEE80211_F_ASCAN;
1347 			/* let LED blink when scanning */
1348 			iwk_set_led(sc, 2, 10, 2);
1349 
1350 			if ((err = iwk_scan(sc)) != 0) {
1351 				IWK_DBG((IWK_DEBUG_80211,
1352 				    "could not initiate scan\n"));
1353 				ic->ic_flags &= ~(IEEE80211_F_SCAN |
1354 				    IEEE80211_F_ASCAN);
1355 				mutex_exit(&sc->sc_glock);
1356 				return (err);
1357 			}
1358 		}
1359 		ic->ic_state = nstate;
1360 		mutex_exit(&sc->sc_glock);
1361 		return (IWK_SUCCESS);
1362 
1363 	case IEEE80211_S_AUTH:
1364 		/* reset state to handle reassociations correctly */
1365 		sc->sc_config.assoc_id = 0;
1366 		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1367 
1368 		/*
1369 		 * before sending authentication and association request frame,
1370 		 * we need do something in the hardware, such as setting the
1371 		 * channel same to the target AP...
1372 		 */
1373 		if ((err = iwk_hw_set_before_auth(sc)) != 0) {
1374 			IWK_DBG((IWK_DEBUG_80211,
1375 			    "could not send authentication request\n"));
1376 			mutex_exit(&sc->sc_glock);
1377 			return (err);
1378 		}
1379 		break;
1380 
1381 	case IEEE80211_S_RUN:
1382 		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
1383 			/* let LED blink when monitoring */
1384 			iwk_set_led(sc, 2, 10, 10);
1385 			break;
1386 		}
1387 
1388 		if (ic->ic_opmode != IEEE80211_M_STA) {
1389 			(void) iwk_hw_set_before_auth(sc);
1390 			/* need setup beacon here */
1391 		}
1392 		IWK_DBG((IWK_DEBUG_80211, "iwk: associated."));
1393 
1394 		/* update adapter's configuration */
1395 		sc->sc_config.assoc_id = sc->sc_assoc_id & 0x3fff;
1396 		/* short preamble/slot time are negotiated when associating */
1397 		sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
1398 		    RXON_FLG_SHORT_SLOT_MSK);
1399 
1400 		if (ic->ic_flags & IEEE80211_F_SHSLOT)
1401 			sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
1402 
1403 		if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
1404 			sc->sc_config.flags |=
1405 			    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
1406 
1407 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ASSOC_MSK);
1408 
1409 		if (ic->ic_opmode != IEEE80211_M_STA)
1410 			sc->sc_config.filter_flags |=
1411 			    LE_32(RXON_FILTER_BCON_AWARE_MSK);
1412 
1413 		IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x"
1414 		    " filter_flags %x\n",
1415 		    sc->sc_config.chan, sc->sc_config.flags,
1416 		    sc->sc_config.filter_flags));
1417 		err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
1418 		    sizeof (iwk_rxon_cmd_t), 1);
1419 		if (err != IWK_SUCCESS) {
1420 			IWK_DBG((IWK_DEBUG_80211,
1421 			    "could not update configuration\n"));
1422 			mutex_exit(&sc->sc_glock);
1423 			return (err);
1424 		}
1425 
1426 		/*
1427 		 * set Tx power for 2.4GHz channels
1428 		 * (need further investigation. fix tx power at present)
1429 		 * This cmd should be issued each time the reply_rxon cmd is
1430 		 * invoked.
1431 		 */
1432 		(void) memset(&txpower, 0, sizeof (txpower));
1433 		txpower.band = 1; /* for 2.4G */
1434 		txpower.channel = sc->sc_config.chan;
1435 		txpower.channel_normal_width = 0;
1436 		for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) {
1437 			txpower.tx_power.ht_ofdm_power[i].s.ramon_tx_gain =
1438 			    0x3f3f;
1439 			txpower.tx_power.ht_ofdm_power[i].s.dsp_predis_atten =
1440 			    110 | (110 << 8);
1441 		}
1442 		txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES]
1443 		    .s.ramon_tx_gain = 0x3f3f;
1444 		txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES]
1445 		    .s.dsp_predis_atten = 110 | (110 << 8);
1446 		err = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
1447 		    sizeof (txpower), 1);
1448 		if (err != IWK_SUCCESS) {
1449 			cmn_err(CE_WARN, "iwk_newstate(): failed to "
1450 			    "set txpower\n");
1451 			return (err);
1452 		}
1453 
1454 		/* set LED on after associated */
1455 		iwk_set_led(sc, 2, 0, 1);
1456 		break;
1457 
1458 	case IEEE80211_S_INIT:
1459 		/* set LED off after init */
1460 		iwk_set_led(sc, 2, 1, 0);
1461 		break;
1462 	case IEEE80211_S_ASSOC:
1463 		break;
1464 	}
1465 
1466 	mutex_exit(&sc->sc_glock);
1467 	return (sc->sc_newstate(ic, nstate, arg));
1468 }
1469 
1470 /*ARGSUSED*/
1471 static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
1472     const uint8_t mac[IEEE80211_ADDR_LEN])
1473 {
1474 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1475 	iwk_add_sta_t node;
1476 	int err;
1477 
1478 	switch (k->wk_cipher->ic_cipher) {
1479 	case IEEE80211_CIPHER_WEP:
1480 	case IEEE80211_CIPHER_TKIP:
1481 		return (1); /* sofeware do it. */
1482 	case IEEE80211_CIPHER_AES_CCM:
1483 		break;
1484 	default:
1485 		return (0);
1486 	}
1487 	sc->sc_config.filter_flags &= ~(RXON_FILTER_DIS_DECRYPT_MSK
1488 	    | RXON_FILTER_DIS_GRP_DECRYPT_MSK);
1489 
1490 	mutex_enter(&sc->sc_glock);
1491 
1492 	/* update ap/multicast node */
1493 	(void) memset(&node, 0, sizeof (node));
1494 	if (IEEE80211_IS_MULTICAST(mac)) {
1495 		(void) memset(node.bssid, 0xff, 6);
1496 		node.id = IWK_BROADCAST_ID;
1497 	} else {
1498 		IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid);
1499 		node.id = IWK_AP_ID;
1500 	}
1501 	if (k->wk_flags & IEEE80211_KEY_XMIT) {
1502 		node.key_flags = 0;
1503 		node.keyp = k->wk_keyix;
1504 	} else {
1505 		node.key_flags = (1 << 14);
1506 		node.keyp = k->wk_keyix + 4;
1507 	}
1508 	(void) memcpy(node.key, k->wk_key, k->wk_keylen);
1509 	node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1510 	node.sta_mask = STA_MODIFY_KEY_MASK;
1511 	node.control = 1;
1512 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
1513 	if (err != IWK_SUCCESS) {
1514 		cmn_err(CE_WARN, "iwk_key_set():"
1515 		    "failed to update ap node\n");
1516 		mutex_exit(&sc->sc_glock);
1517 		return (0);
1518 	}
1519 	mutex_exit(&sc->sc_glock);
1520 	return (1);
1521 }
1522 
1523 /*
1524  * exclusive access to mac begin.
1525  */
1526 static void
1527 iwk_mac_access_enter(iwk_sc_t *sc)
1528 {
1529 	uint32_t tmp;
1530 	int n;
1531 
1532 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
1533 	IWK_WRITE(sc, CSR_GP_CNTRL,
1534 	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1535 
1536 	/* wait until we succeed */
1537 	for (n = 0; n < 1000; n++) {
1538 		if ((IWK_READ(sc, CSR_GP_CNTRL) &
1539 		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1540 		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1541 		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN)
1542 			break;
1543 		DELAY(10);
1544 	}
1545 	if (n == 1000)
1546 		IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n"));
1547 }
1548 
1549 /*
1550  * exclusive access to mac end.
1551  */
1552 static void
1553 iwk_mac_access_exit(iwk_sc_t *sc)
1554 {
1555 	uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
1556 	IWK_WRITE(sc, CSR_GP_CNTRL,
1557 	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1558 }
1559 
1560 /*
1561  * this function defined here for future use.
1562  * static uint32_t
1563  * iwk_mem_read(iwk_sc_t *sc, uint32_t addr)
1564  * {
1565  * 	IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
1566  * 	return (IWK_READ(sc, HBUS_TARG_MEM_RDAT));
1567  * }
1568  */
1569 
/*
 * Indirect write of one 32-bit word into device SRAM through the
 * HBUS target-memory window.  Callers must hold mac access (see
 * iwk_mac_access_enter()).
 */
static void
iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
	IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
}
1576 
/*
 * Indirect read of a peripheral register through the HBUS window.
 * NOTE(review): the (3 << 24) value OR-ed into the address is an
 * access-control code -- confirm its meaning against the 4965
 * programming documentation.
 */
static uint32_t
iwk_reg_read(iwk_sc_t *sc, uint32_t addr)
{
	IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
	return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT));
}
1583 
/*
 * Indirect write of a peripheral register through the HBUS window
 * (same (3 << 24) access code as iwk_reg_read()).
 */
static void
iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
	IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
}
1590 
1591 static void
1592 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr,
1593     uint32_t *data, int wlen)
1594 {
1595 	for (; wlen > 0; wlen--, data++, addr += 4)
1596 		iwk_reg_write(sc, addr, *data);
1597 }
1598 
1599 
1600 /*
1601  * ucode load/initialization steps:
1602  * 1)  load Bootstrap State Machine (BSM) with "bootstrap" uCode image.
1603  * BSM contains a small memory that *always* stays powered up, so it can
1604  * retain the bootstrap program even when the card is in a power-saving
1605  * power-down state.  The BSM loads the small program into ARC processor's
1606  * instruction memory when triggered by power-up.
1607  * 2)  load Initialize image via bootstrap program.
1608  * The Initialize image sets up regulatory and calibration data for the
1609  * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed.
1610  * The 4965 reply contains calibration data for temperature, voltage and tx gain
1611  * correction.
1612  */
1613 static int
1614 iwk_load_firmware(iwk_sc_t *sc)
1615 {
1616 	uint32_t *boot_fw = (uint32_t *)sc->sc_boot;
1617 	uint32_t size = sc->sc_hdr->bootsz;
1618 	int n, err = IWK_SUCCESS;
1619 
1620 	/*
1621 	 * The physical address bit [4-35] of the initialize uCode.
1622 	 * In the initialize alive notify interrupt the physical address of
1623 	 * the runtime ucode will be set for loading.
1624 	 */
1625 	iwk_mac_access_enter(sc);
1626 
1627 	iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
1628 	    sc->sc_dma_fw_init_text.cookie.dmac_address >> 4);
1629 	iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
1630 	    sc->sc_dma_fw_init_data.cookie.dmac_address >> 4);
1631 	iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
1632 	    sc->sc_dma_fw_init_text.cookie.dmac_size);
1633 	iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
1634 	    sc->sc_dma_fw_init_data.cookie.dmac_size);
1635 
1636 	/* load bootstrap code into BSM memory */
1637 	iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw,
1638 	    size / sizeof (uint32_t));
1639 
1640 	iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0);
1641 	iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
1642 	iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t));
1643 
1644 	/*
1645 	 * prepare to load initialize uCode
1646 	 */
1647 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
1648 
1649 	/* wait while the adapter is busy loading the firmware */
1650 	for (n = 0; n < 1000; n++) {
1651 		if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) &
1652 		    BSM_WR_CTRL_REG_BIT_START))
1653 			break;
1654 		DELAY(10);
1655 	}
1656 	if (n == 1000) {
1657 		IWK_DBG((IWK_DEBUG_FW,
1658 		    "timeout transferring firmware\n"));
1659 		err = ETIMEDOUT;
1660 		return (err);
1661 	}
1662 
1663 	/* for future power-save mode use */
1664 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
1665 
1666 	iwk_mac_access_exit(sc);
1667 
1668 	return (err);
1669 }
1670 
/*
 * Handle a REPLY_4965_RX notification: parse the PHY result that
 * precedes the frame, derive an RSSI estimate from the per-antenna
 * info, validate length and CRC status, then copy the 802.11 frame
 * into an mblk and hand it to net80211.
 */
/*ARGSUSED*/
static void
iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_rx_ring_t *ring = &sc->sc_rxq;
	iwk_rx_phy_res_t *stat;
	ieee80211_node_t *in;
	uint32_t *tail;
	struct ieee80211_frame *wh;
	mblk_t *mp;
	uint16_t len, rssi, mrssi, agc;
	int16_t t;
	uint32_t ants, i;
	struct iwk_rx_non_cfg_phy *phyinfo;

	/* assuming not 11n here. cope with 11n in phase-II */
	stat = (iwk_rx_phy_res_t *)(desc + 1);
	if (stat->cfg_phy_cnt > 20) {
		/* implausible config-PHY word count; drop the frame */
		return;
	}

	/* take the strongest raw RSSI among the antennae actually used */
	phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy;
	agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS;
	mrssi = 0;
	ants = (stat->phy_flags & RX_PHY_FLAGS_ANTENNAE_MASK)
	    >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
	for (i = 0; i < 3; i++) {
		if (ants & (1 << i))
			mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]);
	}
	t = mrssi - agc - 44; /* t is the dBM value */
	/*
	 * convert dBm to percentage ???
	 */
	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
	    / (75 * 75);
	if (rssi > 100)
		rssi = 100;
	if (rssi < 1)
		rssi = 1;
	/* the rx status word ("tail") follows the frame body */
	len = stat->byte_count;
	tail = (uint32_t *)((uint8_t *)(stat + 1) + stat->cfg_phy_cnt + len);

	IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d "
	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
	    len, stat->rate.r.s.rate, stat->channel,
	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
	    stat->cfg_phy_cnt, LE_32(*tail)));

	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
		IWK_DBG((IWK_DEBUG_RX, "rx frame oversize\n"));
		return;
	}

	/*
	 * discard Rx frames with bad CRC
	 */
	if ((LE_32(*tail) &
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n",
		    LE_32(*tail)));
		sc->sc_rx_err++;
		return;
	}

	/*
	 * Snoop the association id out of association response frames.
	 * NOTE(review): this compares the whole fc[0] byte against a
	 * subtype constant; it only matches because the management type
	 * bits are zero -- confirm this is intentional.
	 */
	wh = (struct ieee80211_frame *)
	    ((uint8_t *)(stat + 1)+ stat->cfg_phy_cnt);
	if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) {
		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
		IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n",
		    sc->sc_assoc_id));
	}
#ifdef DEBUG
	if (iwk_dbg_flags & IWK_DEBUG_RX)
		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
#endif
	in = ieee80211_find_rxnode(ic, wh);
	mp = allocb(len, BPRI_MED);
	if (mp) {
		(void) memcpy(mp->b_wptr, wh, len);
		mp->b_wptr += len;

		/* send the frame to the 802.11 layer */
		(void) ieee80211_input(ic, mp, in, rssi, 0);
	} else {
		sc->sc_rx_nobuf++;
		IWK_DBG((IWK_DEBUG_RX,
		    "iwk_rx_intr(): alloc rx buf failed\n"));
	}
	/* release node reference */
	ieee80211_free_node(in);
}
1766 
/*
 * Handle a REPLY_TX notification: account retries, disarm the Tx
 * watchdog, credit one slot back to the originating ring and restart
 * the MAC-layer transmit path if it was flow-controlled.
 */
/*ARGSUSED*/
static void
iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
{
	ieee80211com_t *ic = &sc->sc_ic;
	/*
	 * NOTE(review): qid is masked with 0x3 here but with 7 in
	 * iwk_cmd_intr() -- confirm which width is correct for the
	 * number of Tx queues in use.
	 */
	iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
	iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1);

	IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d"
	    " retries=%d frame_count=%x nkill=%d "
	    "rate=%x duration=%d status=%x\n",
	    desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count,
	    stat->bt_kill_count, stat->rate.r.s.rate,
	    LE_32(stat->duration), LE_32(stat->status)));

	if (stat->ntries > 0) {
		sc->sc_tx_retries++;
		IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n",
		    sc->sc_tx_retries));
	}

	/* a frame completed; disarm the tx watchdog */
	sc->sc_tx_timer = 0;

	mutex_enter(&sc->sc_tx_lock);
	ring->queued--;
	if (ring->queued < 0)
		ring->queued = 0;
	/*
	 * NOTE(review): queued never exceeds ring->count, so the
	 * "(ring->count << 3)" comparison is always true and the MAC
	 * layer is restarted on the first completion; a low-water mark
	 * such as (ring->count >> 3) may have been intended -- verify
	 * before changing.
	 */
	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) {
		sc->sc_need_reschedule = 0;
		mutex_exit(&sc->sc_tx_lock);
		mac_tx_update(ic->ic_mach);
		mutex_enter(&sc->sc_tx_lock);
	}
	mutex_exit(&sc->sc_tx_lock);
}
1802 
/*
 * A firmware command completed: mark it done and wake the thread
 * waiting on sc_cmd_cv (see iwk_cmd()).  Only notifications from
 * queue 4 -- presumably IWK_CMD_QUEUE_NUM, verify -- are handled.
 */
static void
iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc)
{
	if ((desc->hdr.qid & 7) != 4) {
		return;
	}
	mutex_enter(&sc->sc_glock);
	sc->sc_flags |= IWK_F_CMD_DONE;
	cv_signal(&sc->sc_cmd_cv);
	mutex_exit(&sc->sc_glock);
	IWK_DBG((IWK_DEBUG_CMD, "rx cmd: "
	    "qid=%x idx=%d flags=%x type=0x%x\n",
	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
	    desc->hdr.type));
}
1818 
/*
 * Handle the REPLY_ALIVE notification from the firmware.
 *
 * For the "initialize" image: save the calibration data it carries
 * and point the BSM at the runtime image for loading.  For the
 * runtime image: save the alive response, program the scheduler
 * (SCD) registers and SRAM areas so the Tx queues work, then signal
 * sc_fw_cv to unblock the thread waiting for firmware init.
 */
static void
iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc)
{
	uint32_t base, i;
	struct iwk_alive_resp *ar =
	    (struct iwk_alive_resp *)(desc + 1);

	/*
	 * the microcontroller is ready
	 * NOTE(review): ucode_minor is printed twice below; the second
	 * was probably meant to be the major version field.
	 */
	IWK_DBG((IWK_DEBUG_FW,
	    "microcode alive notification minor: %x major: %x type:"
	    " %x subtype: %x\n",
	    ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype));

	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
		/* logged only; no recovery action is taken here */
		IWK_DBG((IWK_DEBUG_FW,
		    "microcontroller initialization failed\n"));
	}
	if (ar->ver_subtype == INITIALIZE_SUBTYPE) {
		IWK_DBG((IWK_DEBUG_FW,
		    "initialization alive received.\n"));
		(void) memcpy(&sc->sc_card_alive_init, ar,
		    sizeof (struct iwk_init_alive_resp));
		/* XXX get temperature */
		iwk_mac_access_enter(sc);
		/* point the BSM at the runtime text/data images */
		iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
		    sc->sc_dma_fw_text.cookie.dmac_address >> 4);
		iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
		    sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4);
		iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
		    sc->sc_dma_fw_data.cookie.dmac_size);
		/* 0x80000000: presumably the load-trigger bit -- confirm */
		iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
		    sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000);
		iwk_mac_access_exit(sc);
	} else {
		IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n"));
		(void) memcpy(&sc->sc_card_alive_run, ar,
		    sizeof (struct iwk_alive_resp));

		/*
		 * Init SCD related registers to make Tx work. XXX
		 */
		iwk_mac_access_enter(sc);

		/* read sram address of data base */
		sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR);

		/*
		 * NOTE(review): 'i' is not reset between the next three
		 * loops, so the second loop starts at i == 128 and the
		 * third (i already >= 256) never executes, leaving the
		 * translate table area untouched.  The vendor reference
		 * code clears these regions in one continuous address
		 * sweep -- confirm against it before changing.
		 */
		/* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */
		for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0;
		    i < 128; i += 4)
			iwk_mem_write(sc, base + i, 0);

		/* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */
		for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET;
		    i < 256; i += 4)
			iwk_mem_write(sc, base + i, 0);

		/* clear and init SCD_TRANSLATE_TBL_OFFSET area. 32 bytes */
		for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET;
		    i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
			iwk_mem_write(sc, base + i, 0);

		/* scheduler DRAM base, in units of 1KB (address >> 10) */
		iwk_reg_write(sc, SCD_DRAM_BASE_ADDR,
		    sc->sc_dma_sh.cookie.dmac_address >> 10);
		iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0);

		/* initiate the tx queues */
		for (i = 0; i < IWK_NUM_QUEUES; i++) {
			iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0);
			IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8));
			iwk_mem_write(sc, sc->sc_scd_base +
			    SCD_CONTEXT_QUEUE_OFFSET(i),
			    (SCD_WIN_SIZE & 0x7f));
			iwk_mem_write(sc, sc->sc_scd_base +
			    SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t),
			    (SCD_FRAME_LIMIT & 0x7f) << 16);
		}
		/* interrupt enable on each queue0-7 */
		iwk_reg_write(sc, SCD_INTERRUPT_MASK,
		    (1 << IWK_NUM_QUEUES) - 1);
		/* enable  each channel 0-7 */
		iwk_reg_write(sc, SCD_TXFACT,
		    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
		/*
		 * queue 0-7 maps to FIFO 0-7 and
		 * all queues work under FIFO mode (none-scheduler-ack)
		 * NOTE(review): the loop below covers i = 0..6 only; if
		 * all eight queues are meant to be configured the bound
		 * should probably be 8 -- verify.
		 */
		for (i = 0; i < 7; i++) {
			iwk_reg_write(sc,
			    SCD_QUEUE_STATUS_BITS(i),
			    (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
			    (i << SCD_QUEUE_STTS_REG_POS_TXF)|
			    SCD_QUEUE_STTS_REG_MSK);
		}
		iwk_mac_access_exit(sc);

		/* wake whoever is blocked waiting for firmware init */
		sc->sc_flags |= IWK_F_FW_INIT;
		cv_signal(&sc->sc_fw_cv);
	}

}
1919 
/*
 * Rx soft interrupt handler: drain the rx queue up to the index the
 * firmware has advanced to (published in shared memory), dispatch
 * each notification by type, then write the new read pointer back to
 * the device and re-enable interrupts.
 */
static uint_t
iwk_rx_softintr(caddr_t arg)
{
	iwk_sc_t *sc = (iwk_sc_t *)arg;
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_rx_desc_t *desc;
	iwk_rx_data_t *data;
	uint32_t index;

	mutex_enter(&sc->sc_glock);
	if (sc->sc_rx_softint_pending != 1) {
		/* not triggered from iwk_intr(); spurious invocation */
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_UNCLAIMED);
	}
	/* disable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, 0);
	mutex_exit(&sc->sc_glock);

	/*
	 * firmware has moved the index of the rx queue, driver get it,
	 * and deal with it.
	 */
	index = LE_32(sc->sc_shared->val0) & 0xfff;

	while (sc->sc_rxq.cur != index) {
		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
		desc = (iwk_rx_desc_t *)data->dma_data.mem_va;

		IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d"
		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));

		/* a command other than a tx need to be replied */
		if (!(desc->hdr.qid & 0x80) &&
		    (desc->hdr.type != REPLY_RX_PHY_CMD) &&
		    (desc->hdr.type != REPLY_TX))
			iwk_cmd_intr(sc, desc);

		switch (desc->hdr.type) {
		case REPLY_4965_RX:
			iwk_rx_intr(sc, desc, data);
			break;

		case REPLY_TX:
			iwk_tx_intr(sc, desc, data);
			break;

		case REPLY_ALIVE:
			iwk_ucode_alive(sc, desc);
			break;

		case CARD_STATE_NOTIFICATION:
		{
			uint32_t *status = (uint32_t *)(desc + 1);

			IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n",
			    LE_32(*status)));

			if (LE_32(*status) & 1) {
				/* the radio button has to be pushed */
				cmn_err(CE_NOTE,
				    "iwk: Radio transmitter is off\n");
			}
			break;
		}
		case SCAN_START_NOTIFICATION:
		{
			iwk_start_scan_t *scan =
			    (iwk_start_scan_t *)(desc + 1);

			IWK_DBG((IWK_DEBUG_SCAN,
			    "scanning channel %d status %x\n",
			    scan->chan, LE_32(scan->status)));

			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
			break;
		}
		case SCAN_COMPLETE_NOTIFICATION:
			IWK_DBG((IWK_DEBUG_SCAN, "scan finished\n"));
			ieee80211_end_scan(ic);
			break;
		}

		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
	}

	/*
	 * The driver dealt with what was received in the rx queue and
	 * tells the firmware by updating the read pointer (rounded
	 * down to a multiple of 8 -- presumably a hardware alignment
	 * requirement).
	 */
	index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));

	mutex_enter(&sc->sc_glock);
	/* re-enable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
	sc->sc_rx_softint_pending = 0;
	mutex_exit(&sc->sc_glock);

	return (DDI_INTR_CLAIMED);
}
2022 
/*
 * Hard interrupt handler.  Reads and acks CSR_INT/CSR_FH_INT_STATUS,
 * handles fatal-error and RF-kill conditions, and triggers the Rx
 * soft interrupt for receive work.  Interrupts stay masked between
 * the ack and the final re-enable; on the fatal path they stay off
 * and iwk_stop()/recovery takes over.
 */
static uint_t
iwk_intr(caddr_t arg)
{
	iwk_sc_t *sc = (iwk_sc_t *)arg;
	uint32_t r, rfh;

	mutex_enter(&sc->sc_glock);
	r = IWK_READ(sc, CSR_INT);
	if (r == 0 || r == 0xffffffff) {
		/* not ours (0) or the device has vanished (all ones) */
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_UNCLAIMED);
	}

	IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));

	rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
	IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
	/* disable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, 0);
	/* ack interrupts */
	IWK_WRITE(sc, CSR_INT, r);
	IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);

	if (sc->sc_rx_softint_id == NULL) {
		/* soft interrupt not registered yet; ack only and claim */
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_CLAIMED);
	}

	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
		/* fatal firmware error: stop the device and recover */
		IWK_DBG((IWK_DEBUG_FW, "fatal firmware error\n"));
		mutex_exit(&sc->sc_glock);
		iwk_stop(sc);
		sc->sc_ostate = sc->sc_ic.ic_state;
		ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
		return (DDI_INTR_CLAIMED);
	}

	if (r & BIT_INT_RF_KILL) {
		IWK_DBG((IWK_DEBUG_RADIO, "RF kill\n"));
	}

	/* receive work is deferred to the soft interrupt */
	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
	    (rfh & FH_INT_RX_MASK)) {
		sc->sc_rx_softint_pending = 1;
		ddi_trigger_softintr(sc->sc_rx_softint_id);
	}

	if (r & BIT_INT_ALIVE)	{
		IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
	}

	/* re-enable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
	mutex_exit(&sc->sc_glock);

	return (DDI_INTR_CLAIMED);
}
2081 
/*
 * Map an 802.11 rate (in 500Kb/s units) to the firmware's PLCP
 * rate code.  Unknown rates map to 0.
 */
static uint8_t
iwk_rate_to_plcp(int rate)
{
	static const struct {
		int	r_rate;		/* 802.11 rate, 500Kb/s units */
		uint8_t	r_plcp;		/* firmware PLCP code */
	} rate_tbl[] = {
		/* CCK rates */
		{ 2,	0xa },
		{ 4,	0x14 },
		{ 11,	0x37 },
		{ 22,	0x6e },
		/* OFDM rates */
		{ 12,	0xd },
		{ 18,	0xf },
		{ 24,	0x5 },
		{ 36,	0x7 },
		{ 48,	0x9 },
		{ 72,	0xb },
		{ 96,	0x1 },
		{ 108,	0x3 },
	};
	size_t i;

	for (i = 0; i < sizeof (rate_tbl) / sizeof (rate_tbl[0]); i++) {
		if (rate_tbl[i].r_rate == rate)
			return (rate_tbl[i].r_plcp);
	}
	return (0);
}
2132 
2133 static mblk_t *
2134 iwk_m_tx(void *arg, mblk_t *mp)
2135 {
2136 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2137 	ieee80211com_t	*ic = &sc->sc_ic;
2138 	mblk_t			*next;
2139 
2140 	if (ic->ic_state != IEEE80211_S_RUN) {
2141 		freemsgchain(mp);
2142 		return (NULL);
2143 	}
2144 
2145 	while (mp != NULL) {
2146 		next = mp->b_next;
2147 		mp->b_next = NULL;
2148 		if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2149 			mp->b_next = next;
2150 			break;
2151 		}
2152 		mp = next;
2153 	}
2154 	return (mp);
2155 }
2156 
/*
 * Transmit a single frame.
 *
 * The mblk chain is flattened into one contiguous buffer, encapsulated
 * (and, if needed, encrypted) by net80211, and then handed to the
 * firmware as a TX command with two DMA segments: the first covers the
 * command plus the 802.11 header, the second the rest of the frame.
 *
 * Returns IWK_FAIL only when the TX ring is saturated, so that the
 * caller (iwk_m_tx) re-queues the frame; frames dropped for other
 * reasons return IWK_SUCCESS to avoid endless rescheduling.
 */
/* ARGSUSED */
static int
iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
{
	iwk_sc_t *sc = (iwk_sc_t *)ic;
	iwk_tx_ring_t *ring;
	iwk_tx_desc_t *desc;
	iwk_tx_data_t *data;
	iwk_cmd_t *cmd;
	iwk_tx_cmd_t *tx;
	ieee80211_node_t *in;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	mblk_t *m, *m0;
	int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS;
	uint16_t masks = 0;

	ring = &sc->sc_txq[0];
	data = &ring->data[ring->cur];
	desc = data->desc;
	cmd = data->cmd;
	bzero(desc, sizeof (*desc));
	bzero(cmd, sizeof (*cmd));

	mutex_enter(&sc->sc_tx_lock);
	/*
	 * Keep a 64-slot reserve in the ring.  When it is reached, ask
	 * the MAC layer to back off (sc_need_reschedule); non-data
	 * frames are dropped, data frames stay with the caller.
	 */
	if (ring->queued > ring->count - 64) {
		IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n"));
		sc->sc_need_reschedule = 1;
		mutex_exit(&sc->sc_tx_lock);
		if ((type & IEEE80211_FC0_TYPE_MASK) !=
		    IEEE80211_FC0_TYPE_DATA) {
			freemsg(mp);
		}
		sc->sc_tx_nobuf++;
		err = IWK_FAIL;
		goto exit;
	}
	mutex_exit(&sc->sc_tx_lock);

	hdrlen = sizeof (struct ieee80211_frame);

	/* flatten the mblk chain into one contiguous message */
	m = allocb(msgdsize(mp) + 32, BPRI_MED);
	if (m == NULL) { /* can not alloc buf, drop this package */
		cmn_err(CE_WARN,
		    "iwk_send(): failed to allocate msgbuf\n");
		freemsg(mp);
		err = IWK_SUCCESS;
		goto exit;
	}
	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
		mblen = MBLKL(m0);
		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
		off += mblen;
	}
	m->b_wptr += off;
	freemsg(mp);

	wh = (struct ieee80211_frame *)m->b_rptr;

	in = ieee80211_find_txnode(ic, wh->i_addr1);
	if (in == NULL) {
		cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n");
		freemsg(m);
		sc->sc_tx_err++;
		err = IWK_SUCCESS;
		goto exit;
	}
	(void) ieee80211_encap(ic, m, in);

	cmd->hdr.type = REPLY_TX;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (iwk_tx_cmd_t *)cmd->data;
	tx->tx_flags = 0;

	/* multicast frames are not acknowledged */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
	} else {
		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
	}

	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, m);
		if (k == NULL) {
			freemsg(m);
			sc->sc_tx_err++;
			err = IWK_SUCCESS;
			goto exit;
		}

		/* for CCMP, pass the key so the hardware can encrypt */
		if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
			tx->sec_ctl = 2; /* for CCMP */
			tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
			(void) memcpy(&tx->key, k->wk_key, k->wk_keylen);
		}

		/* packet header may have moved, reset our local pointer */
		wh = (struct ieee80211_frame *)m->b_rptr;
	}

	len = msgdsize(m);

#ifdef DEBUG
	if (iwk_dbg_flags & IWK_DEBUG_TX)
		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
#endif

	/* pickup a rate */
	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	    IEEE80211_FC0_TYPE_MGT) {
		/* mgmt frames are sent at 1M */
		rate = in->in_rates.ir_rates[0];
	} else {
		/*
		 * do it later: rate scaling in hardware.
		 * maybe like the following, for management frame:
		 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1;
		 * for data frame:
		 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK));
		 * rate = in->in_rates.ir_rates[in->in_txrate];
		 * tx->initial_rate_index = 1;
		 *
		 * now the txrate is determined in tx cmd flags, set to the
		 * max value 54M for 11g and 11M for 11b.
		 */

		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
			rate = ic->ic_fixed_rate;
		} else {
			in->in_txrate = in->in_rates.ir_nrates - 1;
			rate = in->in_rates.ir_rates[in->in_txrate];
		}
	}
	rate &= IEEE80211_RATE_VAL;
	IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x",
	    in->in_txrate, in->in_rates.ir_nrates, rate));

	tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK));

	/* mark when the header needed padding to a 4-byte boundary */
	len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4);
	if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen))
		tx->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* retrieve destination node's id */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		tx->sta_id = IWK_BROADCAST_ID;
	} else {
		tx->sta_id = IWK_AP_ID;
	}

	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	    IEEE80211_FC0_TYPE_MGT) {
		/* tell h/w to set timestamp in probe responses */
		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);

		/* (re)assoc requests get a slightly longer PM timeout */
		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ))
			tx->timeout.pm_frame_timeout = 3;
		else
			tx->timeout.pm_frame_timeout = 2;
	} else
		tx->timeout.pm_frame_timeout = 0;
	/* CCK rates need the CCK modulation flag */
	if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
		masks |= RATE_MCS_CCK_MSK;

	masks |= RATE_MCS_ANT_B_MSK;
	tx->rate.r.rate_n_flags = (iwk_rate_to_plcp(rate) | masks);

	IWK_DBG((IWK_DEBUG_TX, "tx flag = %x",
	    tx->tx_flags));

	tx->rts_retry_limit = 60;
	tx->data_retry_limit = 15;

	tx->stop_time.life_time  = LE_32(0xffffffff);

	tx->len = LE_16(len);

	tx->dram_lsb_ptr =
	    data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch);
	tx->dram_msb_ptr = 0;
	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	/* 802.11 header rides with the TX cmd; body goes in the DMA buf */
	(void) memcpy(tx + 1, m->b_rptr, hdrlen);
	m->b_rptr += hdrlen;
	(void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);

	IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d",
	    ring->qid, ring->cur, len));

	/*
	 * first segment includes the tx cmd plus the 802.11 header,
	 * the second includes the remaining of the 802.11 frame.
	 */
	desc->val0 = LE_32(2 << 24);
	desc->pa[0].tb1_addr = LE_32(data->paddr_cmd);
	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
	desc->pa[0].val2 =
	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
	    ((len - hdrlen) << 20);
	IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x "
	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));

	mutex_enter(&sc->sc_tx_lock);
	ring->queued++;
	mutex_exit(&sc->sc_tx_lock);

	/* kick ring */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].tfd_offset[ring->cur].val
	    = 8 + len;
	/*
	 * duplicate the byte count past the queue end — presumably a
	 * shadow entry for the scheduler's wrap-around window; confirm
	 * against the hardware documentation
	 */
	if (ring->cur < IWK_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len;
	}

	IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	ring->cur = (ring->cur + 1) % ring->count;
	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
	freemsg(m);
	/* release node reference */
	ieee80211_free_node(in);

	ic->ic_stats.is_tx_bytes += len;
	ic->ic_stats.is_tx_frags++;

	/* arm the TX watchdog checked by iwk_thread() */
	if (sc->sc_tx_timer == 0)
		sc->sc_tx_timer = 10;
exit:
	return (err);
}
2399 
2400 static void
2401 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
2402 {
2403 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2404 	ieee80211com_t	*ic = &sc->sc_ic;
2405 	int		err;
2406 
2407 	err = ieee80211_ioctl(ic, wq, mp);
2408 	if (err == ENETRESET) {
2409 		(void) ieee80211_new_state(ic,
2410 		    IEEE80211_S_SCAN, -1);
2411 	}
2412 }
2413 
/*
 * GLDv3 get-statistic entry point.  Returns counters kept by the
 * driver or net80211; stats not tracked here are delegated to
 * ieee80211_stat().
 */
/*ARGSUSED*/
static int
iwk_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
	ieee80211com_t	*ic = &sc->sc_ic;
	ieee80211_node_t *in = ic->ic_bss;
	struct ieee80211_rateset *rs = &in->in_rates;

	mutex_enter(&sc->sc_glock);
	switch (stat) {
	case MAC_STAT_IFSPEED:
		/*
		 * Current TX rate; ir_rates entries are in 500Kb/s units.
		 * NOTE(review): the 5000000 multiplier looks 10x too
		 * large for a bits-per-second result — confirm the
		 * intended unit before changing.
		 */
		*val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ?
		    (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL)
		    : ic->ic_fixed_rate) * 5000000ull;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = sc->sc_tx_nobuf;
		break;
	case MAC_STAT_NORCVBUF:
		*val = sc->sc_rx_nobuf;
		break;
	case MAC_STAT_IERRORS:
		*val = sc->sc_rx_err;
		break;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
		break;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
		break;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
		break;
	case MAC_STAT_OERRORS:
	case WIFI_STAT_TX_FAILED:
		*val = sc->sc_tx_err;
		break;
	case WIFI_STAT_TX_RETRANS:
		*val = sc->sc_tx_retries;
		break;
	case WIFI_STAT_FCS_ERRORS:
	case WIFI_STAT_WEP_ERRORS:
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		/* maintained by net80211, not by this driver */
		mutex_exit(&sc->sc_glock);
		return (ieee80211_stat(ic, stat, val));
	default:
		mutex_exit(&sc->sc_glock);
		return (ENOTSUP);
	}
	mutex_exit(&sc->sc_glock);

	return (IWK_SUCCESS);

}
2479 
2480 static int
2481 iwk_m_start(void *arg)
2482 {
2483 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2484 	ieee80211com_t	*ic = &sc->sc_ic;
2485 	int err;
2486 
2487 	err = iwk_init(sc);
2488 
2489 	if (err != IWK_SUCCESS) {
2490 		return (err);
2491 	}
2492 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2493 
2494 	return (err);
2495 }
2496 
2497 static void
2498 iwk_m_stop(void *arg)
2499 {
2500 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2501 	ieee80211com_t	*ic = &sc->sc_ic;
2502 
2503 	iwk_stop(sc);
2504 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2505 	mutex_enter(&sc->sc_mt_lock);
2506 	sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
2507 	mutex_exit(&sc->sc_mt_lock);
2508 }
2509 
2510 /*ARGSUSED*/
2511 static int
2512 iwk_m_unicst(void *arg, const uint8_t *macaddr)
2513 {
2514 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2515 	ieee80211com_t	*ic = &sc->sc_ic;
2516 	int err;
2517 
2518 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
2519 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
2520 		mutex_enter(&sc->sc_glock);
2521 		err = iwk_config(sc);
2522 		mutex_exit(&sc->sc_glock);
2523 		if (err != IWK_SUCCESS) {
2524 			cmn_err(CE_WARN,
2525 			    "iwk_m_unicst(): "
2526 			    "failed to configure device\n");
2527 			goto fail;
2528 		}
2529 	}
2530 	return (IWK_SUCCESS);
2531 fail:
2532 	return (err);
2533 }
2534 
/*
 * GLDv3 multicast entry point.  No multicast filter programming is
 * performed by this driver; the request is accepted as a no-op.
 */
/*ARGSUSED*/
static int
iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m)
{
	return (IWK_SUCCESS);
}
2541 
/*
 * GLDv3 promiscuous-mode entry point.  No device reprogramming is
 * performed here; the request is accepted as a no-op.
 */
/*ARGSUSED*/
static int
iwk_m_promisc(void *arg, boolean_t on)
{
	return (IWK_SUCCESS);
}
2548 
/*
 * Driver monitor thread; runs until sc_mf_thread_switch is cleared.
 * In a 100ms polling loop it performs two jobs: re-initializing the
 * hardware after a fatal firmware error (IWK_F_HW_ERR_RECOVER, set by
 * iwk_intr() or the TX watchdog below) and counting down the TX
 * watchdog timer armed in iwk_send().
 */
static void
iwk_thread(iwk_sc_t *sc)
{
	ieee80211com_t	*ic = &sc->sc_ic;
	int times = 0, err, n = 0, timeout = 0;

	mutex_enter(&sc->sc_mt_lock);
	while (sc->sc_mf_thread_switch) {
		/*
		 * recovery fatal error
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) {

			IWK_DBG((IWK_DEBUG_FW,
			    "iwk_thread(): "
			    "try to recover fatal hw error: %d\n", times++));

			iwk_stop(sc);
			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

			/* back off before re-init; grows with each failure */
			mutex_exit(&sc->sc_mt_lock);
			delay(drv_usectohz(2000000 + n*500000));
			mutex_enter(&sc->sc_mt_lock);
			err = iwk_init(sc);
			/* retry initialization up to 20 times */
			if (err != IWK_SUCCESS) {
				n++;
				if (n < 20)
					continue;
			}
			n = 0;
			sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
			mutex_exit(&sc->sc_mt_lock);
			delay(drv_usectohz(2000000));
			/* rescan unless we were idle before the error */
			if (sc->sc_ostate != IEEE80211_S_INIT)
				ieee80211_new_state(ic, IEEE80211_S_SCAN, 0);
			mutex_enter(&sc->sc_mt_lock);
		}

		/* poll interval: 100ms, with the lock dropped */
		mutex_exit(&sc->sc_mt_lock);
		delay(drv_usectohz(100000));
		mutex_enter(&sc->sc_mt_lock);

		/*
		 * TX watchdog: sc_tx_timer is armed in iwk_send() and
		 * decremented here once every 10 polls (~1s); when it
		 * expires, trigger a hardware recovery.
		 */
		if (sc->sc_tx_timer) {
			timeout++;
			if (timeout == 10) {
				sc->sc_tx_timer--;
				if (sc->sc_tx_timer == 0) {
					sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
					sc->sc_ostate = IEEE80211_S_RUN;
					IWK_DBG((IWK_DEBUG_FW,
					    "iwk_thread(): try to recover from"
					    " 'send fail\n"));
				}
				timeout = 0;
			}
		}

	}
	/* let the waiter in detach know the thread has exited */
	sc->sc_mf_thread = NULL;
	cv_signal(&sc->sc_mt_cv);
	mutex_exit(&sc->sc_mt_lock);
}
2612 
2613 
2614 /*
2615  * Send a command to the firmware.
2616  */
/*
 * Queue a firmware command of `size' bytes from `buf' on the command
 * ring.  Caller must hold sc_glock.  When `async' is zero, wait up to
 * 2s for IWK_F_CMD_DONE to be set (presumably by the command-complete
 * notification path); sc_glock is dropped while in cv_timedwait().
 * Returns IWK_SUCCESS, or IWK_FAIL on synchronous timeout.
 */
static int
iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async)
{
	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
	iwk_tx_desc_t *desc;
	iwk_cmd_t *cmd;

	ASSERT(size <= sizeof (cmd->data));
	ASSERT(mutex_owned(&sc->sc_glock));

	IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code));
	desc = ring->data[ring->cur].desc;
	cmd = ring->data[ring->cur].cmd;

	/* build the command header and copy in the payload */
	cmd->hdr.type = (uint8_t)code;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;
	(void) memcpy(cmd->data, buf, size);
	(void) memset(desc, 0, sizeof (*desc));

	/* a single DMA segment covering the header plus payload */
	desc->val0 = LE_32(1 << 24);
	desc->pa[0].tb1_addr =
	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;

	/* kick cmd ring XXX */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
	    .tfd_offset[ring->cur].val = 8;
	if (ring->cur < IWK_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
		    .tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
	}
	ring->cur = (ring->cur + 1) % ring->count;
	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (async)
		return (IWK_SUCCESS);
	else {
		clock_t clk;
		sc->sc_flags &= ~IWK_F_CMD_DONE;
		clk = ddi_get_lbolt() + drv_usectohz(2000000);
		/* wait for completion or the 2s deadline */
		while (!(sc->sc_flags & IWK_F_CMD_DONE)) {
			if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk)
			    < 0)
				break;
		}
		if (sc->sc_flags & IWK_F_CMD_DONE)
			return (IWK_SUCCESS);
		else
			return (IWK_FAIL);
	}
}
2670 
2671 static void
2672 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
2673 {
2674 	iwk_led_cmd_t led;
2675 
2676 	led.interval = LE_32(100000);	/* unit: 100ms */
2677 	led.id = id;
2678 	led.off = off;
2679 	led.on = on;
2680 
2681 	(void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
2682 }
2683 
2684 static int
2685 iwk_hw_set_before_auth(iwk_sc_t *sc)
2686 {
2687 	ieee80211com_t *ic = &sc->sc_ic;
2688 	ieee80211_node_t *in = ic->ic_bss;
2689 	iwk_tx_power_table_cmd_t txpower;
2690 	iwk_add_sta_t node;
2691 	iwk_link_quality_cmd_t link_quality;
2692 	struct ieee80211_rateset rs;
2693 	uint16_t masks = 0, rate;
2694 	int i, err;
2695 
2696 	/* update adapter's configuration according the info of target AP */
2697 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
2698 	sc->sc_config.chan = ieee80211_chan2ieee(ic, in->in_chan);
2699 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
2700 		sc->sc_config.cck_basic_rates  = 0x03;
2701 		sc->sc_config.ofdm_basic_rates = 0;
2702 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
2703 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
2704 		sc->sc_config.cck_basic_rates  = 0;
2705 		sc->sc_config.ofdm_basic_rates = 0x15;
2706 	} else { /* assume 802.11b/g */
2707 		sc->sc_config.cck_basic_rates  = 0x0f;
2708 		sc->sc_config.ofdm_basic_rates = 0xff;
2709 	}
2710 
2711 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
2712 	    RXON_FLG_SHORT_SLOT_MSK);
2713 
2714 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
2715 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
2716 	else
2717 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
2718 
2719 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
2720 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
2721 	else
2722 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
2723 
2724 	IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x "
2725 	    "filter_flags %x  cck %x ofdm %x"
2726 	    " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
2727 	    sc->sc_config.chan, sc->sc_config.flags,
2728 	    sc->sc_config.filter_flags,
2729 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
2730 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
2731 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
2732 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
2733 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
2734 	    sizeof (iwk_rxon_cmd_t), 1);
2735 	if (err != IWK_SUCCESS) {
2736 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
2737 		    " failed to config chan%d\n",
2738 		    sc->sc_config.chan);
2739 		return (err);
2740 	}
2741 
2742 	/*
2743 	 * set Tx power for 2.4GHz channels
2744 	 * (need further investigation. fix tx power at present)
2745 	 */
2746 	(void) memset(&txpower, 0, sizeof (txpower));
2747 	txpower.band = 1; /* for 2.4G */
2748 	txpower.channel = sc->sc_config.chan;
2749 	txpower.channel_normal_width = 0;
2750 	for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) {
2751 		txpower.tx_power.ht_ofdm_power[i].s
2752 		    .ramon_tx_gain = 0x3f3f;
2753 		txpower.tx_power.ht_ofdm_power[i].s
2754 		    .dsp_predis_atten = 110 | (110 << 8);
2755 	}
2756 	txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES].
2757 	    s.ramon_tx_gain = 0x3f3f;
2758 	txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES].
2759 	    s.dsp_predis_atten = 110 | (110 << 8);
2760 	err = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
2761 	    sizeof (txpower), 1);
2762 	if (err != IWK_SUCCESS) {
2763 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
2764 		    " failed to set txpower\n");
2765 		return (err);
2766 	}
2767 
2768 	/* add default AP node */
2769 	(void) memset(&node, 0, sizeof (node));
2770 	IEEE80211_ADDR_COPY(node.bssid, in->in_bssid);
2771 	node.id = IWK_AP_ID;
2772 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
2773 	if (err != IWK_SUCCESS) {
2774 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
2775 		    " failed to add BSS node\n");
2776 		return (err);
2777 	}
2778 
2779 	/* TX_LINK_QUALITY cmd ? */
2780 	(void) memset(&link_quality, 0, sizeof (link_quality));
2781 	rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)];
2782 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2783 		if (i < rs.ir_nrates)
2784 			rate = rs.ir_rates[rs.ir_nrates - i];
2785 		else
2786 			rate = 2;
2787 		if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
2788 			masks |= RATE_MCS_CCK_MSK;
2789 		masks |= RATE_MCS_ANT_B_MSK;
2790 		masks &= ~RATE_MCS_ANT_A_MSK;
2791 		link_quality.rate_n_flags[i] =
2792 		    iwk_rate_to_plcp(rate) | masks;
2793 	}
2794 
2795 	link_quality.general_params.single_stream_ant_msk = 2;
2796 	link_quality.general_params.dual_stream_ant_msk = 3;
2797 	link_quality.agg_params.agg_dis_start_th = 3;
2798 	link_quality.agg_params.agg_time_limit = LE_16(4000);
2799 	link_quality.sta_id = IWK_AP_ID;
2800 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
2801 	    sizeof (link_quality), 1);
2802 	if (err != IWK_SUCCESS) {
2803 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
2804 		    "failed to config link quality table\n");
2805 		return (err);
2806 	}
2807 
2808 	return (IWK_SUCCESS);
2809 }
2810 
2811 /*
2812  * Send a scan request(assembly scan cmd) to the firmware.
2813  */
static int
iwk_scan(iwk_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
	iwk_tx_desc_t *desc;
	iwk_tx_data_t *data;
	iwk_cmd_t *cmd;
	iwk_scan_hdr_t *hdr;
	iwk_scan_chan_t *chan;
	struct ieee80211_frame *wh;
	ieee80211_node_t *in = ic->ic_bss;
	struct ieee80211_rateset *rs;
	enum ieee80211_phymode mode;
	uint8_t *frm;
	int i, pktlen, nrates;

	/* the scan command is assembled directly in the ring's DMA buffer */
	data = &ring->data[ring->cur];
	desc = data->desc;
	cmd = (iwk_cmd_t *)data->dma_data.mem_va;

	cmd->hdr.type = REPLY_SCAN_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	/* 0x40 presumably marks an oversized (DMA-resident) command;
	 * confirm against the firmware interface */
	cmd->hdr.idx = ring->cur | 0x40;

	hdr = (iwk_scan_hdr_t *)cmd->data;
	(void) memset(hdr, 0, sizeof (iwk_scan_hdr_t));
	/* scan 2.4GHz channels 1-11 (see the channel loop below) */
	hdr->nchan = 11;
	hdr->quiet_time = LE_16(5);
	hdr->quiet_plcp_th = LE_16(1);

	hdr->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
	hdr->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	/* probe requests go out at 1Mb/s CCK from the broadcast station */
	hdr->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	hdr->tx_cmd.sta_id = IWK_BROADCAST_ID;
	hdr->tx_cmd.stop_time.life_time = 0xffffffff;
	hdr->tx_cmd.tx_flags |= (0x200);
	hdr->tx_cmd.rate.r.rate_n_flags = iwk_rate_to_plcp(2);
	hdr->tx_cmd.rate.r.rate_n_flags |=
	    (RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
	/* directed scan entry for the desired ESSID (if any) */
	hdr->direct_scan[0].len = ic->ic_des_esslen;
	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;

	if (ic->ic_des_esslen)
		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
		    ic->ic_des_esslen);
	else
		bzero(hdr->direct_scan[0].ssid,
		    sizeof (hdr->direct_scan[0].ssid));
	/*
	 * a probe request frame is required after the REPLY_SCAN_CMD
	 */
	wh = (struct ieee80211_frame *)(hdr + 1);
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	(void) memset(wh->i_addr1, 0xff, 6);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
	(void) memset(wh->i_addr3, 0xff, 6);
	*(uint16_t *)&wh->i_dur[0] = 0;
	*(uint16_t *)&wh->i_seq[0] = 0;

	frm = (uint8_t *)(wh + 1);

	/* essid IE */
	*frm++ = IEEE80211_ELEMID_SSID;
	*frm++ = in->in_esslen;
	(void) memcpy(frm, in->in_essid, in->in_esslen);
	frm += in->in_esslen;

	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
	rs = &ic->ic_sup_rates[mode];

	/* supported rates IE (at most IEEE80211_RATE_SIZE entries) */
	*frm++ = IEEE80211_ELEMID_RATES;
	nrates = rs->ir_nrates;
	if (nrates > IEEE80211_RATE_SIZE)
		nrates = IEEE80211_RATE_SIZE;
	*frm++ = (uint8_t)nrates;
	(void) memcpy(frm, rs->ir_rates, nrates);
	frm += nrates;

	/* supported xrates IE (overflow from the rates IE) */
	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
		*frm++ = IEEE80211_ELEMID_XRATES;
		*frm++ = (uint8_t)nrates;
		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
		frm += nrates;
	}

	/* optional IE (usually for wpa) */
	if (ic->ic_opt_ie != NULL) {
		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
		frm += ic->ic_opt_ie_len;
	}

	/* setup length of probe request */
	hdr->tx_cmd.len = LE_16(frm - (uint8_t *)wh);
	hdr->len = hdr->nchan * sizeof (iwk_scan_chan_t) +
	    hdr->tx_cmd.len + sizeof (iwk_scan_hdr_t);

	/*
	 * the attribute of the scan channels are required after the probe
	 * request frame.
	 */
	chan = (iwk_scan_chan_t *)frm;
	for (i = 1; i <= hdr->nchan; i++, chan++) {
		chan->type = 3;
		chan->chan = (uint8_t)i;
		chan->tpc.tx_gain = 0x3f;
		chan->tpc.dsp_atten = 110;
		chan->active_dwell = LE_16(20);
		chan->passive_dwell = LE_16(120);

		/* frm tracks the command end for the pktlen below */
		frm += sizeof (iwk_scan_chan_t);
	}

	pktlen = frm - (uint8_t *)cmd;

	(void) memset(desc, 0, sizeof (*desc));
	desc->val0 = LE_32(1 << 24);
	desc->pa[0].tb1_addr =
	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;

	/*
	 * maybe for cmd, filling the byte cnt table is not necessary.
	 * anyway, we fill it here.
	 */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
	    .tfd_offset[ring->cur].val = 8;
	if (ring->cur < IWK_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
		    .tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
	}
#if 0
	IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
#endif
	/* kick cmd ring */
	ring->cur = (ring->cur + 1) % ring->count;
	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	return (IWK_SUCCESS);
}
2965 
/*
 * Bring the adapter to its base configuration: power management off,
 * bluetooth coexistence, RXON (mode/channel/rates/filters), fixed TX
 * power, the broadcast station entry, and an initial link quality
 * table.  Returns IWK_SUCCESS or the error of the first failing
 * firmware command.
 */
static int
iwk_config(iwk_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_tx_power_table_cmd_t txpower;
	iwk_powertable_cmd_t powertable;
	iwk_bt_cmd_t bt;
	iwk_add_sta_t node;
	iwk_link_quality_cmd_t link_quality;
	int i, err;
	uint16_t masks = 0;

	/*
	 * set power mode. Disable power management at present, do it later
	 */
	(void) memset(&powertable, 0, sizeof (powertable));
	powertable.flags = LE_16(0x8);
	err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable,
	    sizeof (powertable), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n");
		return (err);
	}

	/* configure bt coexistence */
	(void) memset(&bt, 0, sizeof (bt));
	bt.flags = 3;
	bt.lead_time = 0xaa;
	bt.max_kill = 1;
	err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt,
	    sizeof (bt), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN,
		    "iwk_config(): "
		    "failed to configurate bt coexistence\n");
		return (err);
	}

	/* configure rxon */
	(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
	sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
	sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK | RXON_FLG_AUTO_DETECT_MSK
	    | RXON_FLG_BAND_24G_MSK);
	sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
	/* device type and RX filters depend on the operating mode */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_DIS_DECRYPT_MSK |
		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
		break;
	case IEEE80211_M_HOSTAP:
		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
		break;
	}
	sc->sc_config.cck_basic_rates  = 0x0f;
	sc->sc_config.ofdm_basic_rates = 0xff;

	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;

	/* set antenna */

	sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
	    sizeof (iwk_rxon_cmd_t), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): "
		    "failed to set configure command\n");
		return (err);
	}

	/*
	 * set Tx power for 2.4GHz channels
	 * (need further investigation. fix tx power at present)
	 */
	(void) memset(&txpower, 0, sizeof (txpower));
	txpower.band = 1; /* for 2.4G */
	txpower.channel = sc->sc_config.chan;
	txpower.channel_normal_width = 0;
	for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) {
		txpower.tx_power.ht_ofdm_power[i]
		    .s.ramon_tx_gain = 0x3f3f;
		txpower.tx_power.ht_ofdm_power[i]
		    .s.dsp_predis_atten = 110 | (110 << 8);
	}
	txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES]
	    .s.ramon_tx_gain = 0x3f3f;
	txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES]
	    .s.dsp_predis_atten = 110 | (110 << 8);
	err = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
	    sizeof (txpower), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): failed to set txpower\n");
		return (err);
	}

	/* add broadcast node so that we can send broadcast frame */
	(void) memset(&node, 0, sizeof (node));
	(void) memset(node.bssid, 0xff, 6);
	node.id = IWK_BROADCAST_ID;
	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): "
		    "failed to add broadcast node\n");
		return (err);
	}

	/* TX_LINK_QUALITY cmd ? */
	/* all retry entries use 1Mb/s CCK on antenna B */
	(void) memset(&link_quality, 0, sizeof (link_quality));
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		masks |= RATE_MCS_CCK_MSK;
		masks |= RATE_MCS_ANT_B_MSK;
		masks &= ~RATE_MCS_ANT_A_MSK;
		link_quality.rate_n_flags[i] = iwk_rate_to_plcp(2) | masks;
	}

	link_quality.general_params.single_stream_ant_msk = 2;
	link_quality.general_params.dual_stream_ant_msk = 3;
	link_quality.agg_params.agg_dis_start_th = 3;
	link_quality.agg_params.agg_time_limit = LE_16(4000);
	link_quality.sta_id = IWK_BROADCAST_ID;
	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
	    sizeof (link_quality), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): "
		    "failed to config link quality table\n");
		return (err);
	}

	return (IWK_SUCCESS);
}
3113 
/*
 * Request the NIC's internal DMA master to stop and wait for the
 * hardware to acknowledge that all bus activity has ceased.
 */
static void
iwk_stop_master(iwk_sc_t *sc)
{
	uint32_t tmp;
	int n;

	/* ask the bus master to halt all pending DMA transactions */
	tmp = IWK_READ(sc, CSR_RESET);
	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);

	/*
	 * if the MAC is in its power-save state there is no active
	 * bus mastering to wait for, so return immediately
	 */
	tmp = IWK_READ(sc, CSR_GP_CNTRL);
	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE)
		return;

	/* poll up to 2s (2000 x 1ms) for the master-disabled acknowledge */
	for (n = 0; n < 2000; n++) {
		if (IWK_READ(sc, CSR_RESET) &
		    CSR_RESET_REG_FLAG_MASTER_DISABLED)
			break;
		DELAY(1000);
	}
	/* n == 2000 means the loop ran out without seeing the ack bit */
	if (n == 2000)
		IWK_DBG((IWK_DEBUG_HW,
		    "timeout waiting for master stop\n"));
}
3138 
/*
 * Switch the adapter's power source to main power (VMAIN).
 * Always returns IWK_SUCCESS.
 */
static int
iwk_power_up(iwk_sc_t *sc)
{
	uint32_t tmp;

	/* read-modify-write the power-control register under MAC access */
	iwk_mac_access_enter(sc);
	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	iwk_mac_access_exit(sc);

	/* give the hardware time to settle on the new power source */
	DELAY(5000);
	return (IWK_SUCCESS);
}
3154 
/*
 * Low-level hardware bring-up performed before firmware load:
 * enable clocks, power up the device, apply PCI config-space
 * workarounds and reset the radio/MAC power domains.
 * Returns IWK_SUCCESS, or ETIMEDOUT if the MAC clock never
 * becomes ready.
 */
static int
iwk_preinit(iwk_sc_t *sc)
{
	uint32_t tmp;
	int n;
	uint8_t vlink;

	/* clear any pending interrupts */
	IWK_WRITE(sc, CSR_INT, 0xffffffff);

	/* hardware workaround: disable the L0S exit timer */
	tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS);
	IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS,
	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/* tell the NIC that driver initialization is in progress */
	tmp = IWK_READ(sc, CSR_GP_CNTRL);
	IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock ready (up to 1000 x 10us = 10ms) */
	for (n = 0; n < 1000; n++) {
		if (IWK_READ(sc, CSR_GP_CNTRL) &
		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY)
			break;
		DELAY(10);
	}
	if (n == 1000) {
		return (ETIMEDOUT);
	}
	/* request DMA and BSM clocks */
	iwk_mac_access_enter(sc);
	tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG);
	iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp |
	    APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT);

	DELAY(20);
	/* disable L1-Active while the device is being initialized */
	tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT);
	iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
	iwk_mac_access_exit(sc);

	/* interrupt coalescing timer; exact units unclear (original "???") */
	IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? */

	/* switch to main power; iwk_power_up() cannot fail */
	(void) iwk_power_up(sc);

	/*
	 * PCI config-space quirk for early hardware revisions
	 * (rev bit7 set and low 7 bits < 8): clear bit 11 at offset
	 * 0xe8 -- presumably a power-management workaround; exact
	 * register semantics not visible here (NOTE(review): confirm
	 * against the 4965 hardware errata)
	 */
	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
		tmp = ddi_get32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
		ddi_put32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
		    tmp & ~(1 << 11));
	}


	/*
	 * clear bit 1 at config offset 0xf0 -- appears to disable a
	 * link power-save feature; NOTE(review): confirm meaning
	 */
	vlink = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
	    vlink & ~2);

	/* flag radio/MAC "SKU" bits in the hardware interface config */
	tmp = IWK_READ(sc, CSR_SW_VER);
	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI | CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R;
	IWK_WRITE(sc, CSR_SW_VER, tmp);

	/* make sure power supply on each part of the hardware */
	iwk_mac_access_enter(sc);
	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	DELAY(5);
	/* pulse the reset request: set, wait 5us, then clear */
	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	iwk_mac_access_exit(sc);
	return (IWK_SUCCESS);
}
3228 
3229 /*
3230  * set up semphore flag to own EEPROM
3231  */
static int iwk_eep_sem_down(iwk_sc_t *sc)
{
	int count1, count2;
	uint32_t tmp;

	/*
	 * repeatedly request the EEPROM semaphore and check whether the
	 * hardware granted it; worst case this polls for
	 * 1000 x 2 x 10ms = 20 seconds before giving up
	 */
	for (count1 = 0; count1 < 1000; count1++) {
		/* request ownership by setting the semaphore bit */
		tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
		IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);

		/* the bit reads back as set only once ownership is granted */
		for (count2 = 0; count2 < 2; count2++) {
			if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) &
			    CSR_HW_IF_CONFIG_REG_EEP_SEM)
				return (IWK_SUCCESS);
			DELAY(10000);
		}
	}
	return (IWK_FAIL);
}
3251 
3252 /*
3253  * reset semphore flag to release EEPROM
3254  */
static void iwk_eep_sem_up(iwk_sc_t *sc)
{
	uint32_t tmp;

	/* clear the semaphore bit; hardware is free to grant it elsewhere */
	tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
	IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
}
3263 
3264 /*
3265  * This function load all infomation in eeprom into iwk_eep
3266  * structure in iwk_sc_t structure
3267  */
static int iwk_eep_load(iwk_sc_t *sc)
{
	int i, rr;
	uint32_t rv, tmp, eep_gp;
	/* the EEPROM is read 16 bits at a time into sc_eep_map */
	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;

	/* read eeprom gp register in CSR; bail out on a bad signature */
	eep_gp = IWK_READ(sc, CSR_EEPROM_GP);
	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
	    CSR_EEPROM_GP_BAD_SIGNATURE) {
		IWK_DBG((IWK_DEBUG_EEPROM, "not find eeprom\n"));
		return (IWK_FAIL);
	}

	/* take the hardware semaphore so the uCode cannot race us */
	rr = iwk_eep_sem_down(sc);
	if (rr != 0) {
		IWK_DBG((IWK_DEBUG_EEPROM, "driver failed to own EEPROM\n"));
		return (IWK_FAIL);
	}

	/* read the EEPROM one 16-bit word at a time */
	for (addr = 0; addr < eep_sz; addr += 2) {
		/* write the byte address (addr<<1) to start a read cycle */
		IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1);
		tmp = IWK_READ(sc, CSR_EEPROM_REG);
		IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));

		/* poll up to 10 x 10us for the read-done bit (bit 0) */
		for (i = 0; i < 10; i++) {
			rv = IWK_READ(sc, CSR_EEPROM_REG);
			if (rv & 1)
				break;
			DELAY(10);
		}

		if (!(rv & 1)) {
			IWK_DBG((IWK_DEBUG_EEPROM,
			    "time out when read eeprome\n"));
			/* release the semaphore before failing */
			iwk_eep_sem_up(sc);
			return (IWK_FAIL);
		}

		/* the 16-bit data word lives in the upper half of the reg */
		eep_p[addr/2] = rv >> 16;
	}

	iwk_eep_sem_up(sc);
	return (IWK_SUCCESS);
}
3314 
3315 /*
3316  * init mac address in ieee80211com_t struct
3317  */
3318 static void iwk_get_mac_from_eep(iwk_sc_t *sc)
3319 {
3320 	ieee80211com_t *ic = &sc->sc_ic;
3321 	struct iwk_eep *ep = &sc->sc_eep_map;
3322 
3323 	IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address);
3324 
3325 	IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
3326 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
3327 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
3328 }
3329 
/*
 * Full device initialization: program the Rx/Tx DMA rings, enable
 * interrupts, load and boot the firmware, then configure the uCode
 * (rxon, tx power, etc.) via iwk_config().  Runs under sc_glock.
 * Returns IWK_SUCCESS or IWK_FAIL.
 */
static int
iwk_init(iwk_sc_t *sc)
{
	int qid, n, err;
	clock_t clk;

	mutex_enter(&sc->sc_glock);
	/* the firmware-alive interrupt will set this flag when uCode is up */
	sc->sc_flags &= ~IWK_F_FW_INIT;

	/* iwk_preinit() can return ETIMEDOUT; original code ignores it */
	(void) iwk_preinit(sc);

	/* init Rx ring */
	iwk_mac_access_enter(sc);
	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* program the Rx descriptor ring base (256-byte aligned, hence >>8) */
	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);

	/* Rx status write-back area inside the shared page (16-byte aligned) */
	IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
	    offsetof(struct iwk_shared, val0)) >> 4));

	/* enable Rx DMA: 4KB buffers, interrupt host on completion */
	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    (RX_QUEUE_SIZE_LOG <<
	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
	iwk_mac_access_exit(sc);
	/* hand all Rx buffers to the hardware (write ptr multiple of 8) */
	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
	    (RX_QUEUE_SIZE - 1) & ~0x7);

	/* init Tx rings */
	iwk_mac_access_enter(sc);
	/* disable the scheduler while rings are being programmed */
	iwk_reg_write(sc, SCD_TXFACT, 0);

	/* keep warn page */
	iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG,
	    sc->sc_dma_kw.cookie.dmac_address >> 4);

	/* program each Tx queue's descriptor base and enable its DMA channel */
	for (qid = 0; qid < IWK_NUM_QUEUES; qid++) {
		IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
		IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	}
	iwk_mac_access_exit(sc);

	/* clear "radio off" and "disable command" bits */
	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear any pending interrupts */
	IWK_WRITE(sc, CSR_INT, 0xffffffff);

	/* enable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	/*
	 * NOTE(review): these two writes repeat the RFKILL clear already
	 * done above; presumably harmless but looks redundant
	 */
	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/*
	 * backup ucode data part for future use.
	 */
	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
	    sc->sc_dma_fw_data.mem_va,
	    sc->sc_dma_fw_data.alength);

	/* try the firmware load at most twice */
	for (n = 0; n < 2; n++) {
		/* load firmware init segment into NIC */
		err = iwk_load_firmware(sc);
		if (err != IWK_SUCCESS) {
			cmn_err(CE_WARN, "iwk_init(): "
			    "failed to setup boot firmware\n");
			continue;
		}

		/* now press "execute" start running */
		IWK_WRITE(sc, CSR_RESET, 0);
		break;
	}
	/* n == 2 means both attempts failed */
	if (n == 2) {
		cmn_err(CE_WARN, "iwk_init(): " "failed to load firmware\n");
		goto fail1;
	}
	/*
	 * wait at most two seconds (2000000us) for the firmware-init
	 * interrupt handler to set IWK_F_FW_INIT and signal sc_fw_cv
	 */
	clk = ddi_get_lbolt() + drv_usectohz(2000000);
	while (!(sc->sc_flags & IWK_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0)
			break;
	}
	if (!(sc->sc_flags & IWK_F_FW_INIT)) {
		cmn_err(CE_WARN,
		    "iwk_init(): timeout waiting for firmware init\n");
		goto fail1;
	}

	/*
	 * at this point, the firmware is loaded OK, then config the hardware
	 * with the ucode API, including rxon, txpower, etc.
	 */
	err = iwk_config(sc);
	if (err) {
		cmn_err(CE_WARN, "iwk_init(): failed to configure device\n");
		goto fail1;
	}

	/* at this point, hardware may receive beacons :) */
	mutex_exit(&sc->sc_glock);
	return (IWK_SUCCESS);

fail1:
	err = IWK_FAIL;
	mutex_exit(&sc->sc_glock);
	return (err);
}
3449 
/*
 * Stop the adapter: reset the uCode, disable interrupts, tear down
 * the Tx/Rx rings, halt the DMA master and soft-reset the device.
 * Runs under sc_glock.
 */
static void
iwk_stop(iwk_sc_t *sc)
{
	uint32_t tmp;
	int i;


	mutex_enter(&sc->sc_glock);

	/* reset the on-board processor */
	IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
	/* disable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, 0);
	/* acknowledge/clear any interrupts still pending */
	IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
	IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);

	/* reset all Tx rings */
	for (i = 0; i < IWK_NUM_QUEUES; i++)
		iwk_reset_tx_ring(sc, &sc->sc_txq[i]);

	/* reset Rx ring */
	iwk_reset_rx_ring(sc);

	/* turn off the DMA clock request */
	iwk_mac_access_enter(sc);
	iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
	iwk_mac_access_exit(sc);

	DELAY(5);

	/* wait for any remaining bus-master activity to drain */
	iwk_stop_master(sc);

	/* cancel the pending-Tx watchdog and soft-reset the device */
	sc->sc_tx_timer = 0;
	tmp = IWK_READ(sc, CSR_RESET);
	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
	mutex_exit(&sc->sc_glock);
}
3485