xref: /illumos-gate/usr/src/uts/common/io/iwh/iwh.c (revision 33efde4275d24731ef87927237b0ffb0630b6b2d)
1 /*
2  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2009, Intel Corporation
8  * All rights reserved.
9  */
10 
11 /*
12  * Copyright (c) 2006
13  * Copyright (c) 2007
14  *	Damien Bergamini <damien.bergamini@free.fr>
15  *
16  * Permission to use, copy, modify, and distribute this software for any
17  * purpose with or without fee is hereby granted, provided that the above
18  * copyright notice and this permission notice appear in all copies.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27  */
28 
29 /*
30  * Intel(R) WiFi Link 5100/5300 Driver
31  */
32 
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac_provider.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/net80211_ht.h>
56 #include <sys/varargs.h>
57 #include <sys/policy.h>
58 #include <sys/pci.h>
59 
60 #include "iwh_calibration.h"
61 #include "iwh_hw.h"
62 #include "iwh_eeprom.h"
63 #include "iwh_var.h"
64 #include <inet/wifi_ioctl.h>
65 
66 #ifdef DEBUG
67 #define	IWH_DEBUG_80211		(1 << 0)
68 #define	IWH_DEBUG_CMD		(1 << 1)
69 #define	IWH_DEBUG_DMA		(1 << 2)
70 #define	IWH_DEBUG_EEPROM	(1 << 3)
71 #define	IWH_DEBUG_FW		(1 << 4)
72 #define	IWH_DEBUG_HW		(1 << 5)
73 #define	IWH_DEBUG_INTR		(1 << 6)
74 #define	IWH_DEBUG_MRR		(1 << 7)
75 #define	IWH_DEBUG_PIO		(1 << 8)
76 #define	IWH_DEBUG_RX		(1 << 9)
77 #define	IWH_DEBUG_SCAN		(1 << 10)
78 #define	IWH_DEBUG_TX		(1 << 11)
79 #define	IWH_DEBUG_RATECTL	(1 << 12)
80 #define	IWH_DEBUG_RADIO		(1 << 13)
81 #define	IWH_DEBUG_RESUME	(1 << 14)
82 #define	IWH_DEBUG_CALIBRATION	(1 << 15)
83 #define	IWH_DEBUG_BA		(1 << 16)
84 #define	IWH_DEBUG_RXON		(1 << 17)
85 #define	IWH_DEBUG_HWRATE	(1 << 18)
86 #define	IWH_DEBUG_HTRATE	(1 << 19)
87 #define	IWH_DEBUG_QOS		(1 << 20)
88 /*
89  * To see the debug messages of a given section, set this flag
90  * to one (or a bitwise OR) of the above values.
91  */
92 uint32_t iwh_dbg_flags = 0;
93 #define	IWH_DBG(x) \
94 	iwh_dbg x
95 #else
96 #define	IWH_DBG(x)
97 #endif
98 
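/*
 * The MS() macro below extracts a bit field from a register value: it masks
 * v with the field mask f and shifts right by the field's shift amount,
 * which is assumed to be a companion #define named f_S (token-pasted).
 * e.g. MS(reg, FOO) expands to ((reg & FOO) >> FOO_S).
 */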
99 #define	MS(v, f)    (((v) & f) >> f##_S)
100 
101 static void	*iwh_soft_state_p = NULL;
102 
103 /*
104  * the ucode is compiled into the driver image
105  */
106 static uint8_t iwh_fw_5000_bin[] = {
107 #include "fw-iw/fw_5000/iwh_5000.ucode"
108 };
109 
110 static uint8_t iwh_fw_5150_bin[] = {
111 #include "fw-iw/fw_5150/iwh_5150.ucode"
112 };
113 
114 /*
115  * DMA attributes for a shared page
116  */
117 static ddi_dma_attr_t sh_dma_attr = {
118 	DMA_ATTR_V0,	/* version of this structure */
119 	0,		/* lowest usable address */
120 	0xffffffffU,	/* highest usable address */
121 	0xffffffffU,	/* maximum DMAable byte count */
122 	0x1000,		/* alignment in bytes */
123 	0x1000,		/* burst sizes (any?) */
124 	1,		/* minimum transfer */
125 	0xffffffffU,	/* maximum transfer */
126 	0xffffffffU,	/* maximum segment length */
127 	1,		/* maximum number of segments */
128 	1,		/* granularity */
129 	0,		/* flags (reserved) */
130 };
131 
132 /*
133  * DMA attributes for a keep warm DRAM descriptor
134  */
135 static ddi_dma_attr_t kw_dma_attr = {
136 	DMA_ATTR_V0,	/* version of this structure */
137 	0,		/* lowest usable address */
138 	0xffffffffU,	/* highest usable address */
139 	0xffffffffU,	/* maximum DMAable byte count */
140 	0x1000,		/* alignment in bytes */
141 	0x1000,		/* burst sizes (any?) */
142 	1,		/* minimum transfer */
143 	0xffffffffU,	/* maximum transfer */
144 	0xffffffffU,	/* maximum segment length */
145 	1,		/* maximum number of segments */
146 	1,		/* granularity */
147 	0,		/* flags (reserved) */
148 };
149 
150 /*
151  * DMA attributes for a ring descriptor
152  */
153 static ddi_dma_attr_t ring_desc_dma_attr = {
154 	DMA_ATTR_V0,	/* version of this structure */
155 	0,		/* lowest usable address */
156 	0xffffffffU,	/* highest usable address */
157 	0xffffffffU,	/* maximum DMAable byte count */
158 	0x100,		/* alignment in bytes */
159 	0x100,		/* burst sizes (any?) */
160 	1,		/* minimum transfer */
161 	0xffffffffU,	/* maximum transfer */
162 	0xffffffffU,	/* maximum segment length */
163 	1,		/* maximum number of segments */
164 	1,		/* granularity */
165 	0,		/* flags (reserved) */
166 };
167 
168 /*
169  * DMA attributes for a cmd
170  */
171 static ddi_dma_attr_t cmd_dma_attr = {
172 	DMA_ATTR_V0,	/* version of this structure */
173 	0,		/* lowest usable address */
174 	0xffffffffU,	/* highest usable address */
175 	0xffffffffU,	/* maximum DMAable byte count */
176 	4,		/* alignment in bytes */
177 	0x100,		/* burst sizes (any?) */
178 	1,		/* minimum transfer */
179 	0xffffffffU,	/* maximum transfer */
180 	0xffffffffU,	/* maximum segment length */
181 	1,		/* maximum number of segments */
182 	1,		/* granularity */
183 	0,		/* flags (reserved) */
184 };
185 
186 /*
187  * DMA attributes for a rx buffer
188  */
189 static ddi_dma_attr_t rx_buffer_dma_attr = {
190 	DMA_ATTR_V0,	/* version of this structure */
191 	0,		/* lowest usable address */
192 	0xffffffffU,	/* highest usable address */
193 	0xffffffffU,	/* maximum DMAable byte count */
194 	0x100,		/* alignment in bytes */
195 	0x100,		/* burst sizes (any?) */
196 	1,		/* minimum transfer */
197 	0xffffffffU,	/* maximum transfer */
198 	0xffffffffU,	/* maximum segment length */
199 	1,		/* maximum number of segments */
200 	1,		/* granularity */
201 	0,		/* flags (reserved) */
202 };
203 
204 /*
205  * DMA attributes for a tx buffer.
206  * The hardware supports up to 4 segments per descriptor, but all
207  * the wifi drivers currently put the whole frame in a single
208  * descriptor, so the maximum number of segments is defined as 1,
209  * just like the rx buffer. We may leverage the hardware's
210  * multi-segment ability in the future, which is why the rx and tx
211  * buffer_dma_attr are kept as separate definitions.
212  */
213 static ddi_dma_attr_t tx_buffer_dma_attr = {
214 	DMA_ATTR_V0,	/* version of this structure */
215 	0,		/* lowest usable address */
216 	0xffffffffU,	/* highest usable address */
217 	0xffffffffU,	/* maximum DMAable byte count */
218 	4,		/* alignment in bytes */
219 	0x100,		/* burst sizes (any?) */
220 	1,		/* minimum transfer */
221 	0xffffffffU,	/* maximum transfer */
222 	0xffffffffU,	/* maximum segment length */
223 	1,		/* maximum number of segments */
224 	1,		/* granularity */
225 	0,		/* flags (reserved) */
226 };
227 
228 /*
229  * DMA attributes for text and data part in the firmware
230  */
231 static ddi_dma_attr_t fw_dma_attr = {
232 	DMA_ATTR_V0,	/* version of this structure */
233 	0,		/* lowest usable address */
234 	0xffffffffU,	/* highest usable address */
235 	0x7fffffff,	/* maximum DMAable byte count */
236 	0x10,		/* alignment in bytes */
237 	0x100,		/* burst sizes (any?) */
238 	1,		/* minimum transfer */
239 	0xffffffffU,	/* maximum transfer */
240 	0xffffffffU,	/* maximum segment length */
241 	1,		/* maximum number of segments */
242 	1,		/* granularity */
243 	0,		/* flags (reserved) */
244 };
245 
246 /*
247  * regs access attributes
248  */
249 static ddi_device_acc_attr_t iwh_reg_accattr = {
250 	DDI_DEVICE_ATTR_V0,
251 	DDI_STRUCTURE_LE_ACC,
252 	DDI_STRICTORDER_ACC,
253 	DDI_DEFAULT_ACC
254 };
255 
256 /*
257  * DMA access attributes for descriptor
258  */
259 static ddi_device_acc_attr_t iwh_dma_descattr = {
260 	DDI_DEVICE_ATTR_V0,
261 	DDI_STRUCTURE_LE_ACC,
262 	DDI_STRICTORDER_ACC,
263 	DDI_DEFAULT_ACC
264 };
265 
266 /*
267  * DMA access attributes
268  */
269 static ddi_device_acc_attr_t iwh_dma_accattr = {
270 	DDI_DEVICE_ATTR_V0,
271 	DDI_NEVERSWAP_ACC,
272 	DDI_STRICTORDER_ACC,
273 	DDI_DEFAULT_ACC
274 };
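/*
 * Note on the access attributes above: register and descriptor accesses use
 * DDI_STRUCTURE_LE_ACC because the device lays those structures out in
 * little-endian byte order, while plain data buffers use DDI_NEVERSWAP_ACC
 * so their bytes are never swapped.
 */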
275 
276 static int	iwh_ring_init(iwh_sc_t *);
277 static void	iwh_ring_free(iwh_sc_t *);
278 static int	iwh_alloc_shared(iwh_sc_t *);
279 static void	iwh_free_shared(iwh_sc_t *);
280 static int	iwh_alloc_kw(iwh_sc_t *);
281 static void	iwh_free_kw(iwh_sc_t *);
282 static int	iwh_alloc_fw_dma(iwh_sc_t *);
283 static void	iwh_free_fw_dma(iwh_sc_t *);
284 static int	iwh_alloc_rx_ring(iwh_sc_t *);
285 static void	iwh_reset_rx_ring(iwh_sc_t *);
286 static void	iwh_free_rx_ring(iwh_sc_t *);
287 static int	iwh_alloc_tx_ring(iwh_sc_t *, iwh_tx_ring_t *,
288     int, int);
289 static void	iwh_reset_tx_ring(iwh_sc_t *, iwh_tx_ring_t *);
290 static void	iwh_free_tx_ring(iwh_tx_ring_t *);
291 static ieee80211_node_t *iwh_node_alloc(ieee80211com_t *);
292 static void	iwh_node_free(ieee80211_node_t *);
293 static int	iwh_newstate(ieee80211com_t *, enum ieee80211_state, int);
294 static void	iwh_mac_access_enter(iwh_sc_t *);
295 static void	iwh_mac_access_exit(iwh_sc_t *);
296 static uint32_t	iwh_reg_read(iwh_sc_t *, uint32_t);
297 static void	iwh_reg_write(iwh_sc_t *, uint32_t, uint32_t);
298 static int	iwh_load_init_firmware(iwh_sc_t *);
299 static int	iwh_load_run_firmware(iwh_sc_t *);
300 static void	iwh_tx_intr(iwh_sc_t *, iwh_rx_desc_t *);
301 static void	iwh_cmd_intr(iwh_sc_t *, iwh_rx_desc_t *);
302 static uint_t   iwh_intr(caddr_t, caddr_t);
303 static int	iwh_eep_load(iwh_sc_t *);
304 static void	iwh_get_mac_from_eep(iwh_sc_t *);
305 static int	iwh_eep_sem_down(iwh_sc_t *);
306 static void	iwh_eep_sem_up(iwh_sc_t *);
307 static uint_t   iwh_rx_softintr(caddr_t, caddr_t);
308 static uint8_t	iwh_rate_to_plcp(int);
309 static int	iwh_cmd(iwh_sc_t *, int, const void *, int, int);
310 static void	iwh_set_led(iwh_sc_t *, uint8_t, uint8_t, uint8_t);
311 static int	iwh_hw_set_before_auth(iwh_sc_t *);
312 static int	iwh_scan(iwh_sc_t *);
313 static int	iwh_config(iwh_sc_t *);
314 static void	iwh_stop_master(iwh_sc_t *);
315 static int	iwh_power_up(iwh_sc_t *);
316 static int	iwh_preinit(iwh_sc_t *);
317 static int	iwh_init(iwh_sc_t *);
318 static void	iwh_stop(iwh_sc_t *);
319 static int	iwh_quiesce(dev_info_t *t);
320 static void	iwh_amrr_init(iwh_amrr_t *);
321 static void	iwh_amrr_timeout(iwh_sc_t *);
322 static void	iwh_amrr_ratectl(void *, ieee80211_node_t *);
323 static void	iwh_ucode_alive(iwh_sc_t *, iwh_rx_desc_t *);
324 static void	iwh_rx_phy_intr(iwh_sc_t *, iwh_rx_desc_t *);
325 static void	iwh_rx_mpdu_intr(iwh_sc_t *, iwh_rx_desc_t *);
326 static void	iwh_release_calib_buffer(iwh_sc_t *);
327 static int	iwh_init_common(iwh_sc_t *);
328 static uint8_t	*iwh_eep_addr_trans(iwh_sc_t *, uint32_t);
329 static int	iwh_put_seg_fw(iwh_sc_t *, uint32_t, uint32_t, uint32_t);
330 static	int	iwh_alive_common(iwh_sc_t *);
331 static void	iwh_save_calib_result(iwh_sc_t *, iwh_rx_desc_t *);
332 static int	iwh_tx_power_table(iwh_sc_t *, int);
333 static int	iwh_attach(dev_info_t *, ddi_attach_cmd_t);
334 static int	iwh_detach(dev_info_t *, ddi_detach_cmd_t);
335 static void	iwh_destroy_locks(iwh_sc_t *);
336 static int	iwh_send(ieee80211com_t *, mblk_t *, uint8_t);
337 static void	iwh_thread(iwh_sc_t *);
338 static int	iwh_run_state_config(iwh_sc_t *);
339 static int	iwh_fast_recover(iwh_sc_t *);
340 static int	iwh_wme_update(ieee80211com_t *);
341 static int	iwh_qosparam_to_hw(iwh_sc_t *, int);
342 static int	iwh_wme_to_qos_ac(int);
343 static uint16_t	iwh_cw_e_to_cw(uint8_t);
344 static int	iwh_wmeparam_check(struct wmeParams *);
345 static inline int	iwh_wme_tid_qos_ac(int);
346 static inline int	iwh_qos_ac_to_txq(int);
347 static int	iwh_wme_tid_to_txq(int);
348 static void	iwh_init_ht_conf(iwh_sc_t *);
349 static void	iwh_overwrite_11n_rateset(iwh_sc_t *);
350 static void	iwh_overwrite_ic_default(iwh_sc_t *);
351 static void	iwh_config_rxon_chain(iwh_sc_t *);
352 static int	iwh_add_ap_sta(iwh_sc_t *);
353 static int	iwh_ap_lq(iwh_sc_t *);
354 static void	iwh_recv_action(struct ieee80211_node *,
355     const uint8_t *, const uint8_t *);
356 static int	iwh_send_action(struct ieee80211_node *,
357     int, int, uint16_t[4]);
358 static int	iwh_is_max_rate(ieee80211_node_t *);
359 static int	iwh_is_min_rate(ieee80211_node_t *);
360 static void	iwh_increase_rate(ieee80211_node_t *);
361 static void	iwh_decrease_rate(ieee80211_node_t *);
362 static int	iwh_alloc_dma_mem(iwh_sc_t *, size_t,
363     ddi_dma_attr_t *, ddi_device_acc_attr_t *,
364     uint_t, iwh_dma_t *);
365 static void	iwh_free_dma_mem(iwh_dma_t *);
366 static int	iwh_reset_hw(iwh_sc_t *);
367 
368 /*
369  * GLD specific operations
370  */
371 static int	iwh_m_stat(void *, uint_t, uint64_t *);
372 static int	iwh_m_start(void *);
373 static void	iwh_m_stop(void *);
374 static int	iwh_m_unicst(void *, const uint8_t *);
375 static int	iwh_m_multicst(void *, boolean_t, const uint8_t *);
376 static int	iwh_m_promisc(void *, boolean_t);
377 static mblk_t	*iwh_m_tx(void *, mblk_t *);
378 static void	iwh_m_ioctl(void *, queue_t *, mblk_t *);
379 static int	iwh_m_setprop(void *arg, const char *pr_name,
380     mac_prop_id_t wldp_pr_num, uint_t wldp_length, const void *wldp_buf);
381 static int	iwh_m_getprop(void *arg, const char *pr_name,
382     mac_prop_id_t wldp_pr_num, uint_t wldp_length,
383     void *wldp_buf);
384 static void	iwh_m_propinfo(void *arg, const char *pr_name,
385     mac_prop_id_t wldp_pr_num, mac_prop_info_handle_t mph);
386 
387 /*
388  * Supported rates for 802.11b/g modes (in 500Kbps unit).
389  */
390 static const struct ieee80211_rateset iwh_rateset_11b =
391 	{ 4, { 2, 4, 11, 22 } };
392 
393 static const struct ieee80211_rateset iwh_rateset_11g =
394 	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
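/*
 * Since the values are in 500Kbps units, 2 = 1Mbps, 22 = 11Mbps and
 * 108 = 54Mbps (divide by 2 to get Mbps).
 */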
395 
396 /*
397  * Default 11n rates supported by this station.
398  */
399 extern struct ieee80211_htrateset ieee80211_rateset_11n;
400 
401 /*
402  * For mfthread only
403  */
404 extern pri_t minclsyspri;
405 
406 #define	DRV_NAME_SP	"iwh"
407 
408 /*
409  * Module Loading Data & Entry Points
410  */
411 DDI_DEFINE_STREAM_OPS(iwh_devops, nulldev, nulldev, iwh_attach,
412     iwh_detach, nodev, NULL, D_MP, NULL, iwh_quiesce);
413 
414 static struct modldrv iwh_modldrv = {
415 	&mod_driverops,
416 	"Intel(R) ShirleyPeak/EchoPeak driver(N)",
417 	&iwh_devops
418 };
419 
420 static struct modlinkage iwh_modlinkage = {
421 	MODREV_1,
422 	&iwh_modldrv,
423 	NULL
424 };
425 
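/*
 * _init()/_fini() below follow the usual GLDv3 module pattern: initialize the
 * soft state and call mac_init_ops() before mod_install(), undoing both if the
 * install fails; _fini() tears them down only after mod_remove() succeeds.
 */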
426 int
427 _init(void)
428 {
429 	int status;
430 
431 	status = ddi_soft_state_init(&iwh_soft_state_p,
432 	    sizeof (iwh_sc_t), 1);
433 	if (status != DDI_SUCCESS) {
434 		return (status);
435 	}
436 
437 	mac_init_ops(&iwh_devops, DRV_NAME_SP);
438 	status = mod_install(&iwh_modlinkage);
439 	if (status != DDI_SUCCESS) {
440 		mac_fini_ops(&iwh_devops);
441 		ddi_soft_state_fini(&iwh_soft_state_p);
442 	}
443 
444 	return (status);
445 }
446 
447 int
448 _fini(void)
449 {
450 	int status;
451 
452 	status = mod_remove(&iwh_modlinkage);
453 	if (DDI_SUCCESS == status) {
454 		mac_fini_ops(&iwh_devops);
455 		ddi_soft_state_fini(&iwh_soft_state_p);
456 	}
457 
458 	return (status);
459 }
460 
461 int
462 _info(struct modinfo *mip)
463 {
464 	return (mod_info(&iwh_modlinkage, mip));
465 }
466 
467 /*
468  * Mac Call Back entries
469  */
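/*
 * The first field advertises which optional entry points are supplied
 * (ioctl, setprop, getprop, propinfo); the NULL slots are optional
 * callbacks this driver does not implement.
 */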
470 mac_callbacks_t	iwh_m_callbacks = {
471 	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
472 	iwh_m_stat,
473 	iwh_m_start,
474 	iwh_m_stop,
475 	iwh_m_promisc,
476 	iwh_m_multicst,
477 	iwh_m_unicst,
478 	iwh_m_tx,
479 	NULL,
480 	iwh_m_ioctl,
481 	NULL,
482 	NULL,
483 	NULL,
484 	iwh_m_setprop,
485 	iwh_m_getprop,
486 	iwh_m_propinfo
487 };
488 
489 #ifdef DEBUG
490 void
491 iwh_dbg(uint32_t flags, const char *fmt, ...)
492 {
493 	va_list	ap;
494 
495 	if (flags & iwh_dbg_flags) {
496 		va_start(ap, fmt);
497 		vcmn_err(CE_NOTE, fmt, ap);
498 		va_end(ap);
499 	}
500 }
501 #endif	/* DEBUG */
502 
503 /*
504  * device operations
505  */
506 int
507 iwh_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
508 {
509 	iwh_sc_t *sc;
510 	ieee80211com_t *ic;
511 	int instance, i;
512 	char strbuf[32];
513 	wifi_data_t wd = { 0 };
514 	mac_register_t *macp;
515 	int intr_type;
516 	int intr_count;
517 	int intr_actual;
518 	int err = DDI_FAILURE;
519 
520 	switch (cmd) {
521 	case DDI_ATTACH:
522 		break;
523 
524 	case DDI_RESUME:
525 		instance = ddi_get_instance(dip);
526 		sc = ddi_get_soft_state(iwh_soft_state_p,
527 		    instance);
528 		ASSERT(sc != NULL);
529 
530 		if (sc->sc_flags & IWH_F_RUNNING) {
531 			(void) iwh_init(sc);
532 		}
533 
534 		atomic_and_32(&sc->sc_flags, ~IWH_F_SUSPEND);
535 
536 		IWH_DBG((IWH_DEBUG_RESUME, "iwh_attach(): "
537 		    "resume\n"));
538 		return (DDI_SUCCESS);
539 
540 	default:
541 		goto attach_fail1;
542 	}
543 
544 	instance = ddi_get_instance(dip);
545 	err = ddi_soft_state_zalloc(iwh_soft_state_p, instance);
546 	if (err != DDI_SUCCESS) {
547 		cmn_err(CE_WARN, "iwh_attach(): "
548 		    "failed to allocate soft state\n");
549 		goto attach_fail1;
550 	}
551 
552 	sc = ddi_get_soft_state(iwh_soft_state_p, instance);
553 	ASSERT(sc != NULL);
554 
555 	sc->sc_dip = dip;
556 
557 	/*
558 	 * map PCI config space
559 	 */
560 	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
561 	    &iwh_reg_accattr, &sc->sc_cfg_handle);
562 	if (err != DDI_SUCCESS) {
563 		cmn_err(CE_WARN, "iwh_attach(): "
564 		    "failed to map config spaces regs\n");
565 		goto attach_fail2;
566 	}
567 
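	/*
	 * Read the PCI device ID and check it against the 5000-series IDs this
	 * driver knows about.  The 0x423c/0x423d parts load the 5150 ucode
	 * further below; the others load the 5000 ucode.
	 */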
568 	sc->sc_dev_id = ddi_get16(sc->sc_cfg_handle,
569 	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_DEVID));
570 	if ((sc->sc_dev_id != 0x4232) &&
571 	    (sc->sc_dev_id != 0x4235) &&
572 	    (sc->sc_dev_id != 0x4236) &&
573 	    (sc->sc_dev_id != 0x4237) &&
574 	    (sc->sc_dev_id != 0x423a) &&
575 	    (sc->sc_dev_id != 0x423b) &&
576 	    (sc->sc_dev_id != 0x423c) &&
577 	    (sc->sc_dev_id != 0x423d)) {
578 		cmn_err(CE_WARN, "iwh_attach(): "
579 		    "unsupported device\n");
580 		goto attach_fail3;
581 	}
582 
583 	iwh_init_ht_conf(sc);
584 	iwh_overwrite_11n_rateset(sc);
585 
586 	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
587 	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
588 
589 	/*
590 	 * keep from disturbing C3 state of CPU
591 	 */
592 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base +
593 	    PCI_CFG_RETRY_TIMEOUT), 0);
594 
595 	/*
596 	 * determine the size of the buffers used for frames and commands sent to the ucode
597 	 */
598 	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
599 	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
600 	if (!sc->sc_clsz) {
601 		sc->sc_clsz = 16;
602 	}
603 	sc->sc_clsz = (sc->sc_clsz << 2);
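	/*
	 * The PCI cache line size register counts 32-bit words, so shifting
	 * left by 2 above converts it to bytes.  sc_dmabuf_sz below is the
	 * largest frame we expect (an extra 0x2000 bytes, presumably to cover
	 * large aggregated frames, plus 802.11 header, MTU, CRC and WEP
	 * overhead) rounded up to a cache-line multiple.
	 */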
604 
605 	sc->sc_dmabuf_sz = roundup(0x2000 + sizeof (struct ieee80211_frame) +
606 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
607 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
608 	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);
609 
610 	/*
611 	 * Map operating registers
612 	 */
613 	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
614 	    0, 0, &iwh_reg_accattr, &sc->sc_handle);
615 	if (err != DDI_SUCCESS) {
616 		cmn_err(CE_WARN, "iwh_attach(): "
617 		    "failed to map device regs\n");
618 		goto attach_fail3;
619 	}
620 
621 	/*
622 	 * this is used to differentiate the type of hardware
623 	 */
624 	sc->sc_hw_rev = IWH_READ(sc, CSR_HW_REV);
625 
626 	err = ddi_intr_get_supported_types(dip, &intr_type);
627 	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
628 		cmn_err(CE_WARN, "iwh_attach(): "
629 		    "fixed type interrupt is not supported\n");
630 		goto attach_fail4;
631 	}
632 
633 	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
634 	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
635 		cmn_err(CE_WARN, "iwh_attach(): "
636 		    "no fixed interrupts\n");
637 		goto attach_fail4;
638 	}
639 
640 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
641 
642 	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
643 	    intr_count, &intr_actual, 0);
644 	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
645 		cmn_err(CE_WARN, "iwh_attach(): "
646 		    "ddi_intr_alloc() failed 0x%x\n", err);
647 		goto attach_fail5;
648 	}
649 
650 	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
651 	if (err != DDI_SUCCESS) {
652 		cmn_err(CE_WARN, "iwh_attach(): "
653 		    "ddi_intr_get_pri() failed 0x%x\n", err);
654 		goto attach_fail6;
655 	}
656 
657 	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
658 	    DDI_INTR_PRI(sc->sc_intr_pri));
659 	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
660 	    DDI_INTR_PRI(sc->sc_intr_pri));
661 	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
662 	    DDI_INTR_PRI(sc->sc_intr_pri));
663 
664 	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
665 	cv_init(&sc->sc_put_seg_cv, NULL, CV_DRIVER, NULL);
666 	cv_init(&sc->sc_ucode_cv, NULL, CV_DRIVER, NULL);
667 
668 	/*
669 	 * initialize the mfthread
670 	 */
671 	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
672 	sc->sc_mf_thread = NULL;
673 	sc->sc_mf_thread_switch = 0;
674 
675 	/*
676 	 * Allocate shared buffer for communication between driver and ucode.
677 	 */
678 	err = iwh_alloc_shared(sc);
679 	if (err != DDI_SUCCESS) {
680 		cmn_err(CE_WARN, "iwh_attach(): "
681 		    "failed to allocate shared page\n");
682 		goto attach_fail7;
683 	}
684 
685 	(void) memset(sc->sc_shared, 0, sizeof (iwh_shared_t));
686 
687 	/*
688 	 * Allocate keep warm page.
689 	 */
690 	err = iwh_alloc_kw(sc);
691 	if (err != DDI_SUCCESS) {
692 		cmn_err(CE_WARN, "iwh_attach(): "
693 		    "failed to allocate keep warm page\n");
694 		goto attach_fail8;
695 	}
696 
697 	err = iwh_reset_hw(sc);
698 	if (err != IWH_SUCCESS) {
699 		cmn_err(CE_WARN, "iwh_attach(): "
700 		    "failed to reset hardware\n");
701 		goto attach_fail9;
702 	}
703 
704 	/*
705 	 * Do some necessary hardware initializations.
706 	 */
707 	err = iwh_preinit(sc);
708 	if (err != IWH_SUCCESS) {
709 		cmn_err(CE_WARN, "iwh_attach(): "
710 		    "failed to initialize hardware\n");
711 		goto attach_fail9;
712 	}
713 
714 	/*
715 	 * get hardware configurations from eeprom
716 	 */
717 	err = iwh_eep_load(sc);
718 	if (err != IWH_SUCCESS) {
719 		cmn_err(CE_WARN, "iwh_attach(): "
720 		    "failed to load eeprom\n");
721 		goto attach_fail9;
722 	}
723 
724 	if (IWH_READ_EEP_SHORT(sc, EEP_VERSION) < 0x011a) {
725 		IWH_DBG((IWH_DEBUG_EEPROM, "iwh_attach(): "
726 		    "unsupported eeprom detected\n"));
727 		goto attach_fail9;
728 	}
729 
730 	/*
731 	 * get MAC address of this chipset
732 	 */
733 	iwh_get_mac_from_eep(sc);
734 
735 	/*
736 	 * calibration information from EEPROM
737 	 */
738 	sc->sc_eep_calib = (struct iwh_eep_calibration *)
739 	    iwh_eep_addr_trans(sc, EEP_CALIBRATION);
740 
741 	/*
742 	 * initialize TX and RX ring buffers
743 	 */
744 	err = iwh_ring_init(sc);
745 	if (err != DDI_SUCCESS) {
746 		cmn_err(CE_WARN, "iwh_attach(): "
747 		    "failed to allocate and initialize ring\n");
748 		goto attach_fail9;
749 	}
750 
751 	if ((0x423c == sc->sc_dev_id) || (0x423d == sc->sc_dev_id)) {
752 		sc->sc_hdr = (iwh_firmware_hdr_t *)iwh_fw_5150_bin;
753 	} else {
754 		sc->sc_hdr = (iwh_firmware_hdr_t *)iwh_fw_5000_bin;
755 	}
756 
757 	/*
758 	 * copy ucode to dma buffer
759 	 */
760 	err = iwh_alloc_fw_dma(sc);
761 	if (err != DDI_SUCCESS) {
762 		cmn_err(CE_WARN, "iwh_attach(): "
763 		    "failed to allocate firmware dma\n");
764 		goto attach_fail10;
765 	}
766 
767 	/*
768 	 * Initialize the wifi part, which will be used by
769 	 * 802.11 module
770 	 */
771 	ic = &sc->sc_ic;
772 	ic->ic_phytype  = IEEE80211_T_HT;
773 	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
774 	ic->ic_state    = IEEE80211_S_INIT;
775 	ic->ic_maxrssi  = 100; /* experimental number */
776 	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
777 	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
778 
779 	/*
780 	 * Support WPA/WPA2
781 	 */
782 	ic->ic_caps |= IEEE80211_C_WPA;
783 
784 	/*
785 	 * Support QoS/WME
786 	 */
787 	ic->ic_caps |= IEEE80211_C_WME;
788 	ic->ic_wme.wme_update = iwh_wme_update;
789 
790 	/*
791 	 * Support 802.11n/HT
792 	 */
793 	if (sc->sc_ht_conf.ht_support) {
794 		ic->ic_htcaps = IEEE80211_HTC_HT |
795 		    IEEE80211_HTC_AMSDU;
796 		ic->ic_htcaps |= IEEE80211_HTCAP_MAXAMSDU_7935;
797 	}
798 
799 	/*
800 	 * set supported .11b and .11g rates
801 	 */
802 	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwh_rateset_11b;
803 	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwh_rateset_11g;
804 
805 	/*
806 	 * set supported .11b and .11g channels (1 through 11)
807 	 */
808 	for (i = 1; i <= 11; i++) {
809 		ic->ic_sup_channels[i].ich_freq =
810 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
811 		ic->ic_sup_channels[i].ich_flags =
812 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
813 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
814 		    IEEE80211_CHAN_PASSIVE;
815 
816 		if (sc->sc_ht_conf.cap & HT_CAP_SUP_WIDTH) {
817 			ic->ic_sup_channels[i].ich_flags |=
818 			    IEEE80211_CHAN_HT40;
819 		} else {
820 			ic->ic_sup_channels[i].ich_flags |=
821 			    IEEE80211_CHAN_HT20;
822 		}
823 	}
824 
825 	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
826 	ic->ic_xmit = iwh_send;
827 
828 	/*
829 	 * attach to 802.11 module
830 	 */
831 	ieee80211_attach(ic);
832 
833 	/*
834 	 * each instance has a different WPA door
835 	 */
836 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
837 	    ddi_driver_name(dip),
838 	    ddi_get_instance(dip));
839 
840 	/*
841 	 * Overwrite 80211 default configurations.
842 	 */
843 	iwh_overwrite_ic_default(sc);
844 
845 	/*
846 	 * initialize 802.11 module
847 	 */
848 	ieee80211_media_init(ic);
849 
850 	/*
851 	 * initialize default tx key
852 	 */
853 	ic->ic_def_txkey = 0;
854 
855 	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
856 	    iwh_rx_softintr, (caddr_t)sc);
857 	if (err != DDI_SUCCESS) {
858 		cmn_err(CE_WARN, "iwh_attach(): "
859 		    "add soft interrupt failed\n");
860 		goto attach_fail12;
861 	}
862 
863 	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwh_intr,
864 	    (caddr_t)sc, NULL);
865 	if (err != DDI_SUCCESS) {
866 		cmn_err(CE_WARN, "iwh_attach(): "
867 		    "ddi_intr_add_handler() failed\n");
868 		goto attach_fail13;
869 	}
870 
871 	err = ddi_intr_enable(sc->sc_intr_htable[0]);
872 	if (err != DDI_SUCCESS) {
873 		cmn_err(CE_WARN, "iwh_attach(): "
874 		    "ddi_intr_enable() failed\n");
875 		goto attach_fail14;
876 	}
877 
878 	/*
879 	 * Initialize pointer to device specific functions
880 	 */
881 	wd.wd_secalloc = WIFI_SEC_NONE;
882 	wd.wd_opmode = ic->ic_opmode;
883 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
884 
885 	/*
886 	 * create relation to GLD
887 	 */
888 	macp = mac_alloc(MAC_VERSION);
889 	if (NULL == macp) {
890 		cmn_err(CE_WARN, "iwh_attach(): "
891 		    "failed to do mac_alloc()\n");
892 		goto attach_fail15;
893 	}
894 
895 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
896 	macp->m_driver		= sc;
897 	macp->m_dip		= dip;
898 	macp->m_src_addr	= ic->ic_macaddr;
899 	macp->m_callbacks	= &iwh_m_callbacks;
900 	macp->m_min_sdu		= 0;
901 	macp->m_max_sdu		= IEEE80211_MTU;
902 	macp->m_pdata		= &wd;
903 	macp->m_pdata_size	= sizeof (wd);
904 
905 	/*
906 	 * Register the macp to mac
907 	 */
908 	err = mac_register(macp, &ic->ic_mach);
909 	mac_free(macp);
910 	if (err != DDI_SUCCESS) {
911 		cmn_err(CE_WARN, "iwh_attach(): "
912 		    "failed to do mac_register()\n");
913 		goto attach_fail15;
914 	}
915 
916 	/*
917 	 * Create minor node of type DDI_NT_NET_WIFI
918 	 */
919 	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_SP"%d", instance);
920 	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
921 	    instance + 1, DDI_NT_NET_WIFI, 0);
922 	if (err != DDI_SUCCESS) {
923 		cmn_err(CE_WARN, "iwh_attach(): "
924 		    "failed to do ddi_create_minor_node()\n");
925 	}
926 
927 	/*
928 	 * Notify link is down now
929 	 */
930 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
931 
932 	/*
933 	 * create the mf thread to handle link status updates,
934 	 * fatal error recovery, etc.
935 	 */
936 	sc->sc_mf_thread_switch = 1;
937 	if (NULL == sc->sc_mf_thread) {
938 		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
939 		    iwh_thread, sc, 0, &p0, TS_RUN, minclsyspri);
940 	}
941 
942 	atomic_or_32(&sc->sc_flags, IWH_F_ATTACHED);
943 
944 	return (DDI_SUCCESS);
945 
946 attach_fail15:
947 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
948 
949 attach_fail14:
950 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
951 
952 attach_fail13:
953 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
954 	sc->sc_soft_hdl = NULL;
955 
956 attach_fail12:
957 	ieee80211_detach(ic);
958 
959 	iwh_free_fw_dma(sc);
960 
961 attach_fail10:
962 	iwh_ring_free(sc);
963 
964 attach_fail9:
965 	iwh_free_kw(sc);
966 
967 attach_fail8:
968 	iwh_free_shared(sc);
969 
970 attach_fail7:
971 	iwh_destroy_locks(sc);
972 
973 attach_fail6:
974 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
975 
976 attach_fail5:
977 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
978 
979 attach_fail4:
980 	ddi_regs_map_free(&sc->sc_handle);
981 
982 attach_fail3:
983 	ddi_regs_map_free(&sc->sc_cfg_handle);
984 
985 attach_fail2:
986 	ddi_soft_state_free(iwh_soft_state_p, instance);
987 
988 attach_fail1:
989 	return (DDI_FAILURE);
990 }
991 
992 int
993 iwh_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
994 {
995 	iwh_sc_t *sc;
996 	ieee80211com_t *ic;
997 	int err;
998 
999 	sc = ddi_get_soft_state(iwh_soft_state_p, ddi_get_instance(dip));
1000 	ASSERT(sc != NULL);
1001 	ic = &sc->sc_ic;
1002 
1003 	switch (cmd) {
1004 	case DDI_DETACH:
1005 		break;
1006 
1007 	case DDI_SUSPEND:
1008 		atomic_and_32(&sc->sc_flags, ~IWH_F_HW_ERR_RECOVER);
1009 		atomic_and_32(&sc->sc_flags, ~IWH_F_RATE_AUTO_CTL);
1010 
1011 		atomic_or_32(&sc->sc_flags, IWH_F_SUSPEND);
1012 
1013 		if (sc->sc_flags & IWH_F_RUNNING) {
1014 			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1015 			iwh_stop(sc);
1016 		}
1017 
1018 		IWH_DBG((IWH_DEBUG_RESUME, "iwh_detach(): "
1019 		    "suspend\n"));
1020 		return (DDI_SUCCESS);
1021 
1022 	default:
1023 		return (DDI_FAILURE);
1024 	}
1025 
1026 	if (!(sc->sc_flags & IWH_F_ATTACHED)) {
1027 		return (DDI_FAILURE);
1028 	}
1029 
1030 	/*
1031 	 * Destroy the mf_thread
1032 	 */
1033 	sc->sc_mf_thread_switch = 0;
1034 
1035 	mutex_enter(&sc->sc_mt_lock);
1036 	while (sc->sc_mf_thread != NULL) {
1037 		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) {
1038 			break;
1039 		}
1040 	}
1041 	mutex_exit(&sc->sc_mt_lock);
1042 
1043 	err = mac_disable(sc->sc_ic.ic_mach);
1044 	if (err != DDI_SUCCESS) {
1045 		return (err);
1046 	}
1047 
1048 	/*
1049 	 * stop chipset
1050 	 */
1051 	iwh_stop(sc);
1052 
1053 	DELAY(500000);
1054 
1055 	/*
1056 	 * release buffer for calibration
1057 	 */
1058 	iwh_release_calib_buffer(sc);
1059 
1060 	/*
1061 	 * Unregister from GLD
1062 	 */
1063 	(void) mac_unregister(sc->sc_ic.ic_mach);
1064 
1065 	mutex_enter(&sc->sc_glock);
1066 	iwh_free_fw_dma(sc);
1067 	iwh_ring_free(sc);
1068 	iwh_free_kw(sc);
1069 	iwh_free_shared(sc);
1070 	mutex_exit(&sc->sc_glock);
1071 
1072 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
1073 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
1074 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
1075 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
1076 
1077 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
1078 	sc->sc_soft_hdl = NULL;
1079 
1080 	/*
1081 	 * detach from 80211 module
1082 	 */
1083 	ieee80211_detach(&sc->sc_ic);
1084 
1085 	iwh_destroy_locks(sc);
1086 
1087 	ddi_regs_map_free(&sc->sc_handle);
1088 	ddi_regs_map_free(&sc->sc_cfg_handle);
1089 	ddi_remove_minor_node(dip, NULL);
1090 	ddi_soft_state_free(iwh_soft_state_p, ddi_get_instance(dip));
1091 
1092 	return (DDI_SUCCESS);
1093 }
1094 
1095 /*
1096  * destroy all locks
1097  */
1098 static void
1099 iwh_destroy_locks(iwh_sc_t *sc)
1100 {
1101 	cv_destroy(&sc->sc_mt_cv);
1102 	cv_destroy(&sc->sc_cmd_cv);
1103 	cv_destroy(&sc->sc_put_seg_cv);
1104 	cv_destroy(&sc->sc_ucode_cv);
1105 	mutex_destroy(&sc->sc_mt_lock);
1106 	mutex_destroy(&sc->sc_tx_lock);
1107 	mutex_destroy(&sc->sc_glock);
1108 }
1109 
1110 /*
1111  * Allocate an area of memory and a DMA handle for accessing it
1112  */
1113 static int
1114 iwh_alloc_dma_mem(iwh_sc_t *sc, size_t memsize,
1115     ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
1116     uint_t dma_flags, iwh_dma_t *dma_p)
1117 {
1118 	caddr_t vaddr;
1119 	int err = DDI_FAILURE;
1120 
1121 	/*
1122 	 * Allocate handle
1123 	 */
1124 	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
1125 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
1126 	if (err != DDI_SUCCESS) {
1127 		dma_p->dma_hdl = NULL;
1128 		return (DDI_FAILURE);
1129 	}
1130 
1131 	/*
1132 	 * Allocate memory
1133 	 */
1134 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
1135 	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
1136 	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
1137 	if (err != DDI_SUCCESS) {
1138 		ddi_dma_free_handle(&dma_p->dma_hdl);
1139 		dma_p->dma_hdl = NULL;
1140 		dma_p->acc_hdl = NULL;
1141 		return (DDI_FAILURE);
1142 	}
1143 
1144 	/*
1145 	 * Bind the two together
1146 	 */
1147 	dma_p->mem_va = vaddr;
1148 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
1149 	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
1150 	    &dma_p->cookie, &dma_p->ncookies);
1151 	if (err != DDI_DMA_MAPPED) {
1152 		ddi_dma_mem_free(&dma_p->acc_hdl);
1153 		ddi_dma_free_handle(&dma_p->dma_hdl);
1154 		dma_p->acc_hdl = NULL;
1155 		dma_p->dma_hdl = NULL;
1156 		return (DDI_FAILURE);
1157 	}
1158 
1159 	dma_p->nslots = ~0U;
1160 	dma_p->size = ~0U;
1161 	dma_p->token = ~0U;
1162 	dma_p->offset = 0;
1163 	return (DDI_SUCCESS);
1164 }
1165 
1166 /*
1167  * Free one allocated area of DMAable memory
1168  */
1169 static void
1170 iwh_free_dma_mem(iwh_dma_t *dma_p)
1171 {
1172 	if (dma_p->dma_hdl != NULL) {
1173 		if (dma_p->ncookies) {
1174 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1175 			dma_p->ncookies = 0;
1176 		}
1177 		ddi_dma_free_handle(&dma_p->dma_hdl);
1178 		dma_p->dma_hdl = NULL;
1179 	}
1180 
1181 	if (dma_p->acc_hdl != NULL) {
1182 		ddi_dma_mem_free(&dma_p->acc_hdl);
1183 		dma_p->acc_hdl = NULL;
1184 	}
1185 }
1186 
1187 /*
1188  * copy ucode into dma buffers
1189  */
1190 static int
1191 iwh_alloc_fw_dma(iwh_sc_t *sc)
1192 {
1193 	int err = DDI_FAILURE;
1194 	iwh_dma_t *dma_p;
1195 	char *t;
1196 
1197 	/*
1198 	 * firmware image layout:
1199 	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
1200 	 */
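	/*
	 * The size of each segment comes from the header fields (textsz,
	 * datasz, init_textsz, init_datasz).  Note the runtime data segment
	 * is copied twice, into sc_dma_fw_data and sc_dma_fw_data_bak;
	 * presumably the backup copy lets the data segment be restored later
	 * without walking the firmware image again.
	 */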
1201 
1202 	/*
1203 	 * copy text of runtime ucode
1204 	 */
1205 	t = (char *)(sc->sc_hdr + 1);
1206 	err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1207 	    &fw_dma_attr, &iwh_dma_accattr,
1208 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1209 	    &sc->sc_dma_fw_text);
1210 	if (err != DDI_SUCCESS) {
1211 		cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1212 		    "failed to allocate text dma memory.\n");
1213 		goto fail;
1214 	}
1215 
1216 	dma_p = &sc->sc_dma_fw_text;
1217 
1218 	IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1219 	    "text[ncookies:%d addr:%lx size:%lx]\n",
1220 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1221 	    dma_p->cookie.dmac_size));
1222 
1223 	bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->textsz));
1224 
1225 	/*
1226 	 * copy data and bak-data of runtime ucode
1227 	 */
1228 	t += LE_32(sc->sc_hdr->textsz);
1229 	err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1230 	    &fw_dma_attr, &iwh_dma_accattr,
1231 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1232 	    &sc->sc_dma_fw_data);
1233 	if (err != DDI_SUCCESS) {
1234 		cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1235 		    "failed to allocate data dma memory\n");
1236 		goto fail;
1237 	}
1238 
1239 	dma_p = &sc->sc_dma_fw_data;
1240 
1241 	IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1242 	    "data[ncookies:%d addr:%lx size:%lx]\n",
1243 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1244 	    dma_p->cookie.dmac_size));
1245 
1246 	bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->datasz));
1247 
1248 	err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1249 	    &fw_dma_attr, &iwh_dma_accattr,
1250 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1251 	    &sc->sc_dma_fw_data_bak);
1252 	if (err != DDI_SUCCESS) {
1253 		cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1254 		    "failed to allocate data backup dma memory\n");
1255 		goto fail;
1256 	}
1257 
1258 	dma_p = &sc->sc_dma_fw_data_bak;
1259 
1260 	IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1261 	    "data_bak[ncookies:%d addr:%lx "
1262 	    "size:%lx]\n",
1263 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1264 	    dma_p->cookie.dmac_size));
1265 
1266 	bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->datasz));
1267 
1268 	/*
1269 	 * copy text of init ucode
1270 	 */
1271 	t += LE_32(sc->sc_hdr->datasz);
1272 	err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1273 	    &fw_dma_attr, &iwh_dma_accattr,
1274 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1275 	    &sc->sc_dma_fw_init_text);
1276 	if (err != DDI_SUCCESS) {
1277 		cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1278 		    "failed to allocate init text dma memory\n");
1279 		goto fail;
1280 	}
1281 
1282 	dma_p = &sc->sc_dma_fw_init_text;
1283 
1284 	IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1285 	    "init_text[ncookies:%d addr:%lx "
1286 	    "size:%lx]\n",
1287 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1288 	    dma_p->cookie.dmac_size));
1289 
1290 	bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->init_textsz));
1291 
1292 	/*
1293 	 * copy data of init ucode
1294 	 */
1295 	t += LE_32(sc->sc_hdr->init_textsz);
1296 	err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1297 	    &fw_dma_attr, &iwh_dma_accattr,
1298 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1299 	    &sc->sc_dma_fw_init_data);
1300 	if (err != DDI_SUCCESS) {
1301 		cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1302 		    "failed to allocate init data dma memory\n");
1303 		goto fail;
1304 	}
1305 
1306 	dma_p = &sc->sc_dma_fw_init_data;
1307 
1308 	IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1309 	    "init_data[ncookies:%d addr:%lx "
1310 	    "size:%lx]\n",
1311 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1312 	    dma_p->cookie.dmac_size));
1313 
1314 	bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->init_datasz));
1315 
1316 	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1317 
1318 fail:
1319 	return (err);
1320 }
1321 
1322 static void
1323 iwh_free_fw_dma(iwh_sc_t *sc)
1324 {
1325 	iwh_free_dma_mem(&sc->sc_dma_fw_text);
1326 	iwh_free_dma_mem(&sc->sc_dma_fw_data);
1327 	iwh_free_dma_mem(&sc->sc_dma_fw_data_bak);
1328 	iwh_free_dma_mem(&sc->sc_dma_fw_init_text);
1329 	iwh_free_dma_mem(&sc->sc_dma_fw_init_data);
1330 }
1331 
1332 /*
1333  * Allocate a shared buffer between host and NIC.
1334  */
1335 static int
1336 iwh_alloc_shared(iwh_sc_t *sc)
1337 {
1338 #ifdef	DEBUG
1339 	iwh_dma_t *dma_p;
1340 #endif
1341 	int err = DDI_FAILURE;
1342 
1343 	/*
1344 	 * must be aligned on a 4K-page boundary
1345 	 */
1346 	err = iwh_alloc_dma_mem(sc, sizeof (iwh_shared_t),
1347 	    &sh_dma_attr, &iwh_dma_descattr,
1348 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1349 	    &sc->sc_dma_sh);
1350 	if (err != DDI_SUCCESS) {
1351 		goto fail;
1352 	}
1353 
1354 	sc->sc_shared = (iwh_shared_t *)sc->sc_dma_sh.mem_va;
1355 
1356 #ifdef	DEBUG
1357 	dma_p = &sc->sc_dma_sh;
1358 #endif
1359 	IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_shared(): "
1360 	    "sh[ncookies:%d addr:%lx size:%lx]\n",
1361 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1362 	    dma_p->cookie.dmac_size));
1363 
1364 	return (err);
1365 
1366 fail:
1367 	iwh_free_shared(sc);
1368 	return (err);
1369 }
1370 
1371 static void
1372 iwh_free_shared(iwh_sc_t *sc)
1373 {
1374 	iwh_free_dma_mem(&sc->sc_dma_sh);
1375 }
1376 
1377 /*
1378  * Allocate a keep warm page.
1379  */
1380 static int
1381 iwh_alloc_kw(iwh_sc_t *sc)
1382 {
1383 #ifdef	DEBUG
1384 	iwh_dma_t *dma_p;
1385 #endif
1386 	int err = DDI_FAILURE;
1387 
1388 	/*
1389 	 * must be aligned on a 4K-page boundary
1390 	 */
1391 	err = iwh_alloc_dma_mem(sc, IWH_KW_SIZE,
1392 	    &kw_dma_attr, &iwh_dma_descattr,
1393 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1394 	    &sc->sc_dma_kw);
1395 	if (err != DDI_SUCCESS) {
1396 		goto fail;
1397 	}
1398 
1399 #ifdef	DEBUG
1400 	dma_p = &sc->sc_dma_kw;
1401 #endif
1402 	IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_kw(): "
1403 	    "kw[ncookies:%d addr:%lx size:%lx]\n",
1404 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1405 	    dma_p->cookie.dmac_size));
1406 
1407 	return (err);
1408 
1409 fail:
1410 	iwh_free_kw(sc);
1411 	return (err);
1412 }
1413 
1414 static void
1415 iwh_free_kw(iwh_sc_t *sc)
1416 {
1417 	iwh_free_dma_mem(&sc->sc_dma_kw);
1418 }
1419 
1420 /*
1421  * initialize RX ring buffers
1422  */
1423 static int
1424 iwh_alloc_rx_ring(iwh_sc_t *sc)
1425 {
1426 	iwh_rx_ring_t *ring;
1427 	iwh_rx_data_t *data;
1428 #ifdef	DEBUG
1429 	iwh_dma_t *dma_p;
1430 #endif
1431 	int i, err = DDI_FAILURE;
1432 
1433 	ring = &sc->sc_rxq;
1434 	ring->cur = 0;
1435 
1436 	/*
1437 	 * allocate RX descriptor ring buffer
1438 	 */
1439 	err = iwh_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
1440 	    &ring_desc_dma_attr, &iwh_dma_descattr,
1441 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1442 	    &ring->dma_desc);
1443 	if (err != DDI_SUCCESS) {
1444 		IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1445 		    "dma alloc rx ring desc "
1446 		    "failed\n"));
1447 		goto fail;
1448 	}
1449 
1450 	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
1451 #ifdef	DEBUG
1452 	dma_p = &ring->dma_desc;
1453 #endif
1454 	IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1455 	    "rx bd[ncookies:%d addr:%lx size:%lx]\n",
1456 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1457 	    dma_p->cookie.dmac_size));
1458 
1459 	/*
1460 	 * Allocate Rx frame buffers.
1461 	 */
1462 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1463 		data = &ring->data[i];
1464 		err = iwh_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1465 		    &rx_buffer_dma_attr, &iwh_dma_accattr,
1466 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1467 		    &data->dma_data);
1468 		if (err != DDI_SUCCESS) {
1469 			IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1470 			    "dma alloc rx ring "
1471 			    "buf[%d] failed\n", i));
1472 			goto fail;
1473 		}
1474 		/*
1475 		 * physical address bits [8-36] are used,
1476 		 * instead of bits [0-31] as in the 3945.
1477 		 */
1478 		ring->desc[i] = (uint32_t)
1479 		    (data->dma_data.cookie.dmac_address >> 8);
1480 	}
1481 
1482 #ifdef	DEBUG
1483 	dma_p = &ring->data[0].dma_data;
1484 #endif
1485 	IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1486 	    "rx buffer[0][ncookies:%d addr:%lx "
1487 	    "size:%lx]\n",
1488 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1489 	    dma_p->cookie.dmac_size));
1490 
1491 	IWH_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1492 
1493 	return (err);
1494 
1495 fail:
1496 	iwh_free_rx_ring(sc);
1497 	return (err);
1498 }
1499 
1500 /*
1501  * disable RX ring
1502  */
1503 static void
1504 iwh_reset_rx_ring(iwh_sc_t *sc)
1505 {
1506 	int n;
1507 
1508 	iwh_mac_access_enter(sc);
1509 	IWH_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1510 	for (n = 0; n < 2000; n++) {
1511 		if (IWH_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) {
1512 			break;
1513 		}
1514 		DELAY(1000);
1515 	}
1516 #ifdef DEBUG
1517 	if (2000 == n) {
1518 		IWH_DBG((IWH_DEBUG_DMA, "iwh_reset_rx_ring(): "
1519 		    "timeout resetting Rx ring\n"));
1520 	}
1521 #endif
1522 	iwh_mac_access_exit(sc);
1523 
1524 	sc->sc_rxq.cur = 0;
1525 }
1526 
1527 static void
1528 iwh_free_rx_ring(iwh_sc_t *sc)
1529 {
1530 	int i;
1531 
1532 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1533 		if (sc->sc_rxq.data[i].dma_data.dma_hdl) {
1534 			IWH_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1535 			    DDI_DMA_SYNC_FORCPU);
1536 		}
1537 
1538 		iwh_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1539 	}
1540 
1541 	if (sc->sc_rxq.dma_desc.dma_hdl) {
1542 		IWH_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1543 	}
1544 
1545 	iwh_free_dma_mem(&sc->sc_rxq.dma_desc);
1546 }
1547 
1548 /*
1549  * initialize TX ring buffers
1550  */
1551 static int
1552 iwh_alloc_tx_ring(iwh_sc_t *sc, iwh_tx_ring_t *ring,
1553     int slots, int qid)
1554 {
1555 	iwh_tx_data_t *data;
1556 	iwh_tx_desc_t *desc_h;
1557 	uint32_t paddr_desc_h;
1558 	iwh_cmd_t *cmd_h;
1559 	uint32_t paddr_cmd_h;
1560 #ifdef	DEBUG
1561 	iwh_dma_t *dma_p;
1562 #endif
1563 	int i, err = DDI_FAILURE;
1564 
1565 	ring->qid = qid;
1566 	ring->count = TFD_QUEUE_SIZE_MAX;
1567 	ring->window = slots;
1568 	ring->queued = 0;
1569 	ring->cur = 0;
1570 	ring->desc_cur = 0;
1571 
1572 	/*
1573 	 * allocate buffer for TX descriptor ring
1574 	 */
1575 	err = iwh_alloc_dma_mem(sc,
1576 	    TFD_QUEUE_SIZE_MAX * sizeof (iwh_tx_desc_t),
1577 	    &ring_desc_dma_attr, &iwh_dma_descattr,
1578 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1579 	    &ring->dma_desc);
1580 	if (err != DDI_SUCCESS) {
1581 		IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1582 		    "dma alloc tx ring desc[%d] "
1583 		    "failed\n", qid));
1584 		goto fail;
1585 	}
1586 
1587 #ifdef	DEBUG
1588 	dma_p = &ring->dma_desc;
1589 #endif
1590 	IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1591 	    "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1592 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1593 	    dma_p->cookie.dmac_size));
1594 
1595 	desc_h = (iwh_tx_desc_t *)ring->dma_desc.mem_va;
1596 	paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1597 
1598 	/*
1599 	 * allocate buffer for ucode command
1600 	 */
1601 	err = iwh_alloc_dma_mem(sc,
1602 	    TFD_QUEUE_SIZE_MAX * sizeof (iwh_cmd_t),
1603 	    &cmd_dma_attr, &iwh_dma_accattr,
1604 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1605 	    &ring->dma_cmd);
1606 	if (err != DDI_SUCCESS) {
1607 		IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1608 		    "dma alloc tx ring cmd[%d]"
1609 		    " failed\n", qid));
1610 		goto fail;
1611 	}
1612 
1613 #ifdef	DEBUG
1614 	dma_p = &ring->dma_cmd;
1615 #endif
1616 	IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1617 	    "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1618 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1619 	    dma_p->cookie.dmac_size));
1620 
1621 	cmd_h = (iwh_cmd_t *)ring->dma_cmd.mem_va;
1622 	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1623 
1624 	/*
1625 	 * Allocate Tx frame buffers.
1626 	 */
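	/*
	 * Besides its frame buffer, each slot is paired with one entry in the
	 * descriptor array and one in the command array allocated above; the
	 * paddr_* fields record the matching device (DMA) addresses, computed
	 * as the ring base address plus the slot's offset.
	 */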
1627 	ring->data = kmem_zalloc(sizeof (iwh_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1628 	    KM_NOSLEEP);
1629 	if (NULL == ring->data) {
1630 		IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1631 		    "could not allocate "
1632 		    "tx data slots\n"));
1633 		goto fail;
1634 	}
1635 
1636 	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1637 		data = &ring->data[i];
1638 		err = iwh_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1639 		    &tx_buffer_dma_attr, &iwh_dma_accattr,
1640 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
1641 		    &data->dma_data);
1642 		if (err != DDI_SUCCESS) {
1643 			IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1644 			    "dma alloc tx "
1645 			    "ring buf[%d] failed\n", i));
1646 			goto fail;
1647 		}
1648 
1649 		data->desc = desc_h + i;
1650 		data->paddr_desc = paddr_desc_h +
1651 		    _PTRDIFF(data->desc, desc_h);
1652 		data->cmd = cmd_h +  i;
1653 		data->paddr_cmd = paddr_cmd_h +
1654 		    _PTRDIFF(data->cmd, cmd_h);
1655 	}
1656 #ifdef	DEBUG
1657 	dma_p = &ring->data[0].dma_data;
1658 #endif
1659 	IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1660 	    "tx buffer[0][ncookies:%d addr:%lx "
1661 	    "size:%lx]\n",
1662 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1663 	    dma_p->cookie.dmac_size));
1664 
1665 	return (err);
1666 
1667 fail:
1668 	iwh_free_tx_ring(ring);
1669 
1670 	return (err);
1671 }
1672 
1673 /*
1674  * disable TX ring
1675  */
1676 static void
1677 iwh_reset_tx_ring(iwh_sc_t *sc, iwh_tx_ring_t *ring)
1678 {
1679 	iwh_tx_data_t *data;
1680 	int i, n;
1681 
1682 	iwh_mac_access_enter(sc);
1683 
1684 	IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1685 	for (n = 0; n < 200; n++) {
1686 		if (IWH_READ(sc, IWH_FH_TSSR_TX_STATUS_REG) &
1687 		    IWH_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) {
1688 			break;
1689 		}
1690 		DELAY(10);
1691 	}
1692 
1693 #ifdef	DEBUG
1694 	if (200 == n) {
1695 		IWH_DBG((IWH_DEBUG_DMA, "iwh_reset_tx_ring(): "
1696 		    "timeout reset tx ring %d\n",
1697 		    ring->qid));
1698 	}
1699 #endif
1700 
1701 	iwh_mac_access_exit(sc);
1702 
1703 	/*
1704 	 * bypass the sync if the driver is quiesced
1705 	 */
1706 	if (!(sc->sc_flags & IWH_F_QUIESCED)) {
1707 		for (i = 0; i < ring->count; i++) {
1708 			data = &ring->data[i];
1709 			IWH_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1710 		}
1711 	}
1712 
1713 	ring->queued = 0;
1714 	ring->cur = 0;
1715 	ring->desc_cur = 0;
1716 }
1717 
1718 static void
1719 iwh_free_tx_ring(iwh_tx_ring_t *ring)
1720 {
1721 	int i;
1722 
1723 	if (ring->dma_desc.dma_hdl != NULL) {
1724 		IWH_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1725 	}
1726 	iwh_free_dma_mem(&ring->dma_desc);
1727 
1728 	if (ring->dma_cmd.dma_hdl != NULL) {
1729 		IWH_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1730 	}
1731 	iwh_free_dma_mem(&ring->dma_cmd);
1732 
1733 	if (ring->data != NULL) {
1734 		for (i = 0; i < ring->count; i++) {
1735 			if (ring->data[i].dma_data.dma_hdl) {
1736 				IWH_DMA_SYNC(ring->data[i].dma_data,
1737 				    DDI_DMA_SYNC_FORDEV);
1738 			}
1739 			iwh_free_dma_mem(&ring->data[i].dma_data);
1740 		}
1741 		kmem_free(ring->data, ring->count * sizeof (iwh_tx_data_t));
1742 	}
1743 }
1744 
1745 /*
1746  * initialize TX and RX ring
1747  */
1748 static int
1749 iwh_ring_init(iwh_sc_t *sc)
1750 {
1751 	int i, err = DDI_FAILURE;
1752 
1753 	for (i = 0; i < IWH_NUM_QUEUES; i++) {
1754 		if (IWH_CMD_QUEUE_NUM == i) {
1755 			continue;
1756 		}
1757 
1758 		err = iwh_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1759 		    i);
1760 		if (err != DDI_SUCCESS) {
1761 			goto fail;
1762 		}
1763 	}
1764 
1765 	/*
1766 	 * initialize command queue
1767 	 */
1768 	err = iwh_alloc_tx_ring(sc, &sc->sc_txq[IWH_CMD_QUEUE_NUM],
1769 	    TFD_CMD_SLOTS, IWH_CMD_QUEUE_NUM);
1770 	if (err != DDI_SUCCESS) {
1771 		goto fail;
1772 	}
1773 
1774 	err = iwh_alloc_rx_ring(sc);
1775 	if (err != DDI_SUCCESS) {
1776 		goto fail;
1777 	}
1778 
1779 fail:
1780 	return (err);
1781 }
1782 
1783 static void
1784 iwh_ring_free(iwh_sc_t *sc)
1785 {
1786 	int i = IWH_NUM_QUEUES;
1787 
1788 	iwh_free_rx_ring(sc);
1789 	while (--i >= 0) {
1790 		iwh_free_tx_ring(&sc->sc_txq[i]);
1791 	}
1792 }
1793 
1794 /* ARGSUSED */
1795 static ieee80211_node_t *
1796 iwh_node_alloc(ieee80211com_t *ic)
1797 {
1798 	iwh_amrr_t *amrr;
1799 
1800 	amrr = kmem_zalloc(sizeof (iwh_amrr_t), KM_SLEEP);
1801 	if (NULL == amrr) {
1802 		cmn_err(CE_WARN, "iwh_node_alloc(): "
1803 		    "failed to allocate memory for amrr structure\n");
1804 		return (NULL);
1805 	}
1806 
1807 	iwh_amrr_init(amrr);
1808 
1809 	return (&amrr->in);
1810 }
1811 
1812 static void
1813 iwh_node_free(ieee80211_node_t *in)
1814 {
1815 	ieee80211com_t *ic;
1816 
1817 	if ((NULL == in) ||
1818 	    (NULL == in->in_ic)) {
1819 		cmn_err(CE_WARN, "iwh_node_free() "
1820 		    "Got a NULL pointer from net80211 module\n");
1821 		return;
1822 	}
1823 	ic = in->in_ic;
1824 
1825 	if (ic->ic_node_cleanup != NULL) {
1826 		ic->ic_node_cleanup(in);
1827 	}
1828 
1829 	if (in->in_wpa_ie != NULL) {
1830 		ieee80211_free(in->in_wpa_ie);
1831 	}
1832 
1833 	if (in->in_wme_ie != NULL) {
1834 		ieee80211_free(in->in_wme_ie);
1835 	}
1836 
1837 	if (in->in_htcap_ie != NULL) {
1838 		ieee80211_free(in->in_htcap_ie);
1839 	}
1840 
1841 	kmem_free(in, sizeof (iwh_amrr_t));
1842 }
1843 
1844 /*
1845  * Change the station's state. This function is invoked by the net80211
1846  * module when the station's state needs to change.
1847  */
1848 static int
1849 iwh_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1850 {
1851 	iwh_sc_t *sc;
1852 	ieee80211_node_t *in;
1853 	enum ieee80211_state ostate;
1854 	iwh_add_sta_t node;
1855 	iwh_amrr_t *amrr;
1856 	uint8_t r;
1857 	int i, err = IWH_FAIL;
1858 
1859 	if (NULL == ic) {
1860 		return (err);
1861 	}
1862 	sc = (iwh_sc_t *)ic;
1863 	in = ic->ic_bss;
1864 	ostate = ic->ic_state;
1865 
1866 	mutex_enter(&sc->sc_glock);
1867 
1868 	switch (nstate) {
1869 	case IEEE80211_S_SCAN:
1870 		switch (ostate) {
1871 		case IEEE80211_S_INIT:
1872 			atomic_or_32(&sc->sc_flags, IWH_F_SCANNING);
1873 			iwh_set_led(sc, 2, 10, 2);
1874 
1875 			/*
1876 			 * clear association to receive beacons from
1877 			 * all BSSes
1878 			 */
1879 			sc->sc_config.assoc_id = 0;
1880 			sc->sc_config.filter_flags &=
1881 			    ~LE_32(RXON_FILTER_ASSOC_MSK);
1882 
1883 			IWH_DBG((IWH_DEBUG_80211, "iwh_newstate(): "
1884 			    "config chan %d "
1885 			    "flags %x filter_flags %x\n",
1886 			    LE_16(sc->sc_config.chan),
1887 			    LE_32(sc->sc_config.flags),
1888 			    LE_32(sc->sc_config.filter_flags)));
1889 
1890 			err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
1891 			    sizeof (iwh_rxon_cmd_t), 1);
1892 			if (err != IWH_SUCCESS) {
1893 				cmn_err(CE_WARN, "iwh_newstate(): "
1894 				    "could not clear association\n");
1895 				atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1896 				mutex_exit(&sc->sc_glock);
1897 				return (err);
1898 			}
1899 
1900 			/*
1901 			 * add broadcast node to send probe request
1902 			 */
1903 			(void) memset(&node, 0, sizeof (node));
1904 			(void) memset(&node.sta.addr, 0xff, IEEE80211_ADDR_LEN);
1905 			node.sta.sta_id = IWH_BROADCAST_ID;
1906 			err = iwh_cmd(sc, REPLY_ADD_STA, &node,
1907 			    sizeof (node), 1);
1908 			if (err != IWH_SUCCESS) {
1909 				cmn_err(CE_WARN, "iwh_newstate(): "
1910 				    "could not add broadcast node\n");
1911 				atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1912 				mutex_exit(&sc->sc_glock);
1913 				return (err);
1914 			}
1915 			break;
1916 		case IEEE80211_S_SCAN:
1917 			mutex_exit(&sc->sc_glock);
1918 			/* step to next channel before actual FW scan */
1919 			err = sc->sc_newstate(ic, nstate, arg);
1920 			mutex_enter(&sc->sc_glock);
1921 			if ((err != 0) || ((err = iwh_scan(sc)) != 0)) {
1922 				cmn_err(CE_WARN, "iwh_newstate(): "
1923 				    "could not initiate scan\n");
1924 				atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1925 				ieee80211_cancel_scan(ic);
1926 			}
1927 			mutex_exit(&sc->sc_glock);
1928 			return (err);
1929 		default:
1930 			break;
1931 		}
1932 		sc->sc_clk = 0;
1933 		break;
1934 
1935 	case IEEE80211_S_AUTH:
1936 		if (ostate == IEEE80211_S_SCAN) {
1937 			atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1938 		}
1939 
1940 		/*
1941 		 * reset state to handle reassociations correctly
1942 		 */
1943 		sc->sc_config.assoc_id = 0;
1944 		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1945 
1946 		/*
1947 		 * before sending authentication and association request frames,
1948 		 * we need to set up the hardware, such as tuning to the same
1949 		 * channel as the target AP...
1950 		 */
1951 		if ((err = iwh_hw_set_before_auth(sc)) != 0) {
1952 			IWH_DBG((IWH_DEBUG_80211, "iwh_newstate(): "
1953 			    "could not send authentication request\n"));
1954 			mutex_exit(&sc->sc_glock);
1955 			return (err);
1956 		}
1957 		break;
1958 
1959 	case IEEE80211_S_RUN:
1960 		if (ostate == IEEE80211_S_SCAN) {
1961 			atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1962 		}
1963 
1964 		if (IEEE80211_M_MONITOR == ic->ic_opmode) {
1965 			/*
1966 			 * let LED blink when monitoring
1967 			 */
1968 			iwh_set_led(sc, 2, 10, 10);
1969 			break;
1970 		}
1971 
1972 		IWH_DBG((IWH_DEBUG_80211, "iwh_newstate(): "
1973 		    "associated.\n"));
1974 
1975 		err = iwh_run_state_config(sc);
1976 		if (err != IWH_SUCCESS) {
1977 			cmn_err(CE_WARN, "iwh_newstate(): "
1978 			    "failed to set up association\n");
1979 			mutex_exit(&sc->sc_glock);
1980 			return (err);
1981 		}
1982 
1983 		/*
1984 		 * start automatic rate control
1985 		 */
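		/*
		 * A reading aid for the selection below (no new logic): the
		 * loop walks the node's HT rate set from the highest entry
		 * down and picks the first MCS index (<= 0xd) whose bit is
		 * set in sc_ht_conf.tx_support_mcs, treated as a bitmap
		 * indexed by mcs/8 with bit mcs%8.
		 */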
1986 		if ((in->in_flags & IEEE80211_NODE_HT) &&
1987 		    (sc->sc_ht_conf.ht_support) &&
1988 		    (in->in_htrates.rs_nrates > 0) &&
1989 		    (in->in_htrates.rs_nrates <= IEEE80211_HTRATE_MAXSIZE)) {
1990 			amrr = (iwh_amrr_t *)in;
1991 
1992 			for (i = in->in_htrates.rs_nrates - 1; i > 0; i--) {
1993 
1994 				r = in->in_htrates.rs_rates[i] &
1995 				    IEEE80211_RATE_VAL;
1996 				if ((r != 0) && (r <= 0xd) &&
1997 				    (sc->sc_ht_conf.tx_support_mcs[r/8] &
1998 				    (1 << (r%8)))) {
1999 					amrr->ht_mcs_idx = r;
2000 					atomic_or_32(&sc->sc_flags,
2001 					    IWH_F_RATE_AUTO_CTL);
2002 					break;
2003 				}
2004 			}
2005 		} else {
2006 			if (IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) {
2007 				atomic_or_32(&sc->sc_flags,
2008 				    IWH_F_RATE_AUTO_CTL);
2009 
2010 				/*
2011 				 * set rate to some reasonable initial value
2012 				 */
2013 				i = in->in_rates.ir_nrates - 1;
2014 				while (i > 0 && IEEE80211_RATE(i) > 72) {
2015 					i--;
2016 				}
2017 				in->in_txrate = i;
2018 
2019 			} else {
2020 				atomic_and_32(&sc->sc_flags,
2021 				    ~IWH_F_RATE_AUTO_CTL);
2022 			}
2023 		}
2024 
2025 		/*
2026 		 * set LED on after associated
2027 		 */
2028 		iwh_set_led(sc, 2, 0, 1);
2029 		break;
2030 
2031 	case IEEE80211_S_INIT:
2032 		if (ostate == IEEE80211_S_SCAN) {
2033 			atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
2034 		}
2035 		/*
2036 		 * set LED off after init
2037 		 */
2038 		iwh_set_led(sc, 2, 1, 0);
2039 		break;
2040 
2041 	case IEEE80211_S_ASSOC:
2042 		if (ostate == IEEE80211_S_SCAN) {
2043 			atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
2044 		}
2045 		break;
2046 	}
2047 
2048 	mutex_exit(&sc->sc_glock);
2049 
2050 	return (sc->sc_newstate(ic, nstate, arg));
2051 }
2052 
2053 /*
2054  * exclusive access to mac begin.
2055  */
2056 static void
2057 iwh_mac_access_enter(iwh_sc_t *sc)
2058 {
2059 	uint32_t tmp;
2060 	int n;
2061 
2062 	tmp = IWH_READ(sc, CSR_GP_CNTRL);
2063 	IWH_WRITE(sc, CSR_GP_CNTRL,
2064 	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2065 
2066 	/*
2067 	 * wait until we succeed
2068 	 */
2069 	for (n = 0; n < 1000; n++) {
2070 		if ((IWH_READ(sc, CSR_GP_CNTRL) &
2071 		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
2072 		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
2073 		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) {
2074 			break;
2075 		}
2076 		DELAY(10);
2077 	}
2078 
2079 #ifdef	DEBUG
2080 	if (1000 == n) {
2081 		IWH_DBG((IWH_DEBUG_PIO, "iwh_mac_access_enter(): "
2082 		    "could not lock memory\n"));
2083 	}
2084 #endif
2085 }
2086 
2087 /*
2088  * exclusive access to mac end.
2089  */
2090 static void
2091 iwh_mac_access_exit(iwh_sc_t *sc)
2092 {
2093 	uint32_t tmp = IWH_READ(sc, CSR_GP_CNTRL);
2094 	IWH_WRITE(sc, CSR_GP_CNTRL,
2095 	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2096 }
2097 
2098 /*
2099  * this function is defined here for future use.
2100  * static uint32_t
2101  * iwh_mem_read(iwh_sc_t *sc, uint32_t addr)
2102  * {
2103  *	IWH_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
2104  *	return (IWH_READ(sc, HBUS_TARG_MEM_RDAT));
2105  * }
2106  */
2107 
2108 /*
2109  * write mac memory
2110  */
2111 static void
2112 iwh_mem_write(iwh_sc_t *sc, uint32_t addr, uint32_t data)
2113 {
2114 	IWH_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
2115 	IWH_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
2116 }
2117 
2118 /*
2119  * read mac register
2120  */
2121 static uint32_t
2122 iwh_reg_read(iwh_sc_t *sc, uint32_t addr)
2123 {
2124 	IWH_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
2125 	return (IWH_READ(sc, HBUS_TARG_PRPH_RDAT));
2126 }
2127 
2128 /*
2129  * write mac register
2130  */
2131 static void
2132 iwh_reg_write(iwh_sc_t *sc, uint32_t addr, uint32_t data)
2133 {
2134 	IWH_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
2135 	IWH_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
2136 }
2137 
2138 
2139 /*
2140  * steps of loading ucode:
2141  * load init ucode=>init alive=>calibrate=>
2142  * receive calibration result=>reinitialize NIC=>
2143  * load runtime ucode=>runtime alive=>
2144  * send calibration result=>running.
2145  */
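/*
 * Each segment load below follows the same handshake, as implemented in
 * this file: iwh_put_seg_fw() starts the DMA transfer, the BIT_INT_FH_TX
 * interrupt path sets IWH_F_PUT_SEG and signals sc_put_seg_cv, and the
 * loader waits on that cv for up to one second before reporting a timeout.
 */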
2146 static int
2147 iwh_load_init_firmware(iwh_sc_t *sc)
2148 {
2149 	int err = IWH_FAIL;
2150 	clock_t	clk;
2151 
2152 	atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2153 
2154 	/*
2155 	 * load init_text section of uCode to hardware
2156 	 */
2157 	err = iwh_put_seg_fw(sc, sc->sc_dma_fw_init_text.cookie.dmac_address,
2158 	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_init_text.cookie.dmac_size);
2159 	if (err != IWH_SUCCESS) {
2160 		cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2161 		    "failed to write init uCode.\n");
2162 		return (err);
2163 	}
2164 
2165 	clk = ddi_get_lbolt() + drv_usectohz(1000000);
2166 
2167 	/*
2168 	 * wait until loading of init_text completes or times out
2169 	 */
2170 	while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2171 		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2172 			break;
2173 		}
2174 	}
2175 
2176 	if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2177 		cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2178 		    "timeout waiting for init uCode load.\n");
2179 		return (IWH_FAIL);
2180 	}
2181 
2182 	atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2183 
2184 	/*
2185 	 * load init_data section of uCode to hardware
2186 	 */
2187 	err = iwh_put_seg_fw(sc, sc->sc_dma_fw_init_data.cookie.dmac_address,
2188 	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_init_data.cookie.dmac_size);
2189 	if (err != IWH_SUCCESS) {
2190 		cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2191 		    "failed to write init_data uCode.\n");
2192 		return (err);
2193 	}
2194 
2195 	clk = ddi_get_lbolt() + drv_usectohz(1000000);
2196 
2197 	/*
2198 	 * wait until loading of init_data completes or times out
2199 	 */
2200 	while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2201 		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2202 			break;
2203 		}
2204 	}
2205 
2206 	if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2207 		cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2208 		    "timeout waiting for init_data uCode load.\n");
2209 		return (IWH_FAIL);
2210 	}
2211 
2212 	atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2213 
2214 	return (err);
2215 }
2216 
2217 static int
2218 iwh_load_run_firmware(iwh_sc_t *sc)
2219 {
2220 	int err = IWH_FAIL;
2221 	clock_t	clk;
2222 
2223 	atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2224 
2225 	/*
2226 	 * load runtime text section of uCode to hardware
2227 	 */
2228 	err = iwh_put_seg_fw(sc, sc->sc_dma_fw_text.cookie.dmac_address,
2229 	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_text.cookie.dmac_size);
2230 	if (err != IWH_SUCCESS) {
2231 		cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2232 		    "failed to write run uCode.\n");
2233 		return (err);
2234 	}
2235 
2236 	clk = ddi_get_lbolt() + drv_usectohz(1000000);
2237 
2238 	/*
2239 	 * wait until loading of run_text completes or times out
2240 	 */
2241 	while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2242 		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2243 			break;
2244 		}
2245 	}
2246 
2247 	if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2248 		cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2249 		    "timeout waiting for run uCode load.\n");
2250 		return (IWH_FAIL);
2251 	}
2252 
2253 	atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2254 
2255 	/*
2256 	 * load run_data section of uCode to hardware
2257 	 */
2258 	err = iwh_put_seg_fw(sc, sc->sc_dma_fw_data_bak.cookie.dmac_address,
2259 	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_data.cookie.dmac_size);
2260 	if (err != IWH_SUCCESS) {
2261 		cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2262 		    "failed to write run_data uCode.\n");
2263 		return (err);
2264 	}
2265 
2266 	clk = ddi_get_lbolt() + drv_usectohz(1000000);
2267 
2268 	/*
2269 	 * wait until loading of run_data completes or times out
2270 	 */
2271 	while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2272 		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2273 			break;
2274 		}
2275 	}
2276 
2277 	if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2278 		cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2279 		    "timeout waiting for run_data uCode load.\n");
2280 		return (IWH_FAIL);
2281 	}
2282 
2283 	atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2284 
2285 	return (err);
2286 }
2287 
2288 /*
2289  * this function will be invoked to receive phy information
2290  * when a frame is received.
2291  */
2292 static void
2293 iwh_rx_phy_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2294 {
2295 
2296 	sc->sc_rx_phy_res.flag = 1;
2297 
2298 	bcopy((uint8_t *)(desc + 1), sc->sc_rx_phy_res.buf,
2299 	    sizeof (iwh_rx_phy_res_t));
2300 }
2301 
2302 /*
2303  * this function will be invoked to receive body of frame when
2304  * a frame is received.
2305  */
2306 static void
2307 iwh_rx_mpdu_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2308 {
2309 	ieee80211com_t *ic = &sc->sc_ic;
2310 #ifdef	DEBUG
2311 	iwh_rx_ring_t *ring = &sc->sc_rxq;
2312 #endif
2313 	struct ieee80211_frame *wh;
2314 	struct iwh_rx_non_cfg_phy *phyinfo;
2315 	struct iwh_rx_mpdu_body_size *mpdu_size;
2316 	mblk_t *mp;
2317 	int16_t t;
2318 	uint16_t len, rssi, agc;
2319 	uint32_t temp, crc, *tail;
2320 	uint32_t arssi, brssi, crssi, mrssi;
2321 	iwh_rx_phy_res_t *stat;
2322 	ieee80211_node_t *in;
2323 
2324 	/*
2325 	 * assuming non-11n here; 11n will be handled in phase-II
2326 	 */
2327 	mpdu_size = (struct iwh_rx_mpdu_body_size *)(desc + 1);
2328 	stat = (iwh_rx_phy_res_t *)sc->sc_rx_phy_res.buf;
2329 	if (stat->cfg_phy_cnt > 20) {
2330 		return;
2331 	}
2332 
2333 	phyinfo = (struct iwh_rx_non_cfg_phy *)stat->non_cfg_phy;
2334 	temp = LE_32(phyinfo->non_cfg_phy[IWH_RX_RES_AGC_IDX]);
2335 	agc = (temp & IWH_OFDM_AGC_MSK) >> IWH_OFDM_AGC_BIT_POS;
2336 
2337 	temp = LE_32(phyinfo->non_cfg_phy[IWH_RX_RES_RSSI_AB_IDX]);
2338 	arssi = (temp & IWH_OFDM_RSSI_A_MSK) >> IWH_OFDM_RSSI_A_BIT_POS;
2339 	brssi = (temp & IWH_OFDM_RSSI_B_MSK) >> IWH_OFDM_RSSI_B_BIT_POS;
2340 
2341 	temp = LE_32(phyinfo->non_cfg_phy[IWH_RX_RES_RSSI_C_IDX]);
2342 	crssi = (temp & IWH_OFDM_RSSI_C_MSK) >> IWH_OFDM_RSSI_C_BIT_POS;
2343 
2344 	mrssi = MAX(arssi, brssi);
2345 	mrssi = MAX(mrssi, crssi);
2346 
2347 	t = mrssi - agc - IWH_RSSI_OFFSET;
2348 	/*
2349 	 * convert dBm to percentage
2350 	 */
2351 	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
2352 	    / (75 * 75);
2353 	if (rssi > 100) {
2354 		rssi = 100;
2355 	}
2356 	if (rssi < 1) {
2357 		rssi = 1;
2358 	}
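	/*
	 * Worked example of the conversion above, derived from the formula
	 * itself: t = -20 maps to 100%, t = -95 maps to about 23%, and the
	 * result is clamped to the range [1, 100].
	 */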
2359 
2360 	/*
2361 	 * size of frame, not including the FCS
2362 	 */
2363 	len = LE_16(mpdu_size->byte_count);
2364 	tail = (uint32_t *)((uint8_t *)(desc + 1) +
2365 	    sizeof (struct iwh_rx_mpdu_body_size) + len);
2366 	bcopy(tail, &crc, 4);
2367 
2368 	IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2369 	    "rx intr: idx=%d phy_len=%x len=%d "
2370 	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
2371 	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
2372 	    len, stat->rate.r.s.rate, stat->channel,
2373 	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
2374 	    stat->cfg_phy_cnt, LE_32(crc)));
2375 
2376 	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
2377 		IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2378 		    "rx frame oversize\n"));
2379 		return;
2380 	}
2381 
2382 	/*
2383 	 * discard Rx frames with bad CRC
2384 	 */
2385 	if ((LE_32(crc) &
2386 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
2387 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2388 		IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2389 		    "rx crc error tail: %x\n",
2390 		    LE_32(crc)));
2391 		sc->sc_rx_err++;
2392 		return;
2393 	}
2394 
2395 	wh = (struct ieee80211_frame *)
2396 	    ((uint8_t *)(desc + 1)+ sizeof (struct iwh_rx_mpdu_body_size));
2397 
2398 	if (IEEE80211_FC0_SUBTYPE_ASSOC_RESP == *(uint8_t *)wh) {
2399 		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
2400 		IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2401 		    "rx : association id = %x\n",
2402 		    sc->sc_assoc_id));
2403 	}
2404 
2405 #ifdef DEBUG
2406 	if (iwh_dbg_flags & IWH_DEBUG_RX) {
2407 		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
2408 	}
2409 #endif
2410 
2411 	in = ieee80211_find_rxnode(ic, wh);
2412 	mp = allocb(len, BPRI_MED);
2413 	if (mp) {
2414 		bcopy(wh, mp->b_wptr, len);
2415 		mp->b_wptr += len;
2416 
2417 		/*
2418 		 * send the frame to the 802.11 layer
2419 		 */
2420 		(void) ieee80211_input(ic, mp, in, rssi, 0);
2421 	} else {
2422 		sc->sc_rx_nobuf++;
2423 		IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2424 		    "alloc rx buf failed\n"));
2425 	}
2426 
2427 	/*
2428 	 * release node reference
2429 	 */
2430 	ieee80211_free_node(in);
2431 }
2432 
2433 /*
2434  * post-transmit processing after a frame has been sent.
2435  */
2436 static void
2437 iwh_tx_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2438 {
2439 	ieee80211com_t *ic = &sc->sc_ic;
2440 	iwh_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2441 	iwh_tx_stat_t *stat = (iwh_tx_stat_t *)(desc + 1);
2442 	iwh_amrr_t *amrr;
2443 
2444 	if (NULL == ic->ic_bss) {
2445 		return;
2446 	}
2447 
2448 	amrr = (iwh_amrr_t *)ic->ic_bss;
2449 
2450 	amrr->txcnt++;
2451 	IWH_DBG((IWH_DEBUG_RATECTL, "iwh_tx_intr(): "
2452 	    "tx: %d cnt\n", amrr->txcnt));
2453 
2454 	if (stat->ntries > 0) {
2455 		amrr->retrycnt++;
2456 		sc->sc_tx_retries++;
2457 		IWH_DBG((IWH_DEBUG_TX, "iwh_tx_intr(): "
2458 		    "tx: %d retries\n",
2459 		    sc->sc_tx_retries));
2460 	}
2461 
2462 	mutex_enter(&sc->sc_mt_lock);
2463 	sc->sc_tx_timer = 0;
2464 	mutex_exit(&sc->sc_mt_lock);
2465 
2466 	mutex_enter(&sc->sc_tx_lock);
2467 
2468 	ring->queued--;
2469 	if (ring->queued < 0) {
2470 		ring->queued = 0;
2471 	}
2472 
2473 	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count >> 3))) {
2474 		sc->sc_need_reschedule = 0;
2475 		mutex_exit(&sc->sc_tx_lock);
2476 		mac_tx_update(ic->ic_mach);
2477 		mutex_enter(&sc->sc_tx_lock);
2478 	}
2479 
2480 	mutex_exit(&sc->sc_tx_lock);
2481 }
2482 
2483 /*
2484  * signal that a given command has been executed
2485  */
2486 static void
2487 iwh_cmd_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2488 {
2489 	if ((desc->hdr.qid & 7) != 4) {
2490 		return;
2491 	}
2492 
2493 	if (sc->sc_cmd_accum > 0) {
2494 		sc->sc_cmd_accum--;
2495 		return;
2496 	}
2497 
2498 	mutex_enter(&sc->sc_glock);
2499 
2500 	sc->sc_cmd_flag = SC_CMD_FLG_DONE;
2501 
2502 	cv_signal(&sc->sc_cmd_cv);
2503 
2504 	mutex_exit(&sc->sc_glock);
2505 
2506 	IWH_DBG((IWH_DEBUG_CMD, "iwh_cmd_intr(): "
2507 	    "qid=%x idx=%d flags=%x type=0x%x\n",
2508 	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2509 	    desc->hdr.type));
2510 }
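/*
 * Note on sc_cmd_accum above (see iwh_cmd() later in this file): each
 * asynchronous command increments the counter, so responses to async
 * commands are simply consumed here; only a synchronous command's
 * response signals sc_cmd_cv to wake the waiting caller.
 */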
2511 
2512 /*
2513  * this function will be invoked when an alive notification occurs.
2514  */
2515 static void
2516 iwh_ucode_alive(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2517 {
2518 	uint32_t rv;
2519 	struct iwh_calib_cfg_cmd cmd;
2520 	struct iwh_alive_resp *ar =
2521 	    (struct iwh_alive_resp *)(desc + 1);
2522 	struct iwh_calib_results *res_p = &sc->sc_calib_results;
2523 
2524 	/*
2525 	 * the microcontroller is ready
2526 	 */
2527 	IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2528 	    "microcode alive notification minor: %x major: %x type: "
2529 	    "%x subtype: %x\n",
2530 	    ar->ucode_minor, ar->ucode_major, ar->ver_type, ar->ver_subtype));
2531 
2532 #ifdef	DEBUG
2533 	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2534 		IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2535 		    "microcontroller initialization failed\n"));
2536 	}
2537 #endif
2538 
2539 	/*
2540 	 * determine if init alive or runtime alive.
2541 	 */
2542 	if (INITIALIZE_SUBTYPE == ar->ver_subtype) {
2543 		IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2544 		    "initialization alive received.\n"));
2545 
2546 		bcopy(ar, &sc->sc_card_alive_init,
2547 		    sizeof (struct iwh_init_alive_resp));
2548 
2549 		/*
2550 		 * necessary configuration to NIC
2551 		 */
2552 		mutex_enter(&sc->sc_glock);
2553 
2554 		rv = iwh_alive_common(sc);
2555 		if (rv != IWH_SUCCESS) {
2556 			cmn_err(CE_WARN, "iwh_ucode_alive(): "
2557 			    "common alive process failed in init alive.\n");
2558 			mutex_exit(&sc->sc_glock);
2559 			return;
2560 		}
2561 
2562 		(void) memset(&cmd, 0, sizeof (cmd));
2563 
2564 		cmd.ucd_calib_cfg.once.is_enable = IWH_CALIB_INIT_CFG_ALL;
2565 		cmd.ucd_calib_cfg.once.start = IWH_CALIB_INIT_CFG_ALL;
2566 		cmd.ucd_calib_cfg.once.send_res = IWH_CALIB_INIT_CFG_ALL;
2567 		cmd.ucd_calib_cfg.flags = IWH_CALIB_INIT_CFG_ALL;
2568 
2569 		/*
2570 		 * request that the ucode execute calibration
2571 		 */
2572 		rv = iwh_cmd(sc, CALIBRATION_CFG_CMD, &cmd, sizeof (cmd), 1);
2573 		if (rv != IWH_SUCCESS) {
2574 			cmn_err(CE_WARN, "iwh_ucode_alive(): "
2575 			    "failed to send calibration configure command.\n");
2576 			mutex_exit(&sc->sc_glock);
2577 			return;
2578 		}
2579 
2580 		mutex_exit(&sc->sc_glock);
2581 
2582 	} else {	/* runtime alive */
2583 
2584 		IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2585 		    "runtime alive received.\n"));
2586 
2587 		bcopy(ar, &sc->sc_card_alive_run,
2588 		    sizeof (struct iwh_alive_resp));
2589 
2590 		mutex_enter(&sc->sc_glock);
2591 
2592 		/*
2593 		 * necessary configuration to NIC
2594 		 */
2595 		rv = iwh_alive_common(sc);
2596 		if (rv != IWH_SUCCESS) {
2597 			cmn_err(CE_WARN, "iwh_ucode_alive(): "
2598 			    "common alive process failed in run alive.\n");
2599 			mutex_exit(&sc->sc_glock);
2600 			return;
2601 		}
2602 
2603 		/*
2604 		 * send the result of local oscillator calibration to uCode.
2605 		 */
2606 		if (res_p->lo_res != NULL) {
2607 			rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2608 			    res_p->lo_res, res_p->lo_res_len, 1);
2609 			if (rv != IWH_SUCCESS) {
2610 				cmn_err(CE_WARN, "iwh_ucode_alive(): "
2611 				    "failed to send local"
2612 				    "oscilator calibration command.\n");
2613 				mutex_exit(&sc->sc_glock);
2614 				return;
2615 			}
2616 
2617 			DELAY(1000);
2618 		}
2619 
2620 		/*
2621 		 * send the result of TX IQ calibration to uCode.
2622 		 */
2623 		if (res_p->tx_iq_res != NULL) {
2624 			rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2625 			    res_p->tx_iq_res, res_p->tx_iq_res_len, 1);
2626 			if (rv != IWH_SUCCESS) {
2627 				cmn_err(CE_WARN, "iwh_ucode_alive(): "
2628 				    "failed to send TX IQ"
2629 				    "calibration command.\n");
2630 				mutex_exit(&sc->sc_glock);
2631 				return;
2632 			}
2633 
2634 			DELAY(1000);
2635 		}
2636 
2637 		/*
2638 		 * send the result of TX IQ perd calibration to uCode.
2639 		 */
2640 		if (res_p->tx_iq_perd_res != NULL) {
2641 			rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2642 			    res_p->tx_iq_perd_res,
2643 			    res_p->tx_iq_perd_res_len, 1);
2644 			if (rv != IWH_SUCCESS) {
2645 				cmn_err(CE_WARN, "iwh_ucode_alive(): "
2646 				    "failed to send TX IQ perd"
2647 				    "calibration command.\n");
2648 				mutex_exit(&sc->sc_glock);
2649 				return;
2650 			}
2651 
2652 			DELAY(1000);
2653 		}
2654 
2655 		/*
2656 		 * send the result of DC calibration to uCode.
2657 		 */
2658 		if (res_p->dc_res != NULL) {
2659 			rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2660 			    res_p->dc_res,
2661 			    res_p->dc_res_len, 1);
2662 			if (rv != IWH_SUCCESS) {
2663 				cmn_err(CE_WARN, "iwh_ucode_alive(): "
2664 				    "failed to send DC"
2665 				    "calibration command.\n");
2666 				mutex_exit(&sc->sc_glock);
2667 				return;
2668 			}
2669 
2670 			DELAY(1000);
2671 		}
2672 
2673 		/*
2674 		 * send the result of BASE BAND calibration to uCode.
2675 		 */
2676 		if (res_p->base_band_res != NULL) {
2677 			rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2678 			    res_p->base_band_res,
2679 			    res_p->base_band_res_len, 1);
2680 			if (rv != IWH_SUCCESS) {
2681 				cmn_err(CE_WARN, "iwh_ucode_alive(): "
2682 				    "failed to send BASE BAND"
2683 				    "calibration command.\n");
2684 				mutex_exit(&sc->sc_glock);
2685 				return;
2686 			}
2687 
2688 			DELAY(1000);
2689 		}
2690 
2691 		atomic_or_32(&sc->sc_flags, IWH_F_FW_INIT);
2692 		cv_signal(&sc->sc_ucode_cv);
2693 
2694 		mutex_exit(&sc->sc_glock);
2695 	}
2696 
2697 }
2698 
2699 /*
2700  * deal with received frames, command responses
2701  * and all notifications from the ucode.
2702  */
2703 /* ARGSUSED */
2704 static uint_t
2705 iwh_rx_softintr(caddr_t arg, caddr_t unused)
2706 {
2707 	iwh_sc_t *sc;
2708 	ieee80211com_t *ic;
2709 	iwh_rx_desc_t *desc;
2710 	iwh_rx_data_t *data;
2711 	uint32_t index;
2712 
2713 	if (NULL == arg) {
2714 		return (DDI_INTR_UNCLAIMED);
2715 	}
2716 	sc = (iwh_sc_t *)arg;
2717 	ic = &sc->sc_ic;
2718 
2719 	/*
2720 	 * the firmware has advanced the rx queue index; the driver reads
2721 	 * it and processes every entry up to that point.
2722 	 */
2723 	index = (sc->sc_shared->val0) & 0xfff;
2724 
2725 	while (sc->sc_rxq.cur != index) {
2726 		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2727 		desc = (iwh_rx_desc_t *)data->dma_data.mem_va;
2728 
2729 		IWH_DBG((IWH_DEBUG_INTR, "iwh_rx_softintr(): "
2730 		    "rx notification index = %d"
2731 		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2732 		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2733 		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2734 
2735 		/*
2736 		 * a command other than scan and tx needs to be acknowledged
2737 		 */
2738 		if (!(desc->hdr.qid & 0x80) &&
2739 		    (desc->hdr.type != REPLY_SCAN_CMD) &&
2740 		    (desc->hdr.type != REPLY_TX)) {
2741 			iwh_cmd_intr(sc, desc);
2742 		}
2743 
2744 		switch (desc->hdr.type) {
2745 		case REPLY_RX_PHY_CMD:
2746 			iwh_rx_phy_intr(sc, desc);
2747 			break;
2748 
2749 		case REPLY_RX_MPDU_CMD:
2750 			iwh_rx_mpdu_intr(sc, desc);
2751 			break;
2752 
2753 		case REPLY_TX:
2754 			iwh_tx_intr(sc, desc);
2755 			break;
2756 
2757 		case REPLY_ALIVE:
2758 			iwh_ucode_alive(sc, desc);
2759 			break;
2760 
2761 		case CARD_STATE_NOTIFICATION:
2762 		{
2763 			uint32_t *status = (uint32_t *)(desc + 1);
2764 
2765 			IWH_DBG((IWH_DEBUG_RADIO, "iwh_rx_softintr(): "
2766 			    "state changed to %x\n",
2767 			    LE_32(*status)));
2768 
2769 			if (LE_32(*status) & 1) {
2770 				/*
2771 				 * the radio button has been pushed (OFF). this
2772 				 * is treated as a hw error; iwh_thread()
2773 				 * tries to recover once the button is
2774 				 * pushed again (ON)
2775 				 */
2776 				cmn_err(CE_NOTE, "iwh_rx_softintr(): "
2777 				    "radio transmitter is off\n");
2778 				sc->sc_ostate = sc->sc_ic.ic_state;
2779 				ieee80211_new_state(&sc->sc_ic,
2780 				    IEEE80211_S_INIT, -1);
2781 				atomic_or_32(&sc->sc_flags,
2782 				    (IWH_F_HW_ERR_RECOVER | IWH_F_RADIO_OFF));
2783 			}
2784 
2785 			break;
2786 		}
2787 
2788 		case SCAN_START_NOTIFICATION:
2789 		{
2790 			iwh_start_scan_t *scan =
2791 			    (iwh_start_scan_t *)(desc + 1);
2792 
2793 			IWH_DBG((IWH_DEBUG_SCAN, "iwh_rx_softintr(): "
2794 			    "scanning channel %d status %x\n",
2795 			    scan->chan, LE_32(scan->status)));
2796 
2797 			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2798 			break;
2799 		}
2800 
2801 		case SCAN_COMPLETE_NOTIFICATION:
2802 		{
2803 #ifdef	DEBUG
2804 			iwh_stop_scan_t *scan =
2805 			    (iwh_stop_scan_t *)(desc + 1);
2806 
2807 			IWH_DBG((IWH_DEBUG_SCAN, "iwh_rx_softintr(): "
2808 			    "completed channel %d (burst of %d) status %02x\n",
2809 			    scan->chan, scan->nchan, scan->status));
2810 #endif
2811 
2812 			sc->sc_scan_pending++;
2813 			break;
2814 		}
2815 
2816 		case STATISTICS_NOTIFICATION:
2817 		{
2818 			/*
2819 			 * handle statistics notification
2820 			 */
2821 			break;
2822 		}
2823 
2824 		case CALIBRATION_RES_NOTIFICATION:
2825 			iwh_save_calib_result(sc, desc);
2826 			break;
2827 
2828 		case CALIBRATION_COMPLETE_NOTIFICATION:
2829 			mutex_enter(&sc->sc_glock);
2830 			atomic_or_32(&sc->sc_flags, IWH_F_FW_INIT);
2831 			cv_signal(&sc->sc_ucode_cv);
2832 			mutex_exit(&sc->sc_glock);
2833 			break;
2834 
2835 		case MISSED_BEACONS_NOTIFICATION:
2836 			/* handle beacon miss by software mechanism */
2837 			break;
2838 		}
2839 
2840 		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2841 	}
2842 
2843 	/*
2844 	 * the driver has dealt with what it received from the rx queue;
2845 	 * tell the firmware how far it has read.
2846 	 */
2847 	index = (0 == index) ? RX_QUEUE_SIZE - 1 : index - 1;
2848 	IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
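	/*
	 * The & (~7) keeps the value written to the write pointer register
	 * aligned to a multiple of 8 entries (an alignment the hardware
	 * appears to require; inferred from the mask used above).
	 */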
2849 
2850 	/*
2851 	 * re-enable interrupts
2852 	 */
2853 	IWH_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2854 
2855 	return (DDI_INTR_CLAIMED);
2856 }
2857 
2858 /*
2859  * interrupt handler
2860  */
2861 /* ARGSUSED */
2862 static uint_t
2863 iwh_intr(caddr_t arg, caddr_t unused)
2864 {
2865 	iwh_sc_t *sc;
2866 	uint32_t r, rfh;
2867 
2868 	if (NULL == arg) {
2869 		return (DDI_INTR_UNCLAIMED);
2870 	}
2871 	sc = (iwh_sc_t *)arg;
2872 
2873 	r = IWH_READ(sc, CSR_INT);
2874 	if (0 == r || 0xffffffff == r) {
2875 		return (DDI_INTR_UNCLAIMED);
2876 	}
2877 
2878 	IWH_DBG((IWH_DEBUG_INTR, "iwh_intr(): "
2879 	    "interrupt reg %x\n", r));
2880 
2881 	rfh = IWH_READ(sc, CSR_FH_INT_STATUS);
2882 
2883 	IWH_DBG((IWH_DEBUG_INTR, "iwh_intr(): "
2884 	    "FH interrupt reg %x\n", rfh));
2885 
2886 	/*
2887 	 * disable interrupts
2888 	 */
2889 	IWH_WRITE(sc, CSR_INT_MASK, 0);
2890 
2891 	/*
2892 	 * ack interrupts
2893 	 */
2894 	IWH_WRITE(sc, CSR_INT, r);
2895 	IWH_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2896 
2897 	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2898 		IWH_DBG((IWH_DEBUG_FW, "iwh_intr(): "
2899 		    "fatal firmware error\n"));
2900 		iwh_stop(sc);
2901 		sc->sc_ostate = sc->sc_ic.ic_state;
2902 
2903 		/*
2904 		 * notify upper layer
2905 		 */
2906 		if (!IWH_CHK_FAST_RECOVER(sc)) {
2907 			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2908 		}
2909 
2910 		atomic_or_32(&sc->sc_flags, IWH_F_HW_ERR_RECOVER);
2911 		return (DDI_INTR_CLAIMED);
2912 	}
2913 
2914 	if (r & BIT_INT_RF_KILL) {
2915 		uint32_t tmp = IWH_READ(sc, CSR_GP_CNTRL);
2916 		if (tmp & (1 << 27)) {
2917 			cmn_err(CE_NOTE, "RF switch: radio on\n");
2918 		}
2919 	}
2920 
2921 	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2922 	    (rfh & FH_INT_RX_MASK)) {
2923 		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2924 		return (DDI_INTR_CLAIMED);
2925 	}
2926 
2927 	if (r & BIT_INT_FH_TX) {
2928 		mutex_enter(&sc->sc_glock);
2929 		atomic_or_32(&sc->sc_flags, IWH_F_PUT_SEG);
2930 		cv_signal(&sc->sc_put_seg_cv);
2931 		mutex_exit(&sc->sc_glock);
2932 	}
2933 
2934 #ifdef	DEBUG
2935 	if (r & BIT_INT_ALIVE)	{
2936 		IWH_DBG((IWH_DEBUG_FW, "iwh_intr(): "
2937 		    "firmware initialized.\n"));
2938 	}
2939 #endif
2940 
2941 	/*
2942 	 * re-enable interrupts
2943 	 */
2944 	IWH_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2945 
2946 	return (DDI_INTR_CLAIMED);
2947 }
2948 
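/*
 * Map a net80211 rate (in the usual 500 kb/s units, so 2 = 1 Mb/s CCK and
 * 108 = 54 Mb/s OFDM) to the PLCP rate code carried in the tx command;
 * unknown rates map to 0.
 */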
2949 static uint8_t
2950 iwh_rate_to_plcp(int rate)
2951 {
2952 	uint8_t ret;
2953 
2954 	switch (rate) {
2955 	/*
2956 	 * CCK rates
2957 	 */
2958 	case 2:
2959 		ret = 0xa;
2960 		break;
2961 
2962 	case 4:
2963 		ret = 0x14;
2964 		break;
2965 
2966 	case 11:
2967 		ret = 0x37;
2968 		break;
2969 
2970 	case 22:
2971 		ret = 0x6e;
2972 		break;
2973 
2974 	/*
2975 	 * OFDM rates
2976 	 */
2977 	case 12:
2978 		ret = 0xd;
2979 		break;
2980 
2981 	case 18:
2982 		ret = 0xf;
2983 		break;
2984 
2985 	case 24:
2986 		ret = 0x5;
2987 		break;
2988 
2989 	case 36:
2990 		ret = 0x7;
2991 		break;
2992 
2993 	case 48:
2994 		ret = 0x9;
2995 		break;
2996 
2997 	case 72:
2998 		ret = 0xb;
2999 		break;
3000 
3001 	case 96:
3002 		ret = 0x1;
3003 		break;
3004 
3005 	case 108:
3006 		ret = 0x3;
3007 		break;
3008 
3009 	default:
3010 		ret = 0;
3011 		break;
3012 	}
3013 
3014 	return (ret);
3015 }
3016 
3017 /*
3018  * invoked by GLD to send frames
3019  */
3020 static mblk_t *
3021 iwh_m_tx(void *arg, mblk_t *mp)
3022 {
3023 	iwh_sc_t *sc;
3024 	ieee80211com_t *ic;
3025 	mblk_t *next;
3026 
3027 	if (NULL == arg) {
3028 		return (NULL);
3029 	}
3030 	sc = (iwh_sc_t *)arg;
3031 	ic = &sc->sc_ic;
3032 
3033 	if (sc->sc_flags & IWH_F_SUSPEND) {
3034 		freemsgchain(mp);
3035 		return (NULL);
3036 	}
3037 
3038 	if (ic->ic_state != IEEE80211_S_RUN) {
3039 		freemsgchain(mp);
3040 		return (NULL);
3041 	}
3042 
3043 	if ((sc->sc_flags & IWH_F_HW_ERR_RECOVER) &&
3044 	    IWH_CHK_FAST_RECOVER(sc)) {
3045 		IWH_DBG((IWH_DEBUG_FW, "iwh_m_tx(): "
3046 		    "hold queue\n"));
3047 		return (mp);
3048 	}
3049 
3050 	while (mp != NULL) {
3051 		next = mp->b_next;
3052 		mp->b_next = NULL;
3053 		if (iwh_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
3054 			mp->b_next = next;
3055 			break;
3056 		}
3057 		mp = next;
3058 	}
3059 
3060 	return (mp);
3061 }
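/*
 * A note on the return convention above (standard mac(9E) tx entry point
 * behavior): consumed frames were sent or dropped, while a returned chain
 * tells the MAC layer to hold off; iwh_tx_intr() later calls
 * mac_tx_update() once descriptors free up and sc_need_reschedule is set.
 */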
3062 
3063 /*
3064  * send frames
3065  */
3066 static int
3067 iwh_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
3068 {
3069 	iwh_sc_t *sc;
3070 	iwh_tx_ring_t *ring;
3071 	iwh_tx_desc_t *desc;
3072 	iwh_tx_data_t *data;
3073 	iwh_tx_data_t *desc_data;
3074 	iwh_cmd_t *cmd;
3075 	iwh_tx_cmd_t *tx;
3076 	ieee80211_node_t *in;
3077 	struct ieee80211_frame *wh, *mp_wh;
3078 	struct ieee80211_key *k = NULL;
3079 	mblk_t *m, *m0;
3080 	int hdrlen, len, len0, mblen, off, err = IWH_SUCCESS;
3081 	uint16_t masks = 0;
3082 	uint32_t rate, s_id = 0;
3083 	int txq_id = NON_QOS_TXQ;
3084 	struct ieee80211_qosframe *qwh = NULL;
3085 	int tid = WME_TID_INVALID;
3086 
3087 	if (ic == NULL) {
3088 		return (IWH_FAIL);
3089 	}
3090 	rate = 0;
3091 	sc = (iwh_sc_t *)ic;
3092 
3093 	if (sc->sc_flags & IWH_F_SUSPEND) {
3094 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
3095 		    IEEE80211_FC0_TYPE_DATA) {
3096 			freemsg(mp);
3097 		}
3098 		err = IWH_FAIL;
3099 		goto exit;
3100 	}
3101 
3102 	if ((NULL == mp) || (MBLKL(mp) <= 0)) {
3103 		return (IWH_FAIL);
3104 	}
3105 
3106 	mp_wh = (struct ieee80211_frame *)mp->b_rptr;
3107 
3108 	/*
3109 	 * Determine which AP, or which station in IBSS, to send to.
3110 	 */
3111 	in = ieee80211_find_txnode(ic, mp_wh->i_addr1);
3112 	if (NULL == in) {
3113 		cmn_err(CE_WARN, "iwh_send(): "
3114 		    "failed to find tx node\n");
3115 		freemsg(mp);
3116 		sc->sc_tx_err++;
3117 		err = IWH_SUCCESS;
3118 		goto exit;
3119 	}
3120 
3121 	/*
3122 	 * Determine TX queue according to traffic ID in frame
3123 	 * if working in QoS mode.
3124 	 */
3125 	if (in->in_flags & IEEE80211_NODE_QOS) {
3126 
3127 		if ((type & IEEE80211_FC0_TYPE_MASK) ==
3128 		    IEEE80211_FC0_TYPE_DATA) {
3129 
3130 			if (mp_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
3131 				qwh = (struct ieee80211_qosframe *)mp_wh;
3132 
3133 				tid = qwh->i_qos[0] & IEEE80211_QOS_TID;
3134 				txq_id = iwh_wme_tid_to_txq(tid);
3135 
3136 				if (txq_id < TXQ_FOR_AC_MIN ||
3137 				    (txq_id > TXQ_FOR_AC_MAX)) {
3138 					freemsg(mp);
3139 					sc->sc_tx_err++;
3140 					err = IWH_SUCCESS;
3141 					goto exit;
3142 				}
3143 
3144 			} else {
3145 				txq_id = NON_QOS_TXQ;
3146 			}
3147 
3148 		} else if ((type & IEEE80211_FC0_TYPE_MASK) ==
3149 		    IEEE80211_FC0_TYPE_MGT) {
3150 			txq_id = QOS_TXQ_FOR_MGT;
3151 		} else {
3152 			txq_id = NON_QOS_TXQ;
3153 		}
3154 
3155 	} else {
3156 		txq_id = NON_QOS_TXQ;
3157 	}
3158 
3159 	mutex_enter(&sc->sc_tx_lock);
3160 	ring = &sc->sc_txq[txq_id];
3161 	data = &ring->data[ring->cur];
3162 	cmd = data->cmd;
3163 	bzero(cmd, sizeof (*cmd));
3164 
3165 	ring->cur = (ring->cur + 1) % ring->count;
3166 
3167 	/*
3168 	 * Need to reschedule TX if the TX buffer is full.
3169 	 */
3170 	if (ring->queued > ring->count - IWH_MAX_WIN_SIZE) {
3171 		IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3172 		"no txbuf\n"));
3173 
3174 		sc->sc_need_reschedule = 1;
3175 		mutex_exit(&sc->sc_tx_lock);
3176 
3177 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
3178 		    IEEE80211_FC0_TYPE_DATA) {
3179 			freemsg(mp);
3180 		}
3181 		sc->sc_tx_nobuf++;
3182 		err = IWH_FAIL;
3183 		goto exit;
3184 	}
3185 
3186 	ring->queued++;
3187 
3188 	mutex_exit(&sc->sc_tx_lock);
3189 
3190 	hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);
3191 
3192 	m = allocb(msgdsize(mp) + 32, BPRI_MED);
3193 	if (NULL == m) { /* cannot alloc buf, drop this packet */
3194 		cmn_err(CE_WARN, "iwh_send(): "
3195 		    "failed to allocate msgbuf\n");
3196 		freemsg(mp);
3197 
3198 		mutex_enter(&sc->sc_tx_lock);
3199 		ring->queued--;
3200 		if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3201 			sc->sc_need_reschedule = 0;
3202 			mutex_exit(&sc->sc_tx_lock);
3203 			mac_tx_update(ic->ic_mach);
3204 			mutex_enter(&sc->sc_tx_lock);
3205 		}
3206 		mutex_exit(&sc->sc_tx_lock);
3207 
3208 		err = IWH_SUCCESS;
3209 		goto exit;
3210 	}
3211 
3212 	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
3213 		mblen = MBLKL(m0);
3214 		bcopy(m0->b_rptr, m->b_rptr + off, mblen);
3215 		off += mblen;
3216 	}
3217 
3218 	m->b_wptr += off;
3219 
3220 	wh = (struct ieee80211_frame *)m->b_rptr;
3221 
3222 	/*
3223 	 * The net80211 module encapsulates outbound data frames,
3224 	 * adding some fields of the 802.11 frame.
3225 	 */
3226 	if ((type & IEEE80211_FC0_TYPE_MASK) ==
3227 	    IEEE80211_FC0_TYPE_DATA) {
3228 		(void) ieee80211_encap(ic, m, in);
3229 	}
3230 
3231 	freemsg(mp);
3232 
3233 	cmd->hdr.type = REPLY_TX;
3234 	cmd->hdr.flags = 0;
3235 	cmd->hdr.qid = ring->qid;
3236 
3237 	tx = (iwh_tx_cmd_t *)cmd->data;
3238 	tx->tx_flags = 0;
3239 
3240 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3241 		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
3242 	} else {
3243 		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
3244 	}
3245 
3246 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3247 		k = ieee80211_crypto_encap(ic, m);
3248 		if (NULL == k) {
3249 			freemsg(m);
3250 			sc->sc_tx_err++;
3251 
3252 			mutex_enter(&sc->sc_tx_lock);
3253 			ring->queued--;
3254 			if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3255 				sc->sc_need_reschedule = 0;
3256 				mutex_exit(&sc->sc_tx_lock);
3257 				mac_tx_update(ic->ic_mach);
3258 				mutex_enter(&sc->sc_tx_lock);
3259 			}
3260 			mutex_exit(&sc->sc_tx_lock);
3261 
3262 			err = IWH_SUCCESS;
3263 			goto exit;
3264 		}
3265 
3266 		/*
3267 		 * packet header may have moved, reset our local pointer
3268 		 */
3269 		wh = (struct ieee80211_frame *)m->b_rptr;
3270 	}
3271 
3272 	len = msgdsize(m);
3273 
3274 #ifdef DEBUG
3275 	if (iwh_dbg_flags & IWH_DEBUG_TX) {
3276 		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
3277 	}
3278 #endif
3279 
3280 	tx->rts_retry_limit = IWH_TX_RTS_RETRY_LIMIT;
3281 	tx->data_retry_limit = IWH_TX_DATA_RETRY_LIMIT;
3282 
3283 	/*
3284 	 * specific TX parameters for management frames
3285 	 */
3286 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
3287 	    IEEE80211_FC0_TYPE_MGT) {
3288 		/*
3289 		 * mgmt frames are sent at 1M
3290 		 */
3291 		if ((in->in_rates.ir_rates[0] &
3292 		    IEEE80211_RATE_VAL) != 0) {
3293 			rate = in->in_rates.ir_rates[0] & IEEE80211_RATE_VAL;
3294 		} else {
3295 			rate = 2;
3296 		}
3297 
3298 		tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3299 
3300 		/*
3301 		 * tell h/w to set timestamp in probe responses
3302 		 */
3303 		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3304 		    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
3305 			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
3306 
3307 			tx->data_retry_limit = 3;
3308 			if (tx->data_retry_limit < tx->rts_retry_limit) {
3309 				tx->rts_retry_limit = tx->data_retry_limit;
3310 			}
3311 		}
3312 
3313 		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3314 		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
3315 		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3316 		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) {
3317 			tx->timeout.pm_frame_timeout = LE_16(3);
3318 		} else {
3319 			tx->timeout.pm_frame_timeout = LE_16(2);
3320 		}
3321 
3322 	} else {
3323 		/*
3324 		 * rate scaling is done here in software for now;
3325 		 * rate scaling in hardware may come later.
3326 		 *
3327 		 * the txrate is determined in the tx cmd flags and is
3328 		 * initially set to the max value: 54M for 11g, 11M for 11b
3329 		 * and 96M for 11n.
3330 		 */
3331 		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
3332 			rate = ic->ic_fixed_rate;
3333 		} else {
3334 			if ((in->in_flags & IEEE80211_NODE_HT) &&
3335 			    (sc->sc_ht_conf.ht_support)) {
3336 				iwh_amrr_t *amrr = (iwh_amrr_t *)in;
3337 				rate = amrr->ht_mcs_idx;
3338 			} else {
3339 				if ((in->in_rates.ir_rates[in->in_txrate] &
3340 				    IEEE80211_RATE_VAL) != 0) {
3341 					rate = in->in_rates.
3342 					    ir_rates[in->in_txrate] &
3343 					    IEEE80211_RATE_VAL;
3344 				}
3345 			}
3346 		}
3347 
3348 		if (tid != WME_TID_INVALID) {
3349 			tx->tid_tspec = (uint8_t)tid;
3350 			tx->tx_flags &= LE_32(~TX_CMD_FLG_SEQ_CTL_MSK);
3351 		} else {
3352 			tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3353 		}
3354 
3355 		tx->timeout.pm_frame_timeout = 0;
3356 	}
3357 
3358 	IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3359 	    "tx rate[%d of %d] = %x",
3360 	    in->in_txrate, in->in_rates.ir_nrates, rate));
3361 
3362 	len0 = roundup(4 + sizeof (iwh_tx_cmd_t) + hdrlen, 4);
3363 	if (len0 != (4 + sizeof (iwh_tx_cmd_t) + hdrlen)) {
3364 		tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK);
3365 	}
3366 
3367 	/*
3368 	 * retrieve destination node's id
3369 	 */
3370 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3371 		tx->sta_id = IWH_BROADCAST_ID;
3372 	} else {
3373 		tx->sta_id = IWH_AP_ID;
3374 	}
3375 
3376 	if ((in->in_flags & IEEE80211_NODE_HT) &&
3377 	    (sc->sc_ht_conf.ht_support) &&
3378 	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
3379 	    IEEE80211_FC0_TYPE_DATA)) {
3380 		if (rate >= HT_2CHAIN_RATE_MIN_IDX) {
3381 			rate |= LE_32(RATE_MCS_ANT_AB_MSK);
3382 		} else {
3383 			rate |= LE_32(RATE_MCS_ANT_B_MSK);
3384 		}
3385 
3386 		rate |= LE_32((1 << RATE_MCS_HT_POS));
3387 
3388 		tx->rate.r.rate_n_flags = rate;
3389 
3390 	} else {
3391 		if (2 == rate || 4 == rate || 11 == rate || 22 == rate) {
3392 			masks |= RATE_MCS_CCK_MSK;
3393 		}
3394 
3395 		masks |= RATE_MCS_ANT_B_MSK;
3396 		tx->rate.r.rate_n_flags = LE_32(iwh_rate_to_plcp(rate) | masks);
3397 	}
3398 
3399 	IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3400 	    "tx flag = %x",
3401 	    tx->tx_flags));
3402 
3403 	tx->stop_time.life_time  = LE_32(0xffffffff);
3404 
3405 	tx->len = LE_16(len);
3406 
3407 	tx->dram_lsb_ptr =
3408 	    LE_32(data->paddr_cmd + 4 + offsetof(iwh_tx_cmd_t, scratch));
3409 	tx->dram_msb_ptr = 0;
3410 	tx->driver_txop = 0;
3411 	tx->next_frame_len = 0;
3412 
3413 	bcopy(m->b_rptr, tx + 1, hdrlen);
3414 	m->b_rptr += hdrlen;
3415 	bcopy(m->b_rptr, data->dma_data.mem_va, (len - hdrlen));
3416 
3417 	IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3418 	    "sending data: qid=%d idx=%d len=%d",
3419 	    ring->qid, ring->cur, len));
3420 
3421 	/*
3422 	 * first segment includes the tx cmd plus the 802.11 header,
3423 	 * the second includes the remaining of the 802.11 frame.
3424 	 */
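	/*
	 * Packing sketch for the two-segment TFD filled in below, derived
	 * from the assignments that follow (a reading aid, not new logic):
	 *
	 *	val0       = number of segments (2) << 24
	 *	pa[0].val1 = (len0 << 4) | (low 16 bits of data addr << 16)
	 *	pa[0].val2 = (high 16 bits of data addr) | (len2 << 20)
	 */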
3425 
3426 	mutex_enter(&sc->sc_tx_lock);
3427 	cmd->hdr.idx = ring->desc_cur;
3428 	desc_data = &ring->data[ring->desc_cur];
3429 	desc = desc_data->desc;
3430 	bzero(desc, sizeof (*desc));
3431 	desc->val0 = 2 << 24;
3432 	desc->pa[0].tb1_addr = data->paddr_cmd;
3433 	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
3434 	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
3435 	desc->pa[0].val2 =
3436 	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
3437 	    ((len - hdrlen) << 20);
3438 	IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3439 	    "phy addr1 = 0x%x phy addr2 = 0x%x "
3440 	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
3441 	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
3442 	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
3443 
3444 	/*
3445 	 * kick ring
3446 	 */
3447 	s_id = tx->sta_id;
3448 
3449 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3450 	    tfd_offset[ring->desc_cur].val =
3451 	    (8 + len) | (s_id << 12);
3452 	if (ring->desc_cur < IWH_MAX_WIN_SIZE) {
3453 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3454 		    tfd_offset[IWH_QUEUE_SIZE + ring->desc_cur].val =
3455 		    (8 + len) | (s_id << 12);
3456 	}
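	/*
	 * The duplicate entry written above for slots below IWH_MAX_WIN_SIZE
	 * mirrors the pattern used in iwh_cmd(); presumably it shadows the
	 * start of the byte count table past IWH_QUEUE_SIZE so the scheduler
	 * still sees valid counts when its window wraps around the table.
	 */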
3457 
3458 	IWH_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
3459 	IWH_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
3460 
3461 	ring->desc_cur = (ring->desc_cur + 1) % ring->count;
3462 	IWH_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->desc_cur);
3463 
3464 	mutex_exit(&sc->sc_tx_lock);
3465 	freemsg(m);
3466 
3467 	/*
3468 	 * release node reference
3469 	 */
3470 	ieee80211_free_node(in);
3471 
3472 	ic->ic_stats.is_tx_bytes += len;
3473 	ic->ic_stats.is_tx_frags++;
3474 
3475 	mutex_enter(&sc->sc_mt_lock);
3476 	if (0 == sc->sc_tx_timer) {
3477 		sc->sc_tx_timer = 4;
3478 	}
3479 	mutex_exit(&sc->sc_mt_lock);
3480 
3481 exit:
3482 	return (err);
3483 }
3484 
3485 /*
3486  * invoked by GLD to deal with IOCTL affairs
3487  */
3488 static void
3489 iwh_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
3490 {
3491 	iwh_sc_t *sc;
3492 	ieee80211com_t *ic;
3493 	int err = EINVAL;
3494 
3495 	if (NULL == arg) {
3496 		return;
3497 	}
3498 	sc = (iwh_sc_t *)arg;
3499 	ic = &sc->sc_ic;
3500 
3501 	err = ieee80211_ioctl(ic, wq, mp);
3502 	if (ENETRESET == err) {
3503 		/*
3504 		 * This is special handling for hidden AP connections.
3505 		 * In any case, we should make sure only one 'scan' is
3506 		 * issued by the driver for a 'connect' CLI command. So
3507 		 * when connecting to a hidden AP, the scan is only sent
3508 		 * out to the air once we know the desired essid of the
3509 		 * AP we want to connect to.
3510 		 */
3511 		if (ic->ic_des_esslen) {
3512 			if (sc->sc_flags & IWH_F_RUNNING) {
3513 				iwh_m_stop(sc);
3514 				(void) iwh_m_start(sc);
3515 				(void) ieee80211_new_state(ic,
3516 				    IEEE80211_S_SCAN, -1);
3517 			}
3518 		}
3519 	}
3520 }
3521 
3522 /*
3523  * Callback functions for getting/setting properties
3524  */
3525 static int
3526 iwh_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3527     uint_t wldp_length, void *wldp_buf)
3528 {
3529 	iwh_sc_t *sc;
3530 	int err = EINVAL;
3531 
3532 	if (NULL == arg) {
3533 		return (EINVAL);
3534 	}
3535 	sc = (iwh_sc_t *)arg;
3536 
3537 	err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3538 	    wldp_length, wldp_buf);
3539 
3540 	return (err);
3541 }
3542 
3543 static void
3544 iwh_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3545     mac_prop_info_handle_t mph)
3546 {
3547 	iwh_sc_t	*sc = (iwh_sc_t *)arg;
3548 
3549 	ieee80211_propinfo(&sc->sc_ic, pr_name, wldp_pr_num, mph);
3550 }
3551 
3552 static int
3553 iwh_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3554     uint_t wldp_length, const void *wldp_buf)
3555 {
3556 	iwh_sc_t *sc;
3557 	ieee80211com_t *ic;
3558 	int err = EINVAL;
3559 
3560 	if (NULL == arg) {
3561 		return (EINVAL);
3562 	}
3563 	sc = (iwh_sc_t *)arg;
3564 	ic = &sc->sc_ic;
3565 
3566 	err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3567 	    wldp_buf);
3568 
3569 	if (err == ENETRESET) {
3570 		if (ic->ic_des_esslen) {
3571 			if (sc->sc_flags & IWH_F_RUNNING) {
3572 				iwh_m_stop(sc);
3573 				(void) iwh_m_start(sc);
3574 				(void) ieee80211_new_state(ic,
3575 				    IEEE80211_S_SCAN, -1);
3576 			}
3577 		}
3578 		err = 0;
3579 	}
3580 	return (err);
3581 }
3582 
3583 /*
3584  * invoked by GLD to supply statistics of the NIC and driver
3585  */
3586 static int
3587 iwh_m_stat(void *arg, uint_t stat, uint64_t *val)
3588 {
3589 	iwh_sc_t *sc;
3590 	ieee80211com_t *ic;
3591 	ieee80211_node_t *in;
3592 
3593 	if (NULL == arg) {
3594 		return (EINVAL);
3595 	}
3596 	sc = (iwh_sc_t *)arg;
3597 	ic = &sc->sc_ic;
3598 
3599 	mutex_enter(&sc->sc_glock);
3600 
3601 	switch (stat) {
3602 	case MAC_STAT_IFSPEED:
3603 		in = ic->ic_bss;
3604 		*val = ((IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) ?
3605 		    IEEE80211_RATE(in->in_txrate) :
3606 		    ic->ic_fixed_rate) / 2 * 1000000;
3607 		break;
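		/*
		 * The "/ 2 * 1000000" above converts the rate value, which
		 * net80211 keeps in 500 kb/s units, into bits per second.
		 */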
3608 
3609 	case MAC_STAT_NOXMTBUF:
3610 		*val = sc->sc_tx_nobuf;
3611 		break;
3612 
3613 	case MAC_STAT_NORCVBUF:
3614 		*val = sc->sc_rx_nobuf;
3615 		break;
3616 
3617 	case MAC_STAT_IERRORS:
3618 		*val = sc->sc_rx_err;
3619 		break;
3620 
3621 	case MAC_STAT_RBYTES:
3622 		*val = ic->ic_stats.is_rx_bytes;
3623 		break;
3624 
3625 	case MAC_STAT_IPACKETS:
3626 		*val = ic->ic_stats.is_rx_frags;
3627 		break;
3628 
3629 	case MAC_STAT_OBYTES:
3630 		*val = ic->ic_stats.is_tx_bytes;
3631 		break;
3632 
3633 	case MAC_STAT_OPACKETS:
3634 		*val = ic->ic_stats.is_tx_frags;
3635 		break;
3636 
3637 	case MAC_STAT_OERRORS:
3638 	case WIFI_STAT_TX_FAILED:
3639 		*val = sc->sc_tx_err;
3640 		break;
3641 
3642 	case WIFI_STAT_TX_RETRANS:
3643 		*val = sc->sc_tx_retries;
3644 		break;
3645 
3646 	case WIFI_STAT_FCS_ERRORS:
3647 	case WIFI_STAT_WEP_ERRORS:
3648 	case WIFI_STAT_TX_FRAGS:
3649 	case WIFI_STAT_MCAST_TX:
3650 	case WIFI_STAT_RTS_SUCCESS:
3651 	case WIFI_STAT_RTS_FAILURE:
3652 	case WIFI_STAT_ACK_FAILURE:
3653 	case WIFI_STAT_RX_FRAGS:
3654 	case WIFI_STAT_MCAST_RX:
3655 	case WIFI_STAT_RX_DUPS:
3656 		mutex_exit(&sc->sc_glock);
3657 		return (ieee80211_stat(ic, stat, val));
3658 
3659 	default:
3660 		mutex_exit(&sc->sc_glock);
3661 		return (ENOTSUP);
3662 	}
3663 
3664 	mutex_exit(&sc->sc_glock);
3665 
3666 	return (IWH_SUCCESS);
3667 }
3668 
3669 /*
3670  * invoked by GLD to start or open NIC
3671  */
3672 static int
3673 iwh_m_start(void *arg)
3674 {
3675 	iwh_sc_t *sc;
3676 	ieee80211com_t *ic;
3677 	int err = IWH_FAIL;
3678 
3679 	if (NULL == arg) {
3680 		return (EINVAL);
3681 	}
3682 	sc = (iwh_sc_t *)arg;
3683 	ic = &sc->sc_ic;
3684 
3685 	err = iwh_init(sc);
3686 	if (err != IWH_SUCCESS) {
3687 		/*
3688 		 * The hw init failed (e.g. RF is OFF). Return success to
3689 		 * make the 'plumb' succeed. iwh_thread() tries to
3690 		 * re-initialize in the background.
3691 		 */
3692 		atomic_or_32(&sc->sc_flags, IWH_F_HW_ERR_RECOVER);
3693 		return (IWH_SUCCESS);
3694 	}
3695 
3696 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3697 
3698 	atomic_or_32(&sc->sc_flags, IWH_F_RUNNING);
3699 
3700 	return (IWH_SUCCESS);
3701 }
3702 
3703 /*
3704  * invoked by GLD to stop or down NIC
3705  */
3706 static void
3707 iwh_m_stop(void *arg)
3708 {
3709 	iwh_sc_t *sc;
3710 	ieee80211com_t *ic;
3711 
3712 	if (NULL == arg) {
3713 		return;
3714 	}
3715 	sc = (iwh_sc_t *)arg;
3716 	ic = &sc->sc_ic;
3717 
3718 	iwh_stop(sc);
3719 
3720 	/*
3721 	 * release buffer for calibration
3722 	 */
3723 	iwh_release_calib_buffer(sc);
3724 
3725 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3726 
3727 	atomic_and_32(&sc->sc_flags, ~IWH_F_HW_ERR_RECOVER);
3728 	atomic_and_32(&sc->sc_flags, ~IWH_F_RATE_AUTO_CTL);
3729 
3730 	atomic_and_32(&sc->sc_flags, ~IWH_F_RUNNING);
3731 	atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
3732 }
3733 
3734 /*
3735  * invoked by GLD to configure NIC
3736  */
3737 static int
3738 iwh_m_unicst(void *arg, const uint8_t *macaddr)
3739 {
3740 	iwh_sc_t *sc;
3741 	ieee80211com_t *ic;
3742 	int err = IWH_SUCCESS;
3743 
3744 	if (NULL == arg) {
3745 		return (EINVAL);
3746 	}
3747 	sc = (iwh_sc_t *)arg;
3748 	ic = &sc->sc_ic;
3749 
3750 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3751 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3752 		mutex_enter(&sc->sc_glock);
3753 		err = iwh_config(sc);
3754 		mutex_exit(&sc->sc_glock);
3755 		if (err != IWH_SUCCESS) {
3756 			cmn_err(CE_WARN, "iwh_m_unicst(): "
3757 			    "failed to configure device\n");
3758 			goto fail;
3759 		}
3760 	}
3761 
3762 fail:
3763 	return (err);
3764 }
3765 
3766 /* ARGSUSED */
3767 static int
3768 iwh_m_multicst(void *arg, boolean_t add, const uint8_t *m)
3769 {
3770 	return (IWH_SUCCESS);
3771 }
3772 
3773 /* ARGSUSED */
3774 static int
3775 iwh_m_promisc(void *arg, boolean_t on)
3776 {
3777 	return (IWH_SUCCESS);
3778 }
3779 
3780 /*
3781  * kernel thread to deal with exceptional situations
3782  */
3783 static void
3784 iwh_thread(iwh_sc_t *sc)
3785 {
3786 	ieee80211com_t *ic = &sc->sc_ic;
3787 	clock_t clk;
3788 	int err, n = 0, timeout = 0;
3789 	uint32_t tmp;
3790 #ifdef	DEBUG
3791 	int times = 0;
3792 #endif
3793 
3794 	while (sc->sc_mf_thread_switch) {
3795 		tmp = IWH_READ(sc, CSR_GP_CNTRL);
3796 		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
3797 			atomic_and_32(&sc->sc_flags, ~IWH_F_RADIO_OFF);
3798 		} else {
3799 			atomic_or_32(&sc->sc_flags, IWH_F_RADIO_OFF);
3800 		}
3801 
3802 		/*
3803 		 * If in SUSPEND or the RF is OFF, do nothing.
3804 		 */
3805 		if (sc->sc_flags & IWH_F_RADIO_OFF) {
3806 			delay(drv_usectohz(100000));
3807 			continue;
3808 		}
3809 
3810 		/*
3811 		 * recover from fatal error
3812 		 */
3813 		if (ic->ic_mach &&
3814 		    (sc->sc_flags & IWH_F_HW_ERR_RECOVER)) {
3815 
3816 			IWH_DBG((IWH_DEBUG_FW, "iwh_thread(): "
3817 			    "try to recover fatal hw error: %d\n", times++));
3818 
3819 			iwh_stop(sc);
3820 
3821 			if (IWH_CHK_FAST_RECOVER(sc)) {
3822 				/*
3823 				 * save runtime configuration
3824 				 */
3825 				bcopy(&sc->sc_config, &sc->sc_config_save,
3826 				    sizeof (sc->sc_config));
3827 			} else {
3828 				ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3829 				delay(drv_usectohz(2000000 + n*500000));
3830 			}
3831 
3832 			err = iwh_init(sc);
3833 			if (err != IWH_SUCCESS) {
3834 				n++;
3835 				if (n < 20) {
3836 					continue;
3837 				}
3838 			}
3839 
3840 			n = 0;
3841 			if (!err) {
3842 				atomic_or_32(&sc->sc_flags, IWH_F_RUNNING);
3843 			}
3844 
3845 
3846 			if (!IWH_CHK_FAST_RECOVER(sc) ||
3847 			    iwh_fast_recover(sc) != IWH_SUCCESS) {
3848 				atomic_and_32(&sc->sc_flags,
3849 				    ~IWH_F_HW_ERR_RECOVER);
3850 
3851 				delay(drv_usectohz(2000000));
3852 				if (sc->sc_ostate != IEEE80211_S_INIT) {
3853 					ieee80211_new_state(ic,
3854 					    IEEE80211_S_SCAN, 0);
3855 				}
3856 			}
3857 		}
3858 
3859 		if (ic->ic_mach &&
3860 		    (sc->sc_flags & IWH_F_SCANNING) && sc->sc_scan_pending) {
3861 			IWH_DBG((IWH_DEBUG_SCAN, "iwh_thread(): "
3862 			    "wait for probe response\n"));
3863 
3864 			sc->sc_scan_pending--;
3865 			delay(drv_usectohz(200000));
3866 			ieee80211_next_scan(ic);
3867 		}
3868 
3869 		/*
3870 		 * rate ctl
3871 		 */
3872 		if (ic->ic_mach &&
3873 		    (sc->sc_flags & IWH_F_RATE_AUTO_CTL)) {
3874 			clk = ddi_get_lbolt();
3875 			if (clk > sc->sc_clk + drv_usectohz(1000000)) {
3876 				iwh_amrr_timeout(sc);
3877 			}
3878 		}
3879 
3880 		if ((ic->ic_state == IEEE80211_S_RUN) &&
3881 		    (ic->ic_beaconmiss++ > 100)) {	/* 10 seconds */
3882 			cmn_err(CE_WARN, "iwh: beacon missed for 10 seconds\n");
3883 			(void) ieee80211_new_state(ic,
3884 			    IEEE80211_S_INIT, -1);
3885 		}
3886 
3887 		delay(drv_usectohz(100000));
3888 
3889 		mutex_enter(&sc->sc_mt_lock);
3890 		if (sc->sc_tx_timer) {
3891 			timeout++;
3892 			if (10 == timeout) {
3893 				sc->sc_tx_timer--;
3894 				if (0 == sc->sc_tx_timer) {
3895 					atomic_or_32(&sc->sc_flags,
3896 					    IWH_F_HW_ERR_RECOVER);
3897 					sc->sc_ostate = IEEE80211_S_RUN;
3898 					IWH_DBG((IWH_DEBUG_FW, "iwh_thread(): "
3899 					    "try to recover from "
3900 					    "send fail\n"));
3901 				}
3902 				timeout = 0;
3903 			}
3904 		}
3905 		mutex_exit(&sc->sc_mt_lock);
3906 	}
3907 
3908 	mutex_enter(&sc->sc_mt_lock);
3909 	sc->sc_mf_thread = NULL;
3910 	cv_signal(&sc->sc_mt_cv);
3911 	mutex_exit(&sc->sc_mt_lock);
3912 }
3913 
3914 /*
3915  * Send a command to the ucode.
3916  */
3917 static int
3918 iwh_cmd(iwh_sc_t *sc, int code, const void *buf, int size, int async)
3919 {
3920 	iwh_tx_ring_t *ring = &sc->sc_txq[IWH_CMD_QUEUE_NUM];
3921 	iwh_tx_desc_t *desc;
3922 	iwh_cmd_t *cmd;
3923 
3924 	ASSERT(size <= sizeof (cmd->data));
3925 	ASSERT(mutex_owned(&sc->sc_glock));
3926 
3927 	IWH_DBG((IWH_DEBUG_CMD, "iwh_cmd() "
3928 	    "code[%d]", code));
3929 	desc = ring->data[ring->cur].desc;
3930 	cmd = ring->data[ring->cur].cmd;
3931 
3932 	cmd->hdr.type = (uint8_t)code;
3933 	cmd->hdr.flags = 0;
3934 	cmd->hdr.qid = ring->qid;
3935 	cmd->hdr.idx = ring->cur;
3936 	bcopy(buf, cmd->data, size);
3937 	(void) memset(desc, 0, sizeof (*desc));
3938 
3939 	desc->val0 = 1 << 24;
3940 	desc->pa[0].tb1_addr =
3941 	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
3942 	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
3943 
3944 	if (async) {
3945 		sc->sc_cmd_accum++;
3946 	}
3947 
3948 	/*
3949 	 * kick cmd ring XXX
3950 	 */
3951 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3952 	    tfd_offset[ring->cur].val = 8;
3953 	if (ring->cur < IWH_MAX_WIN_SIZE) {
3954 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3955 		    tfd_offset[IWH_QUEUE_SIZE + ring->cur].val = 8;
3956 	}
3957 	ring->cur = (ring->cur + 1) % ring->count;
3958 	IWH_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3959 
3960 	if (async) {
3961 		return (IWH_SUCCESS);
3962 	} else {
3963 		clock_t clk;
3964 
3965 		clk = ddi_get_lbolt() + drv_usectohz(2000000);
3966 		while (sc->sc_cmd_flag != SC_CMD_FLG_DONE) {
3967 			if (cv_timedwait(&sc->sc_cmd_cv,
3968 			    &sc->sc_glock, clk) < 0) {
3969 				break;
3970 			}
3971 		}
3972 
3973 		if (SC_CMD_FLG_DONE == sc->sc_cmd_flag) {
3974 			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
3975 			return (IWH_SUCCESS);
3976 		} else {
3977 			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
3978 			return (IWH_FAIL);
3979 		}
3980 	}
3981 }
3982 
3983 /*
3984  * request that the ucode set the LED of the NIC
3985  */
3986 static void
3987 iwh_set_led(iwh_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3988 {
3989 	iwh_led_cmd_t led;
3990 
3991 	led.interval = LE_32(100000);	/* unit: 100ms */
3992 	led.id = id;
3993 	led.off = off;
3994 	led.on = on;
3995 
3996 	(void) iwh_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3997 }
3998 
3999 /*
4000  * necessary NIC settings before authentication
4001  */
4002 static int
4003 iwh_hw_set_before_auth(iwh_sc_t *sc)
4004 {
4005 	ieee80211com_t *ic = &sc->sc_ic;
4006 	ieee80211_node_t *in = ic->ic_bss;
4007 	int err = IWH_FAIL;
4008 
4009 	/*
4010 	 * update the adapter's configuration according to
4011 	 * the info of the target AP
4012 	 */
4013 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
4014 	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan));
4015 
4016 	if (ic->ic_curmode != IEEE80211_MODE_11NG) {
4017 
4018 		sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0;
4019 		sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0;
4020 		sc->sc_config.ofdm_ht_single_stream_basic_rates = 0;
4021 
4022 		if (IEEE80211_MODE_11B == ic->ic_curmode) {
4023 			sc->sc_config.cck_basic_rates  = 0x03;
4024 			sc->sc_config.ofdm_basic_rates = 0;
4025 		} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
4026 		    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
4027 			sc->sc_config.cck_basic_rates  = 0;
4028 			sc->sc_config.ofdm_basic_rates = 0x15;
4029 		} else { /* assume 802.11b/g */
4030 			sc->sc_config.cck_basic_rates  = 0x0f;
4031 			sc->sc_config.ofdm_basic_rates = 0xff;
4032 		}
4033 	}
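	/*
	 * The bitmaps above follow the usual iwl basic-rate convention
	 * (an assumption, not spelled out here): one bit per basic rate,
	 * so cck 0x03 = 1/2 Mb/s, cck 0x0f = 1/2/5.5/11 Mb/s, ofdm 0x15 =
	 * the mandatory 6/12/24 Mb/s set, and ofdm 0xff = all OFDM rates.
	 */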
4034 
4035 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
4036 	    RXON_FLG_SHORT_SLOT_MSK);
4037 
4038 	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
4039 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
4040 	} else {
4041 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
4042 	}
4043 
4044 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
4045 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
4046 	} else {
4047 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
4048 	}
4049 
4050 	IWH_DBG((IWH_DEBUG_80211, "iwh_hw_set_before_auth(): "
4051 	    "config chan %d flags %x "
4052 	    "filter_flags %x  cck %x ofdm %x"
4053 	    " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
4054 	    LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags),
4055 	    LE_32(sc->sc_config.filter_flags),
4056 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
4057 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
4058 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
4059 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
4060 
4061 	err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
4062 	    sizeof (iwh_rxon_cmd_t), 1);
4063 	if (err != IWH_SUCCESS) {
4064 		cmn_err(CE_WARN, "iwh_hw_set_before_auth(): "
4065 		    "failed to config chan%d\n", sc->sc_config.chan);
4066 		return (err);
4067 	}
4068 
4069 	if ((sc->sc_dev_id != 0x423c) &&
4070 	    (sc->sc_dev_id != 0x423d)) {
4071 		err = iwh_tx_power_table(sc, 1);
4072 		if (err != IWH_SUCCESS) {
4073 			return (err);
4074 		}
4075 	}
4076 
4077 	/*
4078 	 * add default AP node
4079 	 */
4080 	err = iwh_add_ap_sta(sc);
4081 	if (err != IWH_SUCCESS) {
4082 		return (err);
4083 	}
4084 
4085 	if ((sc->sc_dev_id != 0x423c) &&
4086 	    (sc->sc_dev_id != 0x423d)) {
4087 		/*
4088 		 * set up retry rate table for AP node
4089 		 */
4090 		err = iwh_ap_lq(sc);
4091 		if (err != IWH_SUCCESS) {
4092 			return (err);
4093 		}
4094 	}
4095 
4096 	return (err);
4097 }
4098 
4099 /*
4100  * Send a scan request (assemble the scan cmd) to the firmware.
4101  */
4102 static int
4103 iwh_scan(iwh_sc_t *sc)
4104 {
4105 	ieee80211com_t *ic = &sc->sc_ic;
4106 	iwh_tx_ring_t *ring = &sc->sc_txq[IWH_CMD_QUEUE_NUM];
4107 	iwh_tx_desc_t *desc;
4108 	iwh_tx_data_t *data;
4109 	iwh_cmd_t *cmd;
4110 	iwh_scan_hdr_t *hdr;
4111 	iwh_scan_chan_t chan;
4112 	struct ieee80211_frame *wh;
4113 	ieee80211_node_t *in = ic->ic_bss;
4114 	uint8_t essid[IEEE80211_NWID_LEN+1];
4115 	struct ieee80211_rateset *rs;
4116 	enum ieee80211_phymode mode;
4117 	uint8_t *frm;
4118 	int i, pktlen, nrates;
4119 
4120 	data = &ring->data[ring->cur];
4121 	desc = data->desc;
4122 	cmd = (iwh_cmd_t *)data->dma_data.mem_va;
4123 
4124 	cmd->hdr.type = REPLY_SCAN_CMD;
4125 	cmd->hdr.flags = 0;
4126 	cmd->hdr.qid = ring->qid;
4127 	cmd->hdr.idx = ring->cur | 0x40;
4128 
4129 	hdr = (iwh_scan_hdr_t *)cmd->data;
4130 	(void) memset(hdr, 0, sizeof (iwh_scan_hdr_t));
4131 	hdr->nchan = 1;
4132 	hdr->quiet_time = LE_16(50);
4133 	hdr->quiet_plcp_th = LE_16(1);
4134 
4135 	hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK);
4136 	hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
4137 	    (0x7 << RXON_RX_CHAIN_VALID_POS) |
4138 	    (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
4139 	    (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
4140 
4141 	hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
4142 	hdr->tx_cmd.sta_id = IWH_BROADCAST_ID;
4143 	hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff);
4144 	hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwh_rate_to_plcp(2));
4145 	hdr->tx_cmd.rate.r.rate_n_flags |=
4146 	    LE_32(RATE_MCS_ANT_B_MSK |RATE_MCS_CCK_MSK);
4147 	hdr->direct_scan[0].len = ic->ic_des_esslen;
4148 	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;
4149 
4150 	hdr->filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4151 	    RXON_FILTER_BCON_AWARE_MSK);
4152 
4153 	if (ic->ic_des_esslen) {
4154 		bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
4155 		essid[ic->ic_des_esslen] = '\0';
4156 		IWH_DBG((IWH_DEBUG_SCAN, "iwh_scan(): "
4157 		    "directed scan %s\n", essid));
4158 
4159 		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
4160 		    ic->ic_des_esslen);
4161 	} else {
4162 		bzero(hdr->direct_scan[0].ssid,
4163 		    sizeof (hdr->direct_scan[0].ssid));
4164 	}
4165 
4166 	/*
4167 	 * a probe request frame is required after the REPLY_SCAN_CMD
4168 	 */
4169 	wh = (struct ieee80211_frame *)(hdr + 1);
4170 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4171 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4172 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4173 	(void) memset(wh->i_addr1, 0xff, 6);
4174 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
4175 	(void) memset(wh->i_addr3, 0xff, 6);
4176 	*(uint16_t *)&wh->i_dur[0] = 0;
4177 	*(uint16_t *)&wh->i_seq[0] = 0;
4178 
4179 	frm = (uint8_t *)(wh + 1);
4180 
4181 	/*
4182 	 * essid IE
4183 	 */
4184 	if (in->in_esslen) {
4185 		bcopy(in->in_essid, essid, in->in_esslen);
4186 		essid[in->in_esslen] = '\0';
4187 		IWH_DBG((IWH_DEBUG_SCAN, "iwh_scan(): "
4188 		    "probe with ESSID %s\n",
4189 		    essid));
4190 	}
4191 	*frm++ = IEEE80211_ELEMID_SSID;
4192 	*frm++ = in->in_esslen;
4193 	bcopy(in->in_essid, frm, in->in_esslen);
4194 	frm += in->in_esslen;
4195 
4196 	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
4197 	rs = &ic->ic_sup_rates[mode];
4198 
4199 	/*
4200 	 * supported rates IE
4201 	 */
4202 	*frm++ = IEEE80211_ELEMID_RATES;
4203 	nrates = rs->ir_nrates;
4204 	if (nrates > IEEE80211_RATE_SIZE) {
4205 		nrates = IEEE80211_RATE_SIZE;
4206 	}
4207 
4208 	*frm++ = (uint8_t)nrates;
4209 	bcopy(rs->ir_rates, frm, nrates);
4210 	frm += nrates;
4211 
4212 	/*
4213 	 * supported xrates IE
4214 	 */
4215 	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
4216 		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
4217 		*frm++ = IEEE80211_ELEMID_XRATES;
4218 		*frm++ = (uint8_t)nrates;
4219 		bcopy(rs->ir_rates + IEEE80211_RATE_SIZE, frm, nrates);
4220 		frm += nrates;
4221 	}
4222 
4223 	/*
4224  * optional IE (usually for WPA)
4225 	 */
4226 	if (ic->ic_opt_ie != NULL) {
4227 		bcopy(ic->ic_opt_ie, frm, ic->ic_opt_ie_len);
4228 		frm += ic->ic_opt_ie_len;
4229 	}
4230 
4231 	/* setup length of probe request */
4232 	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
4233 	hdr->len = LE_16(hdr->nchan * sizeof (iwh_scan_chan_t) +
4234 	    LE_16(hdr->tx_cmd.len) + sizeof (iwh_scan_hdr_t));
4235 
4236 	/*
4237 	 * the attributes of the scan channels are required after the probe
4238 	 * request frame.
4239 	 */
4240 	for (i = 1; i <= hdr->nchan; i++) {
4241 		if (ic->ic_des_esslen) {
4242 			chan.type = LE_32(3);
4243 		} else {
4244 			chan.type = LE_32(1);
4245 		}
4246 
4247 		chan.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
4248 		chan.tpc.tx_gain = 0x28;
4249 		chan.tpc.dsp_atten = 110;
4250 		chan.active_dwell = LE_16(50);
4251 		chan.passive_dwell = LE_16(120);
4252 
4253 		bcopy(&chan, frm, sizeof (iwh_scan_chan_t));
4254 		frm += sizeof (iwh_scan_chan_t);
4255 	}
4256 
4257 	pktlen = _PTRDIFF(frm, cmd);
4258 
4259 	(void) memset(desc, 0, sizeof (*desc));
4260 	desc->val0 = 1 << 24;
4261 	desc->pa[0].tb1_addr =
4262 	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
4263 	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
4264 
4265 	/*
4266 	 * filling the byte count table may not be necessary for commands,
4267 	 * but we fill it here anyway.
4268 	 */
4269 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
4270 	    .tfd_offset[ring->cur].val = 8;
4271 	if (ring->cur < IWH_MAX_WIN_SIZE) {
4272 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
4273 		    tfd_offset[IWH_QUEUE_SIZE + ring->cur].val = 8;
4274 	}
4275 
4276 	/*
4277 	 * kick cmd ring
4278 	 */
4279 	ring->cur = (ring->cur + 1) % ring->count;
4280 	IWH_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4281 
4282 	return (IWH_SUCCESS);
4283 }
4284 
4285 /*
4286  * configure NIC by using ucode commands after loading ucode.
4287  */
4288 static int
4289 iwh_config(iwh_sc_t *sc)
4290 {
4291 	ieee80211com_t *ic = &sc->sc_ic;
4292 	iwh_powertable_cmd_t powertable;
4293 	iwh_bt_cmd_t bt;
4294 	iwh_add_sta_t node;
4295 	iwh_rem_sta_t rm_sta;
4296 	const uint8_t bcast[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
4297 	iwh_link_quality_cmd_t link_quality;
4298 	int i, err = IWH_FAIL;
4299 	uint16_t masks = 0;
4300 
4301 	/*
4302 	 * set power mode. Disable power management at present, do it later
4303 	 */
4304 	(void) memset(&powertable, 0, sizeof (powertable));
4305 	powertable.flags = LE_16(0x8);
4306 	err = iwh_cmd(sc, POWER_TABLE_CMD, &powertable,
4307 	    sizeof (powertable), 0);
4308 	if (err != IWH_SUCCESS) {
4309 		cmn_err(CE_WARN, "iwh_config(): "
4310 		    "failed to set power mode\n");
4311 		return (err);
4312 	}
4313 
4314 	/*
4315 	 * configure bt coexistence
4316 	 */
4317 	(void) memset(&bt, 0, sizeof (bt));
4318 	bt.flags = 3;
4319 	bt.lead_time = 0xaa;
4320 	bt.max_kill = 1;
4321 	err = iwh_cmd(sc, REPLY_BT_CONFIG, &bt,
4322 	    sizeof (bt), 0);
4323 	if (err != IWH_SUCCESS) {
4324 		cmn_err(CE_WARN, "iwh_config(): "
4325 		    "failed to configurate bt coexistence\n");
4326 		return (err);
4327 	}
4328 
4329 	/*
4330 	 * configure rxon
4331 	 */
4332 	(void) memset(&sc->sc_config, 0, sizeof (iwh_rxon_cmd_t));
4333 	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
4334 	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
4335 	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
4336 	sc->sc_config.flags = LE_32(RXON_FLG_BAND_24G_MSK);
4337 	sc->sc_config.flags &= LE_32(~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4338 	    RXON_FLG_CHANNEL_MODE_PURE_40_MSK));
4339 
4340 	switch (ic->ic_opmode) {
4341 	case IEEE80211_M_STA:
4342 		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
4343 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4344 		    RXON_FILTER_DIS_DECRYPT_MSK |
4345 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
4346 		break;
4347 
4348 	case IEEE80211_M_IBSS:
4349 	case IEEE80211_M_AHDEMO:
4350 		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
4351 
4352 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
4353 		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4354 		    RXON_FILTER_DIS_DECRYPT_MSK |
4355 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
4356 		break;
4357 
4358 	case IEEE80211_M_HOSTAP:
4359 		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
4360 		break;
4361 
4362 	case IEEE80211_M_MONITOR:
4363 		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
4364 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4365 		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
4366 		break;
4367 	}
4368 
4369 	/*
4370 	 * Support all CCK rates.
4371 	 */
4372 	sc->sc_config.cck_basic_rates  = 0x0f;
4373 
4374 	/*
4375 	 * Support all OFDM rates.
4376 	 */
4377 	sc->sc_config.ofdm_basic_rates = 0xff;
4378 
4379 	/*
4380 	 * Determine HT supported rates.
4381 	 */
4382 	switch (sc->sc_ht_conf.rx_stream_count) {
4383 	case 3:
4384 		sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0xff;
4385 		sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
4386 		sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
4387 		break;
4388 	case 2:
4389 		sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
4390 		sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
4391 		break;
4392 	case 1:
4393 		sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
4394 		break;
4395 	default:
4396 		cmn_err(CE_WARN, "iwh_config(): "
4397 		    "RX stream count %d is not in suitable range\n",
4398 		    sc->sc_ht_conf.rx_stream_count);
4399 		return (IWH_FAIL);
4400 	}
4401 
4402 	/*
4403 	 * set RX chains/antennas.
4404 	 */
4405 	iwh_config_rxon_chain(sc);
4406 
4407 	err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
4408 	    sizeof (iwh_rxon_cmd_t), 0);
4409 	if (err != IWH_SUCCESS) {
4410 		cmn_err(CE_WARN, "iwh_config(): "
4411 		    "failed to set configure command\n");
4412 		return (err);
4413 	}
4414 
4415 	/*
4416 	 * remove all nodes in NIC
4417 	 */
4418 	(void) memset(&rm_sta, 0, sizeof (rm_sta));
4419 	rm_sta.num_sta = 1;
4420 	bcopy(bcast, rm_sta.addr, 6);
4421 
4422 	err = iwh_cmd(sc, REPLY_REMOVE_STA, &rm_sta, sizeof (iwh_rem_sta_t), 0);
4423 	if (err != IWH_SUCCESS) {
4424 		cmn_err(CE_WARN, "iwh_config(): "
4425 		    "failed to remove broadcast node in hardware.\n");
4426 		return (err);
4427 	}
4428 
4429 	if ((sc->sc_dev_id != 0x423c) &&
4430 	    (sc->sc_dev_id != 0x423d)) {
4431 		/*
4432 		 * configure TX power table
4433 		 */
4434 		err = iwh_tx_power_table(sc, 0);
4435 		if (err != IWH_SUCCESS) {
4436 			return (err);
4437 		}
4438 	}
4439 
4440 	/*
4441 	 * add broadcast node so that we can send broadcast frames
4442 	 */
4443 	(void) memset(&node, 0, sizeof (node));
4444 	(void) memset(node.sta.addr, 0xff, 6);
4445 	node.mode = 0;
4446 	node.sta.sta_id = IWH_BROADCAST_ID;
4447 	node.station_flags = 0;
4448 
4449 	err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
4450 	if (err != IWH_SUCCESS) {
4451 		cmn_err(CE_WARN, "iwh_config(): "
4452 		    "failed to add broadcast node\n");
4453 		return (err);
4454 	}
4455 
4456 	if ((sc->sc_dev_id != 0x423c) &&
4457 	    (sc->sc_dev_id != 0x423d)) {
4458 		/*
4459 		 * TX_LINK_QUALITY cmd
4460 		 */
4461 		(void) memset(&link_quality, 0, sizeof (link_quality));
4462 		for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
4463 			masks |= RATE_MCS_CCK_MSK;
4464 			masks |= RATE_MCS_ANT_B_MSK;
4465 			masks &= ~RATE_MCS_ANT_A_MSK;
4466 			link_quality.rate_n_flags[i] =
4467 			    LE_32(iwh_rate_to_plcp(2) | masks);
4468 		}
4469 
4470 		link_quality.general_params.single_stream_ant_msk = 2;
4471 		link_quality.general_params.dual_stream_ant_msk = 3;
4472 		link_quality.agg_params.agg_dis_start_th = 3;
4473 		link_quality.agg_params.agg_time_limit = LE_16(4000);
4474 		link_quality.sta_id = IWH_BROADCAST_ID;
4475 		err = iwh_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
4476 		    sizeof (link_quality), 0);
4477 		if (err != IWH_SUCCESS) {
4478 			cmn_err(CE_WARN, "iwh_config(): "
4479 			    "failed to config link quality table\n");
4480 			return (err);
4481 		}
4482 	}
4483 
4484 	return (err);
4485 }
4486 
4487 /*
4488  * quiesce(9E) entry point.
4489  * This function is called when the system is single-threaded at high
4490  * PIL with preemption disabled. Therefore, this function must not be
4491  * blocked.
4492  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
4493  * DDI_FAILURE indicates an error condition and should almost never happen.
4494  */
4495 static int
4496 iwh_quiesce(dev_info_t *dip)
4497 {
4498 	iwh_sc_t *sc;
4499 
4500 	sc = ddi_get_soft_state(iwh_soft_state_p, ddi_get_instance(dip));
4501 	if (sc == NULL) {
4502 		return (DDI_FAILURE);
4503 	}
4504 
4505 #ifdef DEBUG
4506 	/*
4507 	 * bypass any debug messages during quiesce
4508 	 */
4509 	iwh_dbg_flags = 0;
4510 #endif
4511 
4512 	/*
4513 	 * No more blocking is allowed while we are in the
4514 	 * quiesce(9E) entry point.
4515 	 */
4516 	atomic_or_32(&sc->sc_flags, IWH_F_QUIESCED);
4517 
4518 	/*
4519 	 * Disable and mask all interrupts.
4520 	 */
4521 	iwh_stop(sc);
4522 
4523 	return (DDI_SUCCESS);
4524 }
4525 
4526 static void
4527 iwh_stop_master(iwh_sc_t *sc)
4528 {
4529 	uint32_t tmp;
4530 	int n;
4531 
4532 	tmp = IWH_READ(sc, CSR_RESET);
4533 	IWH_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
4534 
4535 	tmp = IWH_READ(sc, CSR_GP_CNTRL);
4536 	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
4537 	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) {
4538 		return;
4539 	}
4540 
4541 	for (n = 0; n < 2000; n++) {
4542 		if (IWH_READ(sc, CSR_RESET) &
4543 		    CSR_RESET_REG_FLAG_MASTER_DISABLED) {
4544 			break;
4545 		}
4546 		DELAY(1000);
4547 	}
4548 
4549 #ifdef	DEBUG
4550 	if (2000 == n) {
4551 		IWH_DBG((IWH_DEBUG_HW, "iwh_stop_master(): "
4552 		    "timeout waiting for master stop\n"));
4553 	}
4554 #endif
4555 }
4556 
4557 static int
4558 iwh_power_up(iwh_sc_t *sc)
4559 {
4560 	uint32_t tmp;
4561 
4562 	iwh_mac_access_enter(sc);
4563 	tmp = iwh_reg_read(sc, ALM_APMG_PS_CTL);
4564 	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
4565 	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
4566 	iwh_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4567 	iwh_mac_access_exit(sc);
4568 
4569 	DELAY(5000);
4570 	return (IWH_SUCCESS);
4571 }
4572 
4573 /*
4574  * hardware initialization
4575  */
4576 static int
4577 iwh_preinit(iwh_sc_t *sc)
4578 {
4579 	int n;
4580 	uint8_t vlink;
4581 	uint16_t radio_cfg;
4582 	uint32_t tmp;
4583 
4584 	/*
4585 	 * clear any pending interrupts
4586 	 */
4587 	IWH_WRITE(sc, CSR_INT, 0xffffffff);
4588 
4589 	tmp = IWH_READ(sc, CSR_GIO_CHICKEN_BITS);
4590 	IWH_WRITE(sc, CSR_GIO_CHICKEN_BITS,
4591 	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
4592 
4593 	tmp = IWH_READ(sc, CSR_ANA_PLL_CFG);
4594 	IWH_WRITE(sc, CSR_ANA_PLL_CFG, tmp | IWH_CSR_ANA_PLL_CFG);
4595 
4596 	tmp = IWH_READ(sc, CSR_GP_CNTRL);
4597 	IWH_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4598 
4599 	/*
4600 	 * wait for clock ready
4601 	 */
4602 	for (n = 0; n < 1000; n++) {
4603 		if (IWH_READ(sc, CSR_GP_CNTRL) &
4604 		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
4605 			break;
4606 		}
4607 		DELAY(10);
4608 	}
4609 
4610 	if (1000 == n) {
4611 		return (ETIMEDOUT);
4612 	}
4613 
4614 	iwh_mac_access_enter(sc);
4615 
4616 	iwh_reg_write(sc, ALM_APMG_CLK_EN, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4617 
4618 	DELAY(20);
4619 	tmp = iwh_reg_read(sc, ALM_APMG_PCIDEV_STT);
4620 	iwh_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
4621 	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
4622 	iwh_mac_access_exit(sc);
4623 
4624 	radio_cfg = IWH_READ_EEP_SHORT(sc, EEP_SP_RADIO_CONFIGURATION);
4625 	if (SP_RADIO_TYPE_MSK(radio_cfg) < SP_RADIO_TYPE_MAX) {
4626 		tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4627 		IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4628 		    tmp | SP_RADIO_TYPE_MSK(radio_cfg) |
4629 		    SP_RADIO_STEP_MSK(radio_cfg) |
4630 		    SP_RADIO_DASH_MSK(radio_cfg));
4631 	} else {
4632 		cmn_err(CE_WARN, "iwh_preinit(): "
4633 		    "radio configuration information in eeprom is wrong\n");
4634 		return (IWH_FAIL);
4635 	}
4636 
4637 
4638 	IWH_WRITE(sc, CSR_INT_COALESCING, 512 / 32);
4639 
4640 	(void) iwh_power_up(sc);
4641 
4642 	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
4643 		tmp = ddi_get32(sc->sc_cfg_handle,
4644 		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
4645 		ddi_put32(sc->sc_cfg_handle,
4646 		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
4647 		    tmp & ~(1 << 11));
4648 	}
4649 
4650 	vlink = ddi_get8(sc->sc_cfg_handle,
4651 	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
4652 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
4653 	    vlink & ~2);
4654 
4655 	tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4656 	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
4657 	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
4658 	IWH_WRITE(sc, CSR_SW_VER, tmp);
4659 
4660 	/*
4661 	 * make sure power is supplied to each part of the hardware
4662 	 */
4663 	iwh_mac_access_enter(sc);
4664 	tmp = iwh_reg_read(sc, ALM_APMG_PS_CTL);
4665 	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4666 	iwh_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4667 	DELAY(5);
4668 
4669 	tmp = iwh_reg_read(sc, ALM_APMG_PS_CTL);
4670 	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4671 	iwh_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4672 	iwh_mac_access_exit(sc);
4673 
4674 	return (IWH_SUCCESS);
4675 }
4676 
4677 /*
4678  * set up semaphore flag to own EEPROM
4679  */
4680 static int
4681 iwh_eep_sem_down(iwh_sc_t *sc)
4682 {
4683 	int count1, count2;
4684 	uint32_t tmp;
4685 
4686 	for (count1 = 0; count1 < 1000; count1++) {
4687 		tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4688 		IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4689 		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4690 
4691 		for (count2 = 0; count2 < 2; count2++) {
4692 			if (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
4693 			    CSR_HW_IF_CONFIG_REG_EEP_SEM) {
4694 				return (IWH_SUCCESS);
4695 			}
4696 			DELAY(10000);
4697 		}
4698 	}
4699 
4700 	return (IWH_FAIL);
4701 }
4702 
4703 /*
4704  * reset semaphore flag to release EEPROM
4705  */
4706 static void
4707 iwh_eep_sem_up(iwh_sc_t *sc)
4708 {
4709 	uint32_t tmp;
4710 
4711 	tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4712 	IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4713 	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4714 }
4715 
4716 /*
4717  * This function reads all information from eeprom
4718  */
4719 static int
4720 iwh_eep_load(iwh_sc_t *sc)
4721 {
4722 	int i, rr;
4723 	uint32_t rv, tmp, eep_gp;
4724 	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
4725 	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
4726 
4727 	/*
4728 	 * read eeprom gp register in CSR
4729 	 */
4730 	eep_gp = IWH_READ(sc, CSR_EEPROM_GP);
4731 	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
4732 	    CSR_EEPROM_GP_BAD_SIGNATURE) {
4733 		IWH_DBG((IWH_DEBUG_EEPROM, "iwh_eep_load(): "
4734 		    "not find eeprom\n"));
4735 		return (IWH_FAIL);
4736 	}
4737 
4738 	rr = iwh_eep_sem_down(sc);
4739 	if (rr != 0) {
4740 		IWH_DBG((IWH_DEBUG_EEPROM, "iwh_eep_load(): "
4741 		    "driver failed to own EEPROM\n"));
4742 		return (IWH_FAIL);
4743 	}
4744 
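	/*
	 * read the EEPROM one 16-bit word at a time: write the word address
	 * into CSR_EEPROM_REG, poll bit 0 for the read-done indication and
	 * take the data from the upper 16 bits of the register.
	 */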
4745 	for (addr = 0; addr < eep_sz; addr += 2) {
4746 		IWH_WRITE(sc, CSR_EEPROM_REG, addr<<1);
4747 		tmp = IWH_READ(sc, CSR_EEPROM_REG);
4748 		IWH_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
4749 
4750 		for (i = 0; i < 10; i++) {
4751 			rv = IWH_READ(sc, CSR_EEPROM_REG);
4752 			if (rv & 1) {
4753 				break;
4754 			}
4755 			DELAY(10);
4756 		}
4757 
4758 		if (!(rv & 1)) {
4759 			IWH_DBG((IWH_DEBUG_EEPROM, "iwh_eep_load(): "
4760 			    "time out when read eeprome\n"));
4761 			iwh_eep_sem_up(sc);
4762 			return (IWH_FAIL);
4763 		}
4764 
4765 		eep_p[addr/2] = LE_16(rv >> 16);
4766 	}
4767 
4768 	iwh_eep_sem_up(sc);
4769 	return (IWH_SUCCESS);
4770 }
4771 
4772 /*
4773  * initialize mac address in ieee80211com_t struct
4774  */
4775 static void
4776 iwh_get_mac_from_eep(iwh_sc_t *sc)
4777 {
4778 	ieee80211com_t *ic = &sc->sc_ic;
4779 
4780 	IEEE80211_ADDR_COPY(ic->ic_macaddr, &sc->sc_eep_map[EEP_MAC_ADDRESS]);
4781 
4782 	IWH_DBG((IWH_DEBUG_EEPROM, "iwh_get_mac_from_eep(): "
4783 	    "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4784 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4785 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4786 }
4787 
4788 /*
4789  * main initialization function
4790  */
4791 static int
4792 iwh_init(iwh_sc_t *sc)
4793 {
4794 	int err = IWH_FAIL;
4795 	clock_t clk;
4796 
4797 	/*
4798 	 * release buffer for calibration
4799 	 */
4800 	iwh_release_calib_buffer(sc);
4801 
4802 	mutex_enter(&sc->sc_glock);
4803 	atomic_and_32(&sc->sc_flags, ~IWH_F_FW_INIT);
4804 
4805 	err = iwh_init_common(sc);
4806 	if (err != IWH_SUCCESS) {
4807 		mutex_exit(&sc->sc_glock);
4808 		return (IWH_FAIL);
4809 	}
4810 
4811 	/*
4812 	 * backup ucode data part for future use.
4813 	 */
4814 	bcopy(sc->sc_dma_fw_data.mem_va,
4815 	    sc->sc_dma_fw_data_bak.mem_va,
4816 	    sc->sc_dma_fw_data.alength);
4817 
4818 	/* load firmware init segment into NIC */
4819 	err = iwh_load_init_firmware(sc);
4820 	if (err != IWH_SUCCESS) {
4821 		cmn_err(CE_WARN, "iwh_init(): "
4822 		    "failed to setup init firmware\n");
4823 		mutex_exit(&sc->sc_glock);
4824 		return (IWH_FAIL);
4825 	}
4826 
4827 	/*
4828 	 * now press "execute" to start running
4829 	 */
4830 	IWH_WRITE(sc, CSR_RESET, 0);
4831 
4832 	clk = ddi_get_lbolt() + drv_usectohz(1000000);
4833 	while (!(sc->sc_flags & IWH_F_FW_INIT)) {
4834 		if (cv_timedwait(&sc->sc_ucode_cv,
4835 		    &sc->sc_glock, clk) < 0) {
4836 			break;
4837 		}
4838 	}
4839 
4840 	if (!(sc->sc_flags & IWH_F_FW_INIT)) {
4841 		cmn_err(CE_WARN, "iwh_init(): "
4842 		    "failed to process init alive.\n");
4843 		mutex_exit(&sc->sc_glock);
4844 		return (IWH_FAIL);
4845 	}
4846 
4847 	mutex_exit(&sc->sc_glock);
4848 
4849 	/*
4850 	 * stop chipset so that it can be initialized again
4851 	 */
4852 	iwh_stop(sc);
4853 
4854 	mutex_enter(&sc->sc_glock);
4855 	atomic_and_32(&sc->sc_flags, ~IWH_F_FW_INIT);
4856 
4857 	err = iwh_init_common(sc);
4858 	if (err != IWH_SUCCESS) {
4859 		mutex_exit(&sc->sc_glock);
4860 		return (IWH_FAIL);
4861 	}
4862 
4863 	/*
4864 	 * load firmware run segment into NIC
4865 	 */
4866 	err = iwh_load_run_firmware(sc);
4867 	if (err != IWH_SUCCESS) {
4868 		cmn_err(CE_WARN, "iwh_init(): "
4869 		    "failed to setup run firmware\n");
4870 		mutex_exit(&sc->sc_glock);
4871 		return (IWH_FAIL);
4872 	}
4873 
4874 	/*
4875 	 * now press "execute" to start running
4876 	 */
4877 	IWH_WRITE(sc, CSR_RESET, 0);
4878 
4879 	clk = ddi_get_lbolt() + drv_usectohz(1000000);
4880 	while (!(sc->sc_flags & IWH_F_FW_INIT)) {
4881 		if (cv_timedwait(&sc->sc_ucode_cv,
4882 		    &sc->sc_glock, clk) < 0) {
4883 			break;
4884 		}
4885 	}
4886 
4887 	if (!(sc->sc_flags & IWH_F_FW_INIT)) {
4888 		cmn_err(CE_WARN, "iwh_init(): "
4889 		    "failed to process runtime alive.\n");
4890 		mutex_exit(&sc->sc_glock);
4891 		return (IWH_FAIL);
4892 	}
4893 
4894 	mutex_exit(&sc->sc_glock);
4895 
4896 	DELAY(1000);
4897 
4898 	mutex_enter(&sc->sc_glock);
4899 	atomic_and_32(&sc->sc_flags, ~IWH_F_FW_INIT);
4900 
4901 	/*
4902 	 * at this point, the firmware is loaded OK, then config the hardware
4903 	 * with the ucode API, including rxon, txpower, etc.
4904 	 */
4905 	err = iwh_config(sc);
4906 	if (err) {
4907 		cmn_err(CE_WARN, "iwh_init(): "
4908 		    "failed to configure device\n");
4909 		mutex_exit(&sc->sc_glock);
4910 		return (IWH_FAIL);
4911 	}
4912 
4913 	/*
4914 	 * at this point, hardware may receive beacons :)
4915 	 */
4916 	mutex_exit(&sc->sc_glock);
4917 	return (IWH_SUCCESS);
4918 }
4919 
4920 /*
4921  * stop or disable NIC
4922  */
4923 static void
4924 iwh_stop(iwh_sc_t *sc)
4925 {
4926 	uint32_t tmp;
4927 	int i;
4928 
4929 	/*
4930 	 * bypass locking if it's quiesced
4931 	 */
4932 	if (!(sc->sc_flags & IWH_F_QUIESCED)) {
4933 		mutex_enter(&sc->sc_glock);
4934 	}
4935 
4936 	IWH_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4937 	/*
4938 	 * disable interrupts
4939 	 */
4940 	IWH_WRITE(sc, CSR_INT_MASK, 0);
4941 	IWH_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
4942 	IWH_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
4943 
4944 	/*
4945 	 * reset all Tx rings
4946 	 */
4947 	for (i = 0; i < IWH_NUM_QUEUES; i++) {
4948 		iwh_reset_tx_ring(sc, &sc->sc_txq[i]);
4949 	}
4950 
4951 	/*
4952 	 * reset Rx ring
4953 	 */
4954 	iwh_reset_rx_ring(sc);
4955 
4956 	iwh_mac_access_enter(sc);
4957 	iwh_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4958 	iwh_mac_access_exit(sc);
4959 
4960 	DELAY(5);
4961 
4962 	iwh_stop_master(sc);
4963 
4964 	mutex_enter(&sc->sc_mt_lock);
4965 	sc->sc_tx_timer = 0;
4966 	mutex_exit(&sc->sc_mt_lock);
4967 
4968 	tmp = IWH_READ(sc, CSR_RESET);
4969 	IWH_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
4970 
4971 	/*
4972 	 * bypass unlocking if it's quiesced
4973 	 */
4974 	if (!(sc->sc_flags & IWH_F_QUIESCED)) {
4975 		mutex_exit(&sc->sc_glock);
4976 	}
4977 }
4978 
4979 /*
4980  * Naive implementation of the Adaptive Multi Rate Retry algorithm:
4981  * "IEEE 802.11 Rate Adaptation: A Practical Approach"
4982  * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
4983  * INRIA Sophia - Projet Planete
4984  * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
4985  */
4986 #define	is_success(amrr)	\
4987 	((amrr)->retrycnt < (amrr)->txcnt / 10)
4988 #define	is_failure(amrr)	\
4989 	((amrr)->retrycnt > (amrr)->txcnt / 3)
4990 #define	is_enough(amrr)		\
4991 	((amrr)->txcnt > 200)
4992 #define	not_very_few(amrr)	\
4993 	((amrr)->txcnt > 40)
4994 #define	is_min_rate(in)		\
4995 	(0 == (in)->in_txrate)
4996 #define	is_max_rate(in)		\
4997 	((in)->in_rates.ir_nrates - 1 == (in)->in_txrate)
4998 #define	increase_rate(in)	\
4999 	((in)->in_txrate++)
5000 #define	decrease_rate(in)	\
5001 	((in)->in_txrate--)
5002 #define	reset_cnt(amrr)		\
5003 	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }
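/*
 * With the macros above, a sampling period counts as a success when
 * fewer than 10% of the transmitted frames needed a retry, and as a
 * failure when more than a third of them did.
 */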
5004 
5005 #define	IWH_AMRR_MIN_SUCCESS_THRESHOLD	 1
5006 #define	IWH_AMRR_MAX_SUCCESS_THRESHOLD	15
5007 
5008 static void
5009 iwh_amrr_init(iwh_amrr_t *amrr)
5010 {
5011 	amrr->success = 0;
5012 	amrr->recovery = 0;
5013 	amrr->txcnt = amrr->retrycnt = 0;
5014 	amrr->success_threshold = IWH_AMRR_MIN_SUCCESS_THRESHOLD;
5015 	amrr->ht_mcs_idx = 0;	/* 6Mbps */
5016 }
5017 
5018 static void
5019 iwh_amrr_timeout(iwh_sc_t *sc)
5020 {
5021 	ieee80211com_t *ic = &sc->sc_ic;
5022 
5023 	IWH_DBG((IWH_DEBUG_RATECTL, "iwh_amrr_timeout(): "
5024 	    "enter\n"));
5025 
5026 	if (IEEE80211_M_STA == ic->ic_opmode) {
5027 		iwh_amrr_ratectl(NULL, ic->ic_bss);
5028 	} else {
5029 		ieee80211_iterate_nodes(&ic->ic_sta, iwh_amrr_ratectl, NULL);
5030 	}
5031 
5032 	sc->sc_clk = ddi_get_lbolt();
5033 }
5034 
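/*
 * For an HT node, the maximum usable rate is the highest MCS in the
 * node's negotiated rate set that this hardware can actually transmit
 * (per sc_ht_conf.tx_support_mcs); legacy nodes use the plain AMRR check.
 */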
5035 static int
5036 iwh_is_max_rate(ieee80211_node_t *in)
5037 {
5038 	int i;
5039 	iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5040 	uint8_t r = (uint8_t)amrr->ht_mcs_idx;
5041 	ieee80211com_t *ic = in->in_ic;
5042 	iwh_sc_t *sc = (iwh_sc_t *)ic;
5043 
5044 	if (in->in_flags & IEEE80211_NODE_HT) {
5045 		for (i = in->in_htrates.rs_nrates - 1; i >= 0; i--) {
5046 			r = in->in_htrates.rs_rates[i] &
5047 			    IEEE80211_RATE_VAL;
5048 			if (sc->sc_ht_conf.tx_support_mcs[r/8] &
5049 			    (1 << (r%8))) {
5050 				break;
5051 			}
5052 		}
5053 
5054 		return (r == (uint8_t)amrr->ht_mcs_idx);
5055 	} else {
5056 		return (is_max_rate(in));
5057 	}
5058 }
5059 
5060 static int
5061 iwh_is_min_rate(ieee80211_node_t *in)
5062 {
5063 	int i;
5064 	uint8_t r = 0;
5065 	iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5066 	ieee80211com_t *ic = in->in_ic;
5067 	iwh_sc_t *sc = (iwh_sc_t *)ic;
5068 
5069 	if (in->in_flags & IEEE80211_NODE_HT) {
5070 		for (i = 0; i < in->in_htrates.rs_nrates; i++) {
5071 			r = in->in_htrates.rs_rates[i] &
5072 			    IEEE80211_RATE_VAL;
5073 			if (sc->sc_ht_conf.tx_support_mcs[r/8] &
5074 			    (1 << (r%8))) {
5075 				break;
5076 			}
5077 		}
5078 
5079 		return (r == (uint8_t)amrr->ht_mcs_idx);
5080 	} else {
5081 		return (is_min_rate(in));
5082 	}
5083 }
5084 
5085 static void
5086 iwh_increase_rate(ieee80211_node_t *in)
5087 {
5088 	int i;
5089 	uint8_t r;
5090 	iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5091 	ieee80211com_t *ic = in->in_ic;
5092 	iwh_sc_t *sc = (iwh_sc_t *)ic;
5093 
5094 	if (in->in_flags & IEEE80211_NODE_HT) {
5095 again:
5096 		amrr->ht_mcs_idx++;
5097 
5098 		for (i = 0; i < in->in_htrates.rs_nrates; i++) {
5099 			r = in->in_htrates.rs_rates[i] &
5100 			    IEEE80211_RATE_VAL;
5101 			if ((r == (uint8_t)amrr->ht_mcs_idx) &&
5102 			    (sc->sc_ht_conf.tx_support_mcs[r/8] &
5103 			    (1 << (r%8)))) {
5104 				break;
5105 			}
5106 		}
5107 
5108 		if (i >= in->in_htrates.rs_nrates) {
5109 			goto again;
5110 		}
5111 	} else {
5112 		increase_rate(in);
5113 	}
5114 }
5115 
5116 static void
5117 iwh_decrease_rate(ieee80211_node_t *in)
5118 {
5119 	int i;
5120 	uint8_t r;
5121 	iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5122 	ieee80211com_t *ic = in->in_ic;
5123 	iwh_sc_t *sc = (iwh_sc_t *)ic;
5124 
5125 	if (in->in_flags & IEEE80211_NODE_HT) {
5126 again:
5127 		amrr->ht_mcs_idx--;
5128 
5129 		for (i = 0; i < in->in_htrates.rs_nrates; i++) {
5130 			r = in->in_htrates.rs_rates[i] &
5131 			    IEEE80211_RATE_VAL;
5132 			if ((r == (uint8_t)amrr->ht_mcs_idx) &&
5133 			    (sc->sc_ht_conf.tx_support_mcs[r/8] &
5134 			    (1 << (r%8)))) {
5135 				break;
5136 			}
5137 		}
5138 
5139 		if (i >= in->in_htrates.rs_nrates) {
5140 			goto again;
5141 		}
5142 	} else {
5143 		decrease_rate(in);
5144 	}
5145 }
5146 
5147 /* ARGSUSED */
5148 static void
5149 iwh_amrr_ratectl(void *arg, ieee80211_node_t *in)
5150 {
5151 	iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5152 	int need_change = 0;
5153 
5154 	if (is_success(amrr) && is_enough(amrr)) {
5155 		amrr->success++;
5156 		if (amrr->success >= amrr->success_threshold &&
5157 		    !iwh_is_max_rate(in)) {
5158 			amrr->recovery = 1;
5159 			amrr->success = 0;
5160 			iwh_increase_rate(in);
5161 			IWH_DBG((IWH_DEBUG_RATECTL, "iwh_amrr_ratectl(): "
5162 			    "AMRR increasing rate %d "
5163 			    "(txcnt=%d retrycnt=%d), mcs_idx=%d\n",
5164 			    in->in_txrate, amrr->txcnt,
5165 			    amrr->retrycnt, amrr->ht_mcs_idx));
5166 			need_change = 1;
5167 		} else {
5168 			amrr->recovery = 0;
5169 		}
5170 	} else if (not_very_few(amrr) && is_failure(amrr)) {
5171 		amrr->success = 0;
5172 		if (!iwh_is_min_rate(in)) {
5173 			if (amrr->recovery) {
5174 				amrr->success_threshold++;
5175 				if (amrr->success_threshold >
5176 				    IWH_AMRR_MAX_SUCCESS_THRESHOLD) {
5177 					amrr->success_threshold =
5178 					    IWH_AMRR_MAX_SUCCESS_THRESHOLD;
5179 				}
5180 			} else {
5181 				amrr->success_threshold =
5182 				    IWH_AMRR_MIN_SUCCESS_THRESHOLD;
5183 			}
5184 			iwh_decrease_rate(in);
5185 			IWH_DBG((IWH_DEBUG_RATECTL, "iwh_amrr_ratectl(): "
5186 			    "AMRR decreasing rate %d "
5187 			    "(txcnt=%d retrycnt=%d), mcs_idx=%d\n",
5188 			    in->in_txrate, amrr->txcnt,
5189 			    amrr->retrycnt, amrr->ht_mcs_idx));
5190 			need_change = 1;
5191 		}
5192 		amrr->recovery = 0;	/* paper is incorrect */
5193 	}
5194 
5195 	if (is_enough(amrr) || need_change) {
5196 		reset_cnt(amrr);
5197 	}
5198 }
5199 
5200 /*
5201  * translate indirect address in eeprom to direct address
5202  * in eeprom and return the address of the entry whose indirect address
5203  * is indi_addr
5204  */
5205 static uint8_t *
5206 iwh_eep_addr_trans(iwh_sc_t *sc, uint32_t indi_addr)
5207 {
5208 	uint32_t di_addr;
5209 	uint16_t temp;
5210 
5211 	if (!(indi_addr & INDIRECT_ADDRESS)) {
5212 		di_addr = indi_addr;
5213 		return (&sc->sc_eep_map[di_addr]);
5214 	}
5215 
5216 	switch (indi_addr & INDIRECT_TYPE_MSK) {
5217 	case INDIRECT_GENERAL:
5218 		temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_GENERAL);
5219 		break;
5220 
5221 	case	INDIRECT_HOST:
5222 		temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_HOST);
5223 		break;
5224 
5225 	case	INDIRECT_REGULATORY:
5226 		temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_REGULATORY);
5227 		break;
5228 
5229 	case	INDIRECT_CALIBRATION:
5230 		temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_CALIBRATION);
5231 		break;
5232 
5233 	case	INDIRECT_PROCESS_ADJST:
5234 		temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_PROCESS_ADJST);
5235 		break;
5236 
5237 	case	INDIRECT_OTHERS:
5238 		temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_OTHERS);
5239 		break;
5240 
5241 	default:
5242 		temp = 0;
5243 		cmn_err(CE_WARN, "iwh_eep_addr_trans(): "
5244 		    "incorrect indirect eeprom address.\n");
5245 		break;
5246 	}
5247 
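	/*
	 * the link offsets stored in the EEPROM are in 16-bit words,
	 * so shift left by one to form a byte offset into sc_eep_map.
	 */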
5248 	di_addr = (indi_addr & ADDRESS_MSK) + (temp << 1);
5249 
5250 	return (&sc->sc_eep_map[di_addr]);
5251 }
5252 
5253 /*
5254  * load a section of ucode into the NIC
5255  */
5256 static int
5257 iwh_put_seg_fw(iwh_sc_t *sc, uint32_t addr_s, uint32_t addr_d, uint32_t len)
5258 {
5259 
5260 	iwh_mac_access_enter(sc);
5261 
5262 	IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(IWH_FH_SRVC_CHNL),
5263 	    IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
5264 
5265 	IWH_WRITE(sc, IWH_FH_SRVC_CHNL_SRAM_ADDR_REG(IWH_FH_SRVC_CHNL), addr_d);
5266 
5267 	IWH_WRITE(sc, IWH_FH_TFDIB_CTRL0_REG(IWH_FH_SRVC_CHNL),
5268 	    (addr_s & FH_MEM_TFDIB_DRAM_ADDR_LSB_MASK));
5269 
5270 	IWH_WRITE(sc, IWH_FH_TFDIB_CTRL1_REG(IWH_FH_SRVC_CHNL), len);
5271 
5272 	IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_BUF_STS_REG(IWH_FH_SRVC_CHNL),
5273 	    (1 << IWH_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
5274 	    (1 << IWH_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
5275 	    IWH_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
5276 
5277 	IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(IWH_FH_SRVC_CHNL),
5278 	    IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
5279 	    IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
5280 	    IWH_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
5281 
5282 	iwh_mac_access_exit(sc);
5283 
5284 	return (IWH_SUCCESS);
5285 }
5286 
5287 /*
5288  * necessary settings during alive notification
5289  */
5290 static int
5291 iwh_alive_common(iwh_sc_t *sc)
5292 {
5293 	uint32_t base;
5294 	uint32_t i;
5295 	iwh_wimax_coex_cmd_t w_cmd;
5296 	iwh_calibration_crystal_cmd_t c_cmd;
5297 	uint32_t rv = IWH_FAIL;
5298 
5299 	/*
5300 	 * initialize SCD related registers to make TX work.
5301 	 */
5302 	iwh_mac_access_enter(sc);
5303 
5304 	/*
5305 	 * read the sram address of the SCD data base.
5306 	 */
5307 	sc->sc_scd_base = iwh_reg_read(sc, IWH_SCD_SRAM_BASE_ADDR);
5308 
5309 	for (base = sc->sc_scd_base + IWH_SCD_CONTEXT_DATA_OFFSET;
5310 	    base < sc->sc_scd_base + IWH_SCD_TX_STTS_BITMAP_OFFSET;
5311 	    base += 4) {
5312 		iwh_mem_write(sc, base, 0);
5313 	}
5314 
5315 	for (; base < sc->sc_scd_base + IWH_SCD_TRANSLATE_TBL_OFFSET;
5316 	    base += 4) {
5317 		iwh_mem_write(sc, base, 0);
5318 	}
5319 
5320 	for (i = 0; i < sizeof (uint16_t) * IWH_NUM_QUEUES; i += 4) {
5321 		iwh_mem_write(sc, base + i, 0);
5322 	}
5323 
5324 	iwh_reg_write(sc, IWH_SCD_DRAM_BASE_ADDR,
5325 	    sc->sc_dma_sh.cookie.dmac_address >> 10);
5326 
5327 	iwh_reg_write(sc, IWH_SCD_QUEUECHAIN_SEL,
5328 	    IWH_SCD_QUEUECHAIN_SEL_ALL(IWH_NUM_QUEUES));
5329 
5330 	iwh_reg_write(sc, IWH_SCD_AGGR_SEL, 0);
5331 
5332 	for (i = 0; i < IWH_NUM_QUEUES; i++) {
5333 		iwh_reg_write(sc, IWH_SCD_QUEUE_RDPTR(i), 0);
5334 		IWH_WRITE(sc, HBUS_TARG_WRPTR, 0 | (i << 8));
5335 		iwh_mem_write(sc, sc->sc_scd_base +
5336 		    IWH_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
5337 		iwh_mem_write(sc, sc->sc_scd_base +
5338 		    IWH_SCD_CONTEXT_QUEUE_OFFSET(i) +
5339 		    sizeof (uint32_t),
5340 		    ((SCD_WIN_SIZE << IWH_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
5341 		    IWH_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
5342 		    ((SCD_FRAME_LIMIT <<
5343 		    IWH_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
5344 		    IWH_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
5345 	}
5346 
5347 	iwh_reg_write(sc, IWH_SCD_INTERRUPT_MASK, (1 << IWH_NUM_QUEUES) - 1);
5348 
5349 	iwh_reg_write(sc, (IWH_SCD_BASE + 0x10),
5350 	    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
5351 
5352 	IWH_WRITE(sc, HBUS_TARG_WRPTR, (IWH_CMD_QUEUE_NUM << 8));
5353 	iwh_reg_write(sc, IWH_SCD_QUEUE_RDPTR(IWH_CMD_QUEUE_NUM), 0);
5354 
5355 	/*
5356 	 * queues 0-7 map to FIFOs 0-7 and
5357 	 * all queues work under FIFO mode (non-scheduler_ack)
5358 	 */
5359 	for (i = 0; i < 4; i++) {
5360 		iwh_reg_write(sc, IWH_SCD_QUEUE_STATUS_BITS(i),
5361 		    (1 << IWH_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
5362 		    ((3-i) << IWH_SCD_QUEUE_STTS_REG_POS_TXF) |
5363 		    (1 << IWH_SCD_QUEUE_STTS_REG_POS_WSL) |
5364 		    IWH_SCD_QUEUE_STTS_REG_MSK);
5365 	}
5366 
5367 	iwh_reg_write(sc, IWH_SCD_QUEUE_STATUS_BITS(IWH_CMD_QUEUE_NUM),
5368 	    (1 << IWH_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
5369 	    (IWH_CMD_FIFO_NUM << IWH_SCD_QUEUE_STTS_REG_POS_TXF) |
5370 	    (1 << IWH_SCD_QUEUE_STTS_REG_POS_WSL) |
5371 	    IWH_SCD_QUEUE_STTS_REG_MSK);
5372 
5373 	for (i = 5; i < 7; i++) {
5374 		iwh_reg_write(sc, IWH_SCD_QUEUE_STATUS_BITS(i),
5375 		    (1 << IWH_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
5376 		    (i << IWH_SCD_QUEUE_STTS_REG_POS_TXF) |
5377 		    (1 << IWH_SCD_QUEUE_STTS_REG_POS_WSL) |
5378 		    IWH_SCD_QUEUE_STTS_REG_MSK);
5379 	}
5380 
5381 	iwh_mac_access_exit(sc);
5382 
5383 	(void) memset(&w_cmd, 0, sizeof (w_cmd));
5384 
5385 	rv = iwh_cmd(sc, COEX_PRIORITY_TABLE_CMD, &w_cmd, sizeof (w_cmd), 1);
5386 	if (rv != IWH_SUCCESS) {
5387 		cmn_err(CE_WARN, "iwh_alive_common(): "
5388 		    "failed to send wimax coexist command.\n");
5389 		return (rv);
5390 	}
5391 
5392 	if ((sc->sc_dev_id != 0x423c) &&
5393 	    (sc->sc_dev_id != 0x423d)) {
5394 		(void) memset(&c_cmd, 0, sizeof (c_cmd));
5395 
5396 		c_cmd.opCode = PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
5397 		c_cmd.data.cap_pin1 = LE_16(sc->sc_eep_calib->xtal_calib[0]);
5398 		c_cmd.data.cap_pin2 = LE_16(sc->sc_eep_calib->xtal_calib[1]);
5399 
5400 		rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
5401 		    &c_cmd, sizeof (c_cmd), 1);
5402 		if (rv != IWH_SUCCESS) {
5403 			cmn_err(CE_WARN, "iwh_alive_common(): "
5404 			    "failed to send crystal"
5405 			    "frq calibration command.\n");
5406 			return (rv);
5407 		}
5408 
5409 		/*
5410 		 * make sure crystal frequency calibration is ready
5411 		 * before the next operations.
5412 		 */
5413 		DELAY(1000);
5414 	}
5415 
5416 	return (IWH_SUCCESS);
5417 }
5418 
5419 /*
5420  * save results of calibration from ucode
5421  */
5422 static void
5423 iwh_save_calib_result(iwh_sc_t *sc, iwh_rx_desc_t *desc)
5424 {
5425 	struct iwh_calib_results *res_p = &sc->sc_calib_results;
5426 	struct iwh_calib_hdr *calib_hdr = (struct iwh_calib_hdr *)(desc + 1);
5427 	int len = LE_32(desc->len);
5428 
5429 	/*
5430 	 * ensure the size of buffer is not too big
5431 	 */
5432 	len = (len & FH_RSCSR_FRAME_SIZE_MASK) - 4;
5433 
5434 	switch (calib_hdr->op_code) {
5435 	case PHY_CALIBRATE_LO_CMD:
5436 		if (NULL == res_p->lo_res) {
5437 			res_p->lo_res = kmem_alloc(len, KM_NOSLEEP);
5438 		}
5439 
5440 		if (NULL == res_p->lo_res) {
5441 			cmn_err(CE_WARN, "iwh_save_calib_result(): "
5442 			    "failed to allocate memory.\n");
5443 			return;
5444 		}
5445 
5446 		res_p->lo_res_len = len;
5447 		bcopy(calib_hdr, res_p->lo_res, len);
5448 		break;
5449 
5450 	case PHY_CALIBRATE_TX_IQ_CMD:
5451 		if (NULL == res_p->tx_iq_res) {
5452 			res_p->tx_iq_res = kmem_alloc(len, KM_NOSLEEP);
5453 		}
5454 
5455 		if (NULL == res_p->tx_iq_res) {
5456 			cmn_err(CE_WARN, "iwh_save_calib_result(): "
5457 			    "failed to allocate memory.\n");
5458 			return;
5459 		}
5460 
5461 		res_p->tx_iq_res_len = len;
5462 		bcopy(calib_hdr, res_p->tx_iq_res, len);
5463 		break;
5464 
5465 	case PHY_CALIBRATE_TX_IQ_PERD_CMD:
5466 		if (NULL == res_p->tx_iq_perd_res) {
5467 			res_p->tx_iq_perd_res = kmem_alloc(len, KM_NOSLEEP);
5468 		}
5469 
5470 		if (NULL == res_p->tx_iq_perd_res) {
5471 			cmn_err(CE_WARN, "iwh_save_calib_result(): "
5472 			    "failed to allocate memory.\n");
5473 			return;
5474 		}
5475 
5476 		res_p->tx_iq_perd_res_len = len;
5477 		bcopy(calib_hdr, res_p->tx_iq_perd_res, len);
5478 		break;
5479 
5480 	case PHY_CALIBRATE_DC_CMD:
5481 		if (NULL == res_p->dc_res) {
5482 			res_p->dc_res = kmem_alloc(len, KM_NOSLEEP);
5483 		}
5484 
5485 		if (NULL == res_p->dc_res) {
5486 			cmn_err(CE_WARN, "iwh_save_calib_result(): "
5487 			    "failed to allocate memory.\n");
5488 			return;
5489 		}
5490 
5491 		res_p->dc_res_len = len;
5492 		bcopy(calib_hdr, res_p->dc_res, len);
5493 		break;
5494 
5495 	case PHY_CALIBRATE_BASE_BAND_CMD:
5496 		if (NULL == res_p->base_band_res) {
5497 			res_p->base_band_res = kmem_alloc(len, KM_NOSLEEP);
5498 		}
5499 
5500 		if (NULL == res_p->base_band_res) {
5501 			cmn_err(CE_WARN, "iwh_save_calib_result(): "
5502 			    "failed to allocate memory.\n");
5503 			return;
5504 		}
5505 
5506 		res_p->base_band_res_len = len;
5507 		bcopy(calib_hdr, res_p->base_band_res, len);
5508 		break;
5509 
5510 	default:
5511 		cmn_err(CE_WARN, "iwh_save_calib_result(): "
5512 		    "incorrect calibration type(%d).\n", calib_hdr->op_code);
5513 		break;
5514 	}
5515 
5516 }
5517 
5518 /*
5519  * configure TX power table
5520  */
5521 static int
5522 iwh_tx_power_table(iwh_sc_t *sc, int async)
5523 {
5524 	iwh_tx_power_table_cmd_t txpower;
5525 	int i, err = IWH_FAIL;
5526 
5527 	(void) memset(&txpower, 0, sizeof (txpower));
5528 
5529 	txpower.band = 1; /* for 2.4G */
5530 	txpower.channel = (uint8_t)LE_16(sc->sc_config.chan);
5531 	txpower.pa_measurements = 1;
5532 	txpower.max_mcs = 23;
5533 
5534 	for (i = 0; i < 24; i++) {
5535 		txpower.db.ht_ofdm_power[i].s.radio_tx_gain[0] = 0x16;
5536 		txpower.db.ht_ofdm_power[i].s.radio_tx_gain[1] = 0x16;
5537 		txpower.db.ht_ofdm_power[i].s.radio_tx_gain[2] = 0x16;
5538 		txpower.db.ht_ofdm_power[i].s.dsp_predis_atten[0] = 0x6E;
5539 		txpower.db.ht_ofdm_power[i].s.dsp_predis_atten[1] = 0x6E;
5540 		txpower.db.ht_ofdm_power[i].s.dsp_predis_atten[2] = 0x6E;
5541 	}
5542 
5543 	for (i = 0; i < 2; i++) {
5544 		txpower.db.cck_power[i].s.radio_tx_gain[0] = 0x16;
5545 		txpower.db.cck_power[i].s.radio_tx_gain[1] = 0x16;
5546 		txpower.db.cck_power[i].s.radio_tx_gain[2] = 0x16;
5547 		txpower.db.cck_power[i].s.dsp_predis_atten[0] = 0x6E;
5548 		txpower.db.cck_power[i].s.dsp_predis_atten[1] = 0x6E;
5549 		txpower.db.cck_power[i].s.dsp_predis_atten[2] = 0x6E;
5550 	}
5551 
5552 	err = iwh_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
5553 	    sizeof (txpower), async);
5554 	if (err != IWH_SUCCESS) {
5555 		cmn_err(CE_WARN, "iwh_tx_power_table(): "
5556 		    "failed to set tx power table.\n");
5557 		return (err);
5558 	}
5559 
5560 	return (err);
5561 }
5562 
5563 static void
5564 iwh_release_calib_buffer(iwh_sc_t *sc)
5565 {
5566 	if (sc->sc_calib_results.lo_res != NULL) {
5567 		kmem_free(sc->sc_calib_results.lo_res,
5568 		    sc->sc_calib_results.lo_res_len);
5569 		sc->sc_calib_results.lo_res = NULL;
5570 	}
5571 
5572 	if (sc->sc_calib_results.tx_iq_res != NULL) {
5573 		kmem_free(sc->sc_calib_results.tx_iq_res,
5574 		    sc->sc_calib_results.tx_iq_res_len);
5575 		sc->sc_calib_results.tx_iq_res = NULL;
5576 	}
5577 
5578 	if (sc->sc_calib_results.tx_iq_perd_res != NULL) {
5579 		kmem_free(sc->sc_calib_results.tx_iq_perd_res,
5580 		    sc->sc_calib_results.tx_iq_perd_res_len);
5581 		sc->sc_calib_results.tx_iq_perd_res = NULL;
5582 	}
5583 
5584 	if (sc->sc_calib_results.dc_res != NULL) {
5585 		kmem_free(sc->sc_calib_results.dc_res,
5586 		    sc->sc_calib_results.dc_res_len);
5587 		sc->sc_calib_results.dc_res = NULL;
5588 	}
5589 
5590 	if (sc->sc_calib_results.base_band_res != NULL) {
5591 		kmem_free(sc->sc_calib_results.base_band_res,
5592 		    sc->sc_calib_results.base_band_res_len);
5593 		sc->sc_calib_results.base_band_res = NULL;
5594 	}
5595 }
5596 
5597 /*
5598  * common section of initialization
5599  */
5600 static int
5601 iwh_init_common(iwh_sc_t *sc)
5602 {
5603 	int32_t	qid;
5604 	uint32_t tmp;
5605 
5606 	if (iwh_reset_hw(sc) != IWH_SUCCESS) {
5607 		cmn_err(CE_WARN, "iwh_init_common(): "
5608 		    "failed to reset hardware\n");
5609 		return (IWH_FAIL);
5610 	}
5611 
5612 	(void) iwh_preinit(sc);
5613 
5614 	tmp = IWH_READ(sc, CSR_GP_CNTRL);
5615 	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
5616 		cmn_err(CE_NOTE, "iwh_init_common(): "
5617 		    "radio transmitter is off\n");
5618 		return (IWH_FAIL);
5619 	}
5620 
5621 	/*
5622 	 * init Rx ring
5623 	 */
5624 	iwh_mac_access_enter(sc);
5625 	IWH_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
5626 
5627 	IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
5628 	IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
5629 	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);
5630 
5631 	IWH_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
5632 	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
5633 	    offsetof(struct iwh_shared, val0)) >> 4));
5634 
5635 	IWH_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
5636 	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
5637 	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
5638 	    IWH_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K |
5639 	    (RX_QUEUE_SIZE_LOG <<
5640 	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
5641 	iwh_mac_access_exit(sc);
5642 	IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
5643 	    (RX_QUEUE_SIZE - 1) & ~0x7);
5644 
5645 	/*
5646 	 * init Tx rings
5647 	 */
5648 	iwh_mac_access_enter(sc);
5649 	iwh_reg_write(sc, IWH_SCD_TXFACT, 0);
5650 
5651 	/*
5652 	 * keep warm page
5653 	 */
5654 	IWH_WRITE(sc, IWH_FH_KW_MEM_ADDR_REG,
5655 	    sc->sc_dma_kw.cookie.dmac_address >> 4);
5656 
5657 	for (qid = 0; qid < IWH_NUM_QUEUES; qid++) {
5658 		IWH_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
5659 		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
5660 		IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
5661 		    IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
5662 		    IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
5663 	}
5664 
5665 	iwh_mac_access_exit(sc);
5666 
5667 	/*
5668 	 * clear "radio off" and "disable command" bits
5669 	 */
5670 	IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5671 	IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
5672 	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
5673 
5674 	/*
5675 	 * clear any pending interrupts
5676 	 */
5677 	IWH_WRITE(sc, CSR_INT, 0xffffffff);
5678 
5679 	/*
5680 	 * enable interrupts
5681 	 */
5682 	IWH_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
5683 
5684 	IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5685 	IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5686 
5687 	return (IWH_SUCCESS);
5688 }
5689 
5690 static int
5691 iwh_fast_recover(iwh_sc_t *sc)
5692 {
5693 	ieee80211com_t *ic = &sc->sc_ic;
5694 	int err = IWH_FAIL;
5695 
5696 	mutex_enter(&sc->sc_glock);
5697 
5698 	/*
5699 	 * restore runtime configuration
5700 	 */
5701 	bcopy(&sc->sc_config_save, &sc->sc_config,
5702 	    sizeof (sc->sc_config));
5703 
5704 	sc->sc_config.assoc_id = 0;
5705 	sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
5706 
5707 	if ((err = iwh_hw_set_before_auth(sc)) != IWH_SUCCESS) {
5708 		cmn_err(CE_WARN, "iwh_fast_recover(): "
5709 		    "could not setup authentication\n");
5710 		mutex_exit(&sc->sc_glock);
5711 		return (err);
5712 	}
5713 
5714 	bcopy(&sc->sc_config_save, &sc->sc_config,
5715 	    sizeof (sc->sc_config));
5716 
5717 	/*
5718 	 * update adapter's configuration
5719 	 */
5720 	err = iwh_run_state_config(sc);
5721 	if (err != IWH_SUCCESS) {
5722 		cmn_err(CE_WARN, "iwh_fast_recover(): "
5723 		    "failed to setup association\n");
5724 		mutex_exit(&sc->sc_glock);
5725 		return (err);
5726 	}
5727 
5728 	/*
5729 	 * set LED on
5730 	 */
5731 	iwh_set_led(sc, 2, 0, 1);
5732 
5733 	mutex_exit(&sc->sc_glock);
5734 
5735 	atomic_and_32(&sc->sc_flags, ~IWH_F_HW_ERR_RECOVER);
5736 
5737 	/*
5738 	 * start queue
5739 	 */
5740 	IWH_DBG((IWH_DEBUG_FW, "iwh_fast_recover(): "
5741 	    "resume xmit\n"));
5742 	mac_tx_update(ic->ic_mach);
5743 
5744 	return (IWH_SUCCESS);
5745 }
5746 
5747 static int
5748 iwh_run_state_config(iwh_sc_t *sc)
5749 {
5750 	struct ieee80211com *ic = &sc->sc_ic;
5751 	ieee80211_node_t *in = ic->ic_bss;
5752 	uint32_t ht_protec = (uint32_t)(-1);
5753 	int err = IWH_FAIL;
5754 
5755 	/*
5756 	 * update adapter's configuration
5757 	 */
5758 	sc->sc_config.assoc_id = in->in_associd & 0x3fff;
5759 
5760 	/*
5761 	 * short preamble/slot time are
5762 	 * negotiated when associating
5763 	 */
5764 	sc->sc_config.flags &=
5765 	    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
5766 	    RXON_FLG_SHORT_SLOT_MSK);
5767 
5768 	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
5769 		sc->sc_config.flags |=
5770 		    LE_32(RXON_FLG_SHORT_SLOT_MSK);
5771 	}
5772 
5773 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
5774 		sc->sc_config.flags |=
5775 		    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
5776 	}
5777 
5778 	if (in->in_flags & IEEE80211_NODE_HT) {
5779 		ht_protec = in->in_htopmode;
5780 		if (ht_protec > 3) {
5781 			cmn_err(CE_WARN, "iwh_run_state_config(): "
5782 			    "HT protection mode is not correct.\n");
5783 			return (IWH_FAIL);
5784 		} else if (NO_HT_PROT == ht_protec) {
5785 			ht_protec = sc->sc_ht_conf.ht_protection;
5786 		}
5787 
5788 		sc->sc_config.flags |=
5789 		    LE_32(ht_protec << RXON_FLG_HT_OPERATING_MODE_POS);
5790 	}
5791 
5792 	/*
5793 	 * set RX chains/antennas.
5794 	 */
5795 	iwh_config_rxon_chain(sc);
5796 
5797 	sc->sc_config.filter_flags |=
5798 	    LE_32(RXON_FILTER_ASSOC_MSK);
5799 
5800 	if (ic->ic_opmode != IEEE80211_M_STA) {
5801 		sc->sc_config.filter_flags |=
5802 		    LE_32(RXON_FILTER_BCON_AWARE_MSK);
5803 	}
5804 
5805 	IWH_DBG((IWH_DEBUG_80211, "iwh_run_state_config(): "
5806 	    "config chan %d flags %x"
5807 	    " filter_flags %x\n",
5808 	    sc->sc_config.chan, sc->sc_config.flags,
5809 	    sc->sc_config.filter_flags));
5810 
5811 	err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
5812 	    sizeof (iwh_rxon_cmd_t), 1);
5813 	if (err != IWH_SUCCESS) {
5814 		cmn_err(CE_WARN, "iwh_run_state_config(): "
5815 		    "could not update configuration\n");
5816 		return (err);
5817 	}
5818 
5819 	if ((sc->sc_dev_id != 0x423c) &&
5820 	    (sc->sc_dev_id != 0x423d)) {
5821 		/*
5822 		 * send tx power table command
5823 		 */
5824 		err = iwh_tx_power_table(sc, 1);
5825 		if (err != IWH_SUCCESS) {
5826 			return (err);
5827 		}
5828 	}
5829 
5830 	/*
5831 	 * No need to update the retry rate table for the AP node
5832 	 */
5833 	err = iwh_qosparam_to_hw(sc, 1);
5834 	if (err != IWH_SUCCESS) {
5835 		return (err);
5836 	}
5837 
5838 	return (err);
5839 }
5840 
5841 /*
5842  * This function exists only for compatibility with the net80211 module.
5843  * iwh_qosparam_to_hw() is the actual function that updates EDCA
5844  * parameters in hardware.
5845  */
5846 /* ARGSUSED */
5847 static int
5848 iwh_wme_update(ieee80211com_t *ic)
5849 {
5850 	return (0);
5851 }
5852 
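/*
 * Map a WME access category onto the firmware QoS AC index.  BE and BK
 * are swapped because the firmware orders its ACs/FIFOs as background,
 * best effort, video, voice (see iwh_qosparam_to_hw()).
 */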
5853 static int
5854 iwh_wme_to_qos_ac(int wme_ac)
5855 {
5856 	int qos_ac = QOS_AC_INVALID;
5857 
5858 	if (wme_ac < WME_AC_BE || wme_ac > WME_AC_VO) {
5859 		cmn_err(CE_WARN, "iwh_wme_to_qos_ac(): "
5860 		    "WME AC index is not in suitable range.\n");
5861 		return (qos_ac);
5862 	}
5863 
5864 	switch (wme_ac) {
5865 	case WME_AC_BE:
5866 		qos_ac = QOS_AC_BK;
5867 		break;
5868 	case WME_AC_BK:
5869 		qos_ac = QOS_AC_BE;
5870 		break;
5871 	case WME_AC_VI:
5872 		qos_ac = QOS_AC_VI;
5873 		break;
5874 	case WME_AC_VO:
5875 		qos_ac = QOS_AC_VO;
5876 		break;
5877 	}
5878 
5879 	return (qos_ac);
5880 }
5881 
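/*
 * Convert a contention-window exponent into the actual window size:
 * cw = 2^cw_e - 1.
 */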
5882 static uint16_t
5883 iwh_cw_e_to_cw(uint8_t cw_e)
5884 {
5885 	uint16_t cw = 1;
5886 
5887 	while (cw_e > 0) {
5888 		cw <<= 1;
5889 		cw_e--;
5890 	}
5891 
5892 	cw -= 1;
5893 	return (cw);
5894 }
5895 
5896 static int
5897 iwh_wmeparam_check(struct wmeParams *wmeparam)
5898 {
5899 	int i;
5900 
5901 	for (i = 0; i < WME_NUM_AC; i++) {
5902 
5903 		if ((wmeparam[i].wmep_logcwmax > QOS_CW_RANGE_MAX) ||
5904 		    (wmeparam[i].wmep_logcwmin >= wmeparam[i].wmep_logcwmax)) {
5905 			cmn_err(CE_WARN, "iwh_wmeparam_check(): "
5906 			    "Contention window is not in suitable range.\n");
5907 			return (IWH_FAIL);
5908 		}
5909 
5910 		if ((wmeparam[i].wmep_aifsn < QOS_AIFSN_MIN) ||
5911 		    (wmeparam[i].wmep_aifsn > QOS_AIFSN_MAX)) {
5912 			cmn_err(CE_WARN, "iwh_wmeparam_check(): "
5913 			    "Arbitration interframe space number"
5914 			    "is not in suitable range.\n");
5915 			return (IWH_FAIL);
5916 		}
5917 	}
5918 
5919 	return (IWH_SUCCESS);
5920 }
5921 
5922 /*
5923  * This function updates EDCA parameters into hardware.
5924  * FIFO0-background, FIFO1-best effort, FIFO2-video, FIFO3-voice.
5925  */
5926 static int
5927 iwh_qosparam_to_hw(iwh_sc_t *sc, int async)
5928 {
5929 	ieee80211com_t *ic = &sc->sc_ic;
5930 	ieee80211_node_t *in = ic->ic_bss;
5931 	struct wmeParams *wmeparam;
5932 	iwh_qos_param_cmd_t qosparam_cmd;
5933 	int i, j;
5934 	int err = IWH_FAIL;
5935 
5936 	if ((in->in_flags & IEEE80211_NODE_QOS) &&
5937 	    (IEEE80211_M_STA == ic->ic_opmode)) {
5938 		wmeparam = ic->ic_wme.wme_chanParams.cap_wmeParams;
5939 	} else {
5940 		return (IWH_SUCCESS);
5941 	}
5942 
5943 	(void) memset(&qosparam_cmd, 0, sizeof (qosparam_cmd));
5944 
5945 	err = iwh_wmeparam_check(wmeparam);
5946 	if (err != IWH_SUCCESS) {
5947 		return (err);
5948 	}
5949 
5950 	if (in->in_flags & IEEE80211_NODE_QOS) {
5951 		qosparam_cmd.flags |= QOS_PARAM_FLG_UPDATE_EDCA;
5952 	}
5953 
5954 	if (in->in_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)) {
5955 		qosparam_cmd.flags |= QOS_PARAM_FLG_TGN;
5956 	}
5957 
5958 	for (i = 0; i < WME_NUM_AC; i++) {
5959 
5960 		j = iwh_wme_to_qos_ac(i);
5961 		if (j < QOS_AC_BK || j > QOS_AC_VO) {
5962 			return (IWH_FAIL);
5963 		}
5964 
5965 		qosparam_cmd.ac[j].cw_min =
5966 		    iwh_cw_e_to_cw(wmeparam[i].wmep_logcwmin);
5967 		qosparam_cmd.ac[j].cw_max =
5968 		    iwh_cw_e_to_cw(wmeparam[i].wmep_logcwmax);
5969 		qosparam_cmd.ac[j].aifsn =
5970 		    wmeparam[i].wmep_aifsn;
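		/*
		 * wmep_txopLimit is expressed in 32-microsecond units (the
		 * 802.11e TXOP limit granularity); scaling by 32 hands the
		 * firmware the TXOP limit in microseconds (assumed firmware
		 * unit).
		 */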
5971 		qosparam_cmd.ac[j].txop =
5972 		    (uint16_t)(wmeparam[i].wmep_txopLimit * 32);
5973 	}
5974 
5975 	err = iwh_cmd(sc, REPLY_QOS_PARAM, &qosparam_cmd,
5976 	    sizeof (qosparam_cmd), async);
5977 	if (err != IWH_SUCCESS) {
5978 		cmn_err(CE_WARN, "iwh_qosparam_to_hw(): "
5979 		    "failed to update QoS parameters into hardware.\n");
5980 		return (err);
5981 	}
5982 
5983 #ifdef	DEBUG
5984 	IWH_DBG((IWH_DEBUG_QOS, "iwh_qosparam_to_hw(): "
5985 	    "EDCA parameters are as follows:\n"));
5986 
5987 	IWH_DBG((IWH_DEBUG_QOS, "BK parameters are: "
5988 	    "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
5989 	    qosparam_cmd.ac[0].cw_min, qosparam_cmd.ac[0].cw_max,
5990 	    qosparam_cmd.ac[0].aifsn, qosparam_cmd.ac[0].txop));
5991 
5992 	IWH_DBG((IWH_DEBUG_QOS, "BE parameters are: "
5993 	    "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
5994 	    qosparam_cmd.ac[1].cw_min, qosparam_cmd.ac[1].cw_max,
5995 	    qosparam_cmd.ac[1].aifsn, qosparam_cmd.ac[1].txop));
5996 
5997 	IWH_DBG((IWH_DEBUG_QOS, "VI parameters are: "
5998 	    "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
5999 	    qosparam_cmd.ac[2].cw_min, qosparam_cmd.ac[2].cw_max,
6000 	    qosparam_cmd.ac[2].aifsn, qosparam_cmd.ac[2].txop));
6001 
6002 	IWH_DBG((IWH_DEBUG_QOS, "VO parameters are: "
6003 	    "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
6004 	    qosparam_cmd.ac[3].cw_min, qosparam_cmd.ac[3].cw_max,
6005 	    qosparam_cmd.ac[3].aifsn, qosparam_cmd.ac[3].txop));
6006 #endif
6007 	return (err);
6008 }
6009 
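/*
 * Map an 802.11 TID (user priority) onto a firmware QoS AC following
 * the standard UP-to-AC mapping: TIDs 1 and 2 go to background,
 * 0 and 3 to best effort, 4 and 5 to video, and 6 and 7 to voice.
 */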
6010 static inline int
6011 iwh_wme_tid_qos_ac(int tid)
6012 {
6013 	switch (tid) {
6014 	case 1:
6015 	case 2:
6016 		return (QOS_AC_BK);
6017 	case 0:
6018 	case 3:
6019 		return (QOS_AC_BE);
6020 	case 4:
6021 	case 5:
6022 		return (QOS_AC_VI);
6023 	case 6:
6024 	case 7:
6025 		return (QOS_AC_VO);
6026 	}
6027 
6028 	return (QOS_AC_BE);
6029 }
6030 
6031 static inline int
6032 iwh_qos_ac_to_txq(int qos_ac)
6033 {
6034 	switch (qos_ac) {
6035 	case QOS_AC_BK:
6036 		return (QOS_AC_BK_TO_TXQ);
6037 	case QOS_AC_BE:
6038 		return (QOS_AC_BE_TO_TXQ);
6039 	case QOS_AC_VI:
6040 		return (QOS_AC_VI_TO_TXQ);
6041 	case QOS_AC_VO:
6042 		return (QOS_AC_VO_TO_TXQ);
6043 	}
6044 
6045 	return (QOS_AC_BE_TO_TXQ);
6046 }
6047 
6048 static int
6049 iwh_wme_tid_to_txq(int tid)
6050 {
6051 	int queue_n = TXQ_FOR_AC_INVALID;
6052 	int qos_ac;
6053 
6054 	if (tid < WME_TID_MIN ||
6055 	    tid > WME_TID_MAX) {
6056 		cmn_err(CE_WARN, "iwh_wme_tid_to_txq(): "
6057 		    "TID is not in suitable range.\n");
6058 		return (queue_n);
6059 	}
6060 
6061 	qos_ac = iwh_wme_tid_qos_ac(tid);
6062 	queue_n = iwh_qos_ac_to_txq(qos_ac);
6063 
6064 	return (queue_n);
6065 }
6066 
6067 /*
6068  * This function initializes the HT-relevant configuration.
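 * Device IDs 0x4235, 0x4236 and 0x423a are treated as three-chain
 * parts with two TX and two RX spatial streams; all other supported
 * devices get two chains with a single TX stream.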
6069  */
6070 static void
6071 iwh_init_ht_conf(iwh_sc_t *sc)
6072 {
6073 	(void) memset(&sc->sc_ht_conf, 0, sizeof (iwh_ht_conf_t));
6074 
6075 	if ((0x4235 == sc->sc_dev_id) ||
6076 	    (0x4236 == sc->sc_dev_id) ||
6077 	    (0x423a == sc->sc_dev_id)) {
6078 		sc->sc_ht_conf.ht_support = 1;
6079 
6080 		sc->sc_ht_conf.valid_chains = 3;
6081 		sc->sc_ht_conf.tx_stream_count = 2;
6082 		sc->sc_ht_conf.rx_stream_count = 2;
6083 
6084 		sc->sc_ht_conf.tx_support_mcs[0] = 0xff;
6085 		sc->sc_ht_conf.tx_support_mcs[1] = 0xff;
6086 		sc->sc_ht_conf.rx_support_mcs[0] = 0xff;
6087 		sc->sc_ht_conf.rx_support_mcs[1] = 0xff;
6088 	} else {
6089 		sc->sc_ht_conf.ht_support = 1;
6090 
6091 		sc->sc_ht_conf.valid_chains = 2;
6092 		sc->sc_ht_conf.tx_stream_count = 1;
6093 		sc->sc_ht_conf.rx_stream_count = 2;
6094 
6095 		sc->sc_ht_conf.tx_support_mcs[0] = 0xff;
6096 		sc->sc_ht_conf.rx_support_mcs[0] = 0xff;
6097 		sc->sc_ht_conf.rx_support_mcs[1] = 0xff;
6098 	}
6099 
6100 	if (sc->sc_ht_conf.ht_support) {
6101 		sc->sc_ht_conf.cap |= HT_CAP_GRN_FLD;
6102 		sc->sc_ht_conf.cap |= HT_CAP_SGI_20;
6103 		sc->sc_ht_conf.cap |= HT_CAP_MAX_AMSDU;
6104 		/* should disable MIMO */
6105 		sc->sc_ht_conf.cap |= HT_CAP_MIMO_PS;
6106 
6107 		sc->sc_ht_conf.ampdu_p.factor = HT_RX_AMPDU_FACTOR;
6108 		sc->sc_ht_conf.ampdu_p.density = HT_MPDU_DENSITY;
6109 
6110 		sc->sc_ht_conf.ht_protection = HT_PROT_CHAN_NON_HT;
6111 	}
6112 }
6113 
6114 /*
6115  * This function overwrites the default ieee80211_rateset_11n structure.
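 * Each byte of rx_support_mcs is a bitmap of eight MCS indices; bit j
 * of byte i corresponds to MCS (i * 8 + j), and every MCS the chip
 * can receive is advertised in the 11n rate set.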
6116  */
6117 static void
6118 iwh_overwrite_11n_rateset(iwh_sc_t *sc)
6119 {
6120 	uint8_t *ht_rs = sc->sc_ht_conf.rx_support_mcs;
6121 	int mcs_idx, mcs_count = 0;
6122 	int i, j;
6123 
6124 	for (i = 0; i < HT_RATESET_NUM; i++) {
6125 		for (j = 0; j < 8; j++) {
6126 			if (ht_rs[i] & (1 << j)) {
6127 				mcs_idx = i * 8 + j;
6128 				if (mcs_idx >= IEEE80211_HTRATE_MAXSIZE) {
6129 					break;
6130 				}
6131 
6132 				ieee80211_rateset_11n.rs_rates[mcs_idx] =
6133 				    (uint8_t)mcs_idx;
6134 				mcs_count++;
6135 			}
6136 		}
6137 	}
6138 
6139 	ieee80211_rateset_11n.rs_nrates = (uint8_t)mcs_count;
6140 
6141 #ifdef	DEBUG
6142 	IWH_DBG((IWH_DEBUG_HTRATE, "iwh_overwrite_11n_rateset(): "
6143 	    "HT rates supported by this station are as follows:\n"));
6144 
6145 	for (i = 0; i < ieee80211_rateset_11n.rs_nrates; i++) {
6146 		IWH_DBG((IWH_DEBUG_HTRATE, "Rate %d is %d\n",
6147 		    i, ieee80211_rateset_11n.rs_rates[i]));
6148 	}
6149 #endif
6150 }
6151 
6152 /*
6153  * This function overwrites the default configuration of the
6154  * ieee80211com structure in the Net80211 module.
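 * The driver interposes its own state-machine, node-management and,
 * when HT is supported, BA action handlers, while saving the Net80211
 * defaults so they can still be chained to.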
6155  */
6156 static void
6157 iwh_overwrite_ic_default(iwh_sc_t *sc)
6158 {
6159 	ieee80211com_t *ic = &sc->sc_ic;
6160 
6161 	sc->sc_newstate = ic->ic_newstate;
6162 	ic->ic_newstate = iwh_newstate;
6163 	ic->ic_node_alloc = iwh_node_alloc;
6164 	ic->ic_node_free = iwh_node_free;
6165 
6166 	if (sc->sc_ht_conf.ht_support) {
6167 		sc->sc_recv_action = ic->ic_recv_action;
6168 		ic->ic_recv_action = iwh_recv_action;
6169 		sc->sc_send_action = ic->ic_send_action;
6170 		ic->ic_send_action = iwh_send_action;
6171 
6172 		ic->ic_ampdu_rxmax = sc->sc_ht_conf.ampdu_p.factor;
6173 		ic->ic_ampdu_density = sc->sc_ht_conf.ampdu_p.density;
6174 		ic->ic_ampdu_limit = ic->ic_ampdu_rxmax;
6175 	}
6176 }
6177 
6178 /*
6179  * This function sets the "RX chain selection" field
6180  * in the RXON command when the driver is plumbed.
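 * All physically present chains (A and B, plus C on three-chain
 * devices) are marked valid and forced on; when the associated AP is
 * an HT node, the active and MIMO chain counts are set to match the
 * device's valid chains and MIMO reception is forced on.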
6181  */
6182 static void
6183 iwh_config_rxon_chain(iwh_sc_t *sc)
6184 {
6185 	ieee80211com_t *ic = &sc->sc_ic;
6186 	ieee80211_node_t *in = ic->ic_bss;
6187 
6188 	if (3 == sc->sc_ht_conf.valid_chains) {
6189 		sc->sc_config.rx_chain = LE_16((RXON_RX_CHAIN_A_MSK |
6190 		    RXON_RX_CHAIN_B_MSK | RXON_RX_CHAIN_C_MSK) <<
6191 		    RXON_RX_CHAIN_VALID_POS);
6192 
6193 		sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6194 		    RXON_RX_CHAIN_B_MSK | RXON_RX_CHAIN_C_MSK) <<
6195 		    RXON_RX_CHAIN_FORCE_SEL_POS);
6196 
6197 		sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6198 		    RXON_RX_CHAIN_B_MSK | RXON_RX_CHAIN_C_MSK) <<
6199 		    RXON_RX_CHAIN_FORCE_MIMO_SEL_POS);
6200 	} else {
6201 		sc->sc_config.rx_chain = LE_16((RXON_RX_CHAIN_A_MSK |
6202 		    RXON_RX_CHAIN_B_MSK) << RXON_RX_CHAIN_VALID_POS);
6203 
6204 		sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6205 		    RXON_RX_CHAIN_B_MSK) << RXON_RX_CHAIN_FORCE_SEL_POS);
6206 
6207 		sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6208 		    RXON_RX_CHAIN_B_MSK) <<
6209 		    RXON_RX_CHAIN_FORCE_MIMO_SEL_POS);
6210 	}
6211 
6212 	sc->sc_config.rx_chain |= LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK);
6213 
6214 	if ((in != NULL) &&
6215 	    (in->in_flags & IEEE80211_NODE_HT) &&
6216 	    sc->sc_ht_conf.ht_support) {
6217 		if (3 == sc->sc_ht_conf.valid_chains) {
6218 			sc->sc_config.rx_chain |= LE_16(3 <<
6219 			    RXON_RX_CHAIN_CNT_POS);
6220 			sc->sc_config.rx_chain |= LE_16(3 <<
6221 			    RXON_RX_CHAIN_MIMO_CNT_POS);
6222 		} else {
6223 			sc->sc_config.rx_chain |= LE_16(2 <<
6224 			    RXON_RX_CHAIN_CNT_POS);
6225 			sc->sc_config.rx_chain |= LE_16(2 <<
6226 			    RXON_RX_CHAIN_MIMO_CNT_POS);
6227 		}
6228 
6229 		sc->sc_config.rx_chain |= LE_16(1 <<
6230 		    RXON_RX_CHAIN_MIMO_FORCE_POS);
6231 	}
6232 
6233 	IWH_DBG((IWH_DEBUG_RXON, "iwh_config_rxon_chain(): "
6234 	    "rxon->rx_chain = %x\n", sc->sc_config.rx_chain));
6235 }
6236 
6237 /*
6238  * This function adds the AP station into the hardware station table.
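 * When the AP is HT-capable, the station flags also carry the AP's
 * advertised A-MPDU factor and density, RTS-based MIMO protection if
 * dynamic SM power save is in use, and 40 MHz (FAT channel) support.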
6239  */
6240 static int
6241 iwh_add_ap_sta(iwh_sc_t *sc)
6242 {
6243 	ieee80211com_t *ic = &sc->sc_ic;
6244 	ieee80211_node_t *in = ic->ic_bss;
6245 	iwh_add_sta_t node;
6246 	uint32_t ampdu_factor, ampdu_density;
6247 	int err = IWH_FAIL;
6248 
6249 	/*
6250 	 * Add AP node into hardware.
6251 	 */
6252 	(void) memset(&node, 0, sizeof (node));
6253 	IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
6254 	node.mode = STA_MODE_ADD_MSK;
6255 	node.sta.sta_id = IWH_AP_ID;
6256 
6257 	if (sc->sc_ht_conf.ht_support &&
6258 	    (in->in_htcap_ie != NULL) &&
6259 	    (in->in_htcap != 0) &&
6260 	    (in->in_htparam != 0)) {
6261 
6262 		if (((in->in_htcap & HT_CAP_MIMO_PS) >> 2)
6263 		    == HT_CAP_MIMO_PS_DYNAMIC) {
6264 			node.station_flags |= LE_32(STA_FLG_RTS_MIMO_PROT);
6265 		}
6266 
6267 		ampdu_factor = in->in_htparam & HT_RX_AMPDU_FACTOR_MSK;
6268 		node.station_flags |=
6269 		    LE_32(ampdu_factor << STA_FLG_MAX_AMPDU_POS);
6270 
6271 		ampdu_density = (in->in_htparam & HT_MPDU_DENSITY_MSK) >>
6272 		    HT_MPDU_DENSITY_POS;
6273 		node.station_flags |=
6274 		    LE_32(ampdu_density << STA_FLG_AMPDU_DENSITY_POS);
6275 
6276 		if (in->in_htcap & LE_16(HT_CAP_SUP_WIDTH)) {
6277 			node.station_flags |=
6278 			    LE_32(STA_FLG_FAT_EN);
6279 		}
6280 	}
6281 
6282 	err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
6283 	if (err != IWH_SUCCESS) {
6284 		cmn_err(CE_WARN, "iwh_add_ap_sta(): "
6285 		    "failed to add AP node\n");
6286 		return (err);
6287 	}
6288 
6289 	return (err);
6290 }
6291 
6292 /*
6293  * Each station in Shirley Peak's internal station table has its own
6294  * table of 16 TX rates and modulation modes for retrying TX when an
6295  * ACK is not received.  This function replaces the entire table for
6296  * one station.  The station must already be in Shirley Peak's
6297  * station table.
6298  */
6299 static int
6300 iwh_ap_lq(iwh_sc_t *sc)
6301 {
6302 	ieee80211com_t *ic = &sc->sc_ic;
6303 	ieee80211_node_t *in = ic->ic_bss;
6304 	iwh_link_quality_cmd_t link_quality;
6305 	const struct ieee80211_rateset *rs_sup = NULL;
6306 	uint32_t masks = 0, rate;
6307 	int i, err = IWH_FAIL;
6308 
6309 	/*
6310 	 * TX_LINK_QUALITY cmd
6311 	 */
6312 	(void) memset(&link_quality, 0, sizeof (link_quality));
6313 	if (in->in_chan == IEEE80211_CHAN_ANYC)	/* skip null node */
6314 		return (err);
6315 	rs_sup = ieee80211_get_suprates(ic, in->in_chan);
6316 
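	/*
	 * Fill the retry table from the top of the supported rate set
	 * downward; once the supported rates are exhausted, fall back to
	 * rate code 2 (1 Mb/s in half-Mb/s units).  CCK rate codes (2, 4,
	 * 11, 22) turn on the CCK modulation flag and antenna B is
	 * selected for every entry.
	 */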
6317 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6318 		if (i < rs_sup->ir_nrates) {
6319 			rate = rs_sup->ir_rates[rs_sup->ir_nrates - 1 - i] &
6320 			    IEEE80211_RATE_VAL;
6321 		} else {
6322 			rate = 2;
6323 		}
6324 
6325 		if (2 == rate || 4 == rate ||
6326 		    11 == rate || 22 == rate) {
6327 			masks |= LE_32(RATE_MCS_CCK_MSK);
6328 		}
6329 
6330 		masks |= LE_32(RATE_MCS_ANT_B_MSK);
6331 
6332 		link_quality.rate_n_flags[i] =
6333 		    LE_32(iwh_rate_to_plcp(rate) | masks);
6334 	}
6335 
6336 	link_quality.general_params.single_stream_ant_msk = LINK_QUAL_ANT_B_MSK;
6337 	link_quality.general_params.dual_stream_ant_msk = LINK_QUAL_ANT_MSK;
6338 	link_quality.agg_params.agg_dis_start_th = 3;
6339 	link_quality.agg_params.agg_time_limit = LE_16(4000);
6340 	link_quality.sta_id = IWH_AP_ID;
6341 	err = iwh_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
6342 	    sizeof (link_quality), 1);
6343 	if (err != IWH_SUCCESS) {
6344 		cmn_err(CE_WARN, "iwh_ap_lq(): "
6345 		    "failed to config link quality table\n");
6346 		return (err);
6347 	}
6348 
6349 #ifdef	DEBUG
6350 	IWH_DBG((IWH_DEBUG_HWRATE, "iwh_ap_lq(): "
6351 	    "Rates in HW are as follows:\n"));
6352 
6353 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6354 		IWH_DBG((IWH_DEBUG_HWRATE,
6355 		    "Rate %d in HW is %x\n", i, link_quality.rate_n_flags[i]));
6356 	}
6357 #endif
6358 
6359 	return (err);
6360 }
6361 
6362 /*
6363  * When a block ACK agreement has been set up between the station and
6364  * the AP, the Net80211 module calls this function to pass the details
6365  * of that BA agreement to the hardware.
6366  * When the AP wants to delete a BA agreement that it originated, the
6367  * Net80211 module calls this function to clean up the relevant state
6368  * in the hardware.
6369  */
6370 static void
6371 iwh_recv_action(struct ieee80211_node *in,
6372     const uint8_t *frm, const uint8_t *efrm)
6373 {
6374 	struct ieee80211com *ic;
6375 	iwh_sc_t *sc;
6376 	const struct ieee80211_action *ia;
6377 	uint16_t baparamset, baseqctl;
6378 	uint32_t tid, ssn;
6379 	iwh_add_sta_t node;
6380 	int err = IWH_FAIL;
6381 
6382 	if ((NULL == in) || (NULL == frm)) {
6383 		return;
6384 	}
6385 
6386 	ic = in->in_ic;
6387 	if (NULL == ic) {
6388 		return;
6389 	}
6390 
6391 	sc = (iwh_sc_t *)ic;
6392 
6393 	sc->sc_recv_action(in, frm, efrm);
6394 
6395 	ia = (const struct ieee80211_action *)frm;
6396 	if (ia->ia_category != IEEE80211_ACTION_CAT_BA) {
6397 		return;
6398 	}
6399 
6400 	switch (ia->ia_action) {
6401 	case IEEE80211_ACTION_BA_ADDBA_REQUEST:
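		/*
		 * The ADDBA request body carries, after the category, action
		 * and dialog token octets, the block ACK parameter set
		 * (frm + 3) and, following the BA timeout field, the
		 * starting sequence control (frm + 7); extract the TID and
		 * SSN from them.
		 */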
6402 		baparamset = *(uint16_t *)(frm + 3);
6403 		baseqctl = *(uint16_t *)(frm + 7);
6404 
6405 		tid = MS(baparamset, IEEE80211_BAPS_TID);
6406 		ssn = MS(baseqctl, IEEE80211_BASEQ_START);
6407 
6408 		(void) memset(&node, 0, sizeof (node));
6409 		IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
6410 		node.mode = STA_MODE_MODIFY_MSK;
6411 		node.sta.sta_id = IWH_AP_ID;
6412 
6413 		node.station_flags_msk = 0;
6414 		node.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
6415 		node.add_immediate_ba_tid = (uint8_t)tid;
6416 		node.add_immediate_ba_ssn = LE_16(ssn);
6417 
6418 		mutex_enter(&sc->sc_glock);
6419 		err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
6420 		if (err != IWH_SUCCESS) {
6421 			cmn_err(CE_WARN, "iwh_recv_action(): "
6422 			    "failed to setup RX block ACK\n");
6423 			mutex_exit(&sc->sc_glock);
6424 			return;
6425 		}
6426 		mutex_exit(&sc->sc_glock);
6427 
6428 		IWH_DBG((IWH_DEBUG_BA, "iwh_recv_action(): "
6429 		    "RX block ACK "
6430 		    "was setup on TID %d and SSN is %d.\n", tid, ssn));
6431 
6432 		return;
6433 
6434 	case IEEE80211_ACTION_BA_DELBA:
6435 		baparamset = *(uint16_t *)(frm + 2);
6436 
6437 		if ((baparamset & IEEE80211_DELBAPS_INIT) == 0) {
6438 			return;
6439 		}
6440 
6441 		tid = MS(baparamset, IEEE80211_DELBAPS_TID);
6442 
6443 		(void) memset(&node, 0, sizeof (node));
6444 		IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
6445 		node.mode = STA_MODE_MODIFY_MSK;
6446 		node.sta.sta_id = IWH_AP_ID;
6447 
6448 		node.station_flags_msk = 0;
6449 		node.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
6450 		node.add_immediate_ba_tid = (uint8_t)tid;
6451 
6452 		mutex_enter(&sc->sc_glock);
6453 		err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
6454 		if (err != IWH_SUCCESS) {
6455 			cmn_err(CE_WARN, "iwh_recv_action(): "
6456 			    "failed to delete RX block ACK\n");
6457 			mutex_exit(&sc->sc_glock);
6458 			return;
6459 		}
6460 		mutex_exit(&sc->sc_glock);
6461 
6462 		IWH_DBG((IWH_DEBUG_BA, "iwh_recv_action(): "
6463 		    "RX block ACK "
6464 		    "was deleted on TID %d.\n", tid));
6465 
6466 		return;
6467 	}
6468 }
6469 
6470 /*
6471  * When the local station wants to delete a BA agreement that was
6472  * originated by the AP, the Net80211 module calls this function to
6473  * clean up the relevant state in the hardware.
6474  */
6475 static int
6476 iwh_send_action(struct ieee80211_node *in,
6477     int category, int action, uint16_t args[4])
6478 {
6479 	struct ieee80211com *ic;
6480 	iwh_sc_t *sc;
6481 	uint32_t tid;
6482 	iwh_add_sta_t node;
6483 	int ret = EIO;
6484 	int err = IWH_FAIL;
6485 
6486 
6487 	if (NULL == in) {
6488 		return (ret);
6489 	}
6490 
6491 	ic = in->in_ic;
6492 	if (NULL == ic) {
6493 		return (ret);
6494 	}
6495 
6496 	sc = (iwh_sc_t *)ic;
6497 
6498 	ret = sc->sc_send_action(in, category, action, args);
6499 
6500 	if (category != IEEE80211_ACTION_CAT_BA) {
6501 		return (ret);
6502 	}
6503 
6504 	switch (action) {
6505 	case IEEE80211_ACTION_BA_DELBA:
6506 		if (IEEE80211_DELBAPS_INIT == args[1]) {
6507 			return (ret);
6508 		}
6509 
6510 		tid = args[0];
6511 
6512 		(void) memset(&node, 0, sizeof (node));
6513 		IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
6514 		node.mode = STA_MODE_MODIFY_MSK;
6515 		node.sta.sta_id = IWH_AP_ID;
6516 
6517 		node.station_flags_msk = 0;
6518 		node.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
6519 		node.add_immediate_ba_tid = (uint8_t)tid;
6520 
6521 		mutex_enter(&sc->sc_glock);
6522 		err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
6523 		if (err != IWH_SUCCESS) {
6524 			cmn_err(CE_WARN, "iwh_send_action(): "
6525 			    "failed to delete RX block ACK\n");
6526 			mutex_exit(&sc->sc_glock);
6527 			return (EIO);
6528 		}
6529 		mutex_exit(&sc->sc_glock);
6530 
6531 		IWH_DBG((IWH_DEBUG_BA, "iwh_send_action(): "
6532 		    "RX block ACK "
6533 		    "was deleted on TID %d.\n", tid));
6534 
6535 		break;
6536 	}
6537 
6538 	return (ret);
6539 }
6540 
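/*
 * Bring the NIC out of reset: request NIC_READY and poll briefly for
 * the hardware to acknowledge.  If it does not, assert the PREPARE
 * bit, wait for PREPARE_DONE to clear, and then retry the NIC_READY
 * handshake once more before giving up with ETIMEDOUT.
 */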
6541 static int
6542 iwh_reset_hw(iwh_sc_t *sc)
6543 {
6544 	uint32_t tmp;
6545 	int n;
6546 
6547 	tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
6548 	IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
6549 	    tmp | CSR_HW_IF_CONFIG_REG_BITS_NIC_READY);
6550 
6551 	/*
6552 	 * wait for HW ready
6553 	 */
6554 	for (n = 0; n < 5; n++) {
6555 		if (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
6556 		    CSR_HW_IF_CONFIG_REG_BITS_NIC_READY) {
6557 			break;
6558 		}
6559 		DELAY(10);
6560 	}
6561 
6562 	if (n != 5) {
6563 		return (IWH_SUCCESS);
6564 	}
6565 
6566 	tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
6567 	IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
6568 	    tmp | CSR_HW_IF_CONFIG_REG_BITS_PREPARE);
6569 
6570 	for (n = 0; n < 15000; n++) {
6571 		if (0 == (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
6572 		    CSR_HW_IF_CONFIG_REG_BITS_NIC_PREPARE_DONE)) {
6573 			break;
6574 		}
6575 		DELAY(10);
6576 	}
6577 
6578 	if (15000 == n) {
6579 		return (ETIMEDOUT);
6580 	}
6581 
6582 	tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
6583 	IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
6584 	    tmp | CSR_HW_IF_CONFIG_REG_BITS_NIC_READY);
6585 
6586 	/*
6587 	 * wait for HW ready
6588 	 */
6589 	for (n = 0; n < 5; n++) {
6590 		if (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
6591 		    CSR_HW_IF_CONFIG_REG_BITS_NIC_READY) {
6592 			break;
6593 		}
6594 		DELAY(10);
6595 	}
6596 
6597 	if (n != 5) {
6598 		return (IWH_SUCCESS);
6599 	} else {
6600 		return (ETIMEDOUT);
6601 	}
6602 }
6603