xref: /titanic_50/usr/src/uts/common/io/iwp/iwp.c (revision 0dc2366f7b9f9f36e10909b1e95edbf2a261c2ac)
1 /*
2  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2009, Intel Corporation
8  * All rights reserved.
9  */
10 
11 /*
12  * Copyright (c) 2006
13  * Copyright (c) 2007
14  *	Damien Bergamini <damien.bergamini@free.fr>
15  *
16  * Permission to use, copy, modify, and distribute this software for any
17  * purpose with or without fee is hereby granted, provided that the above
18  * copyright notice and this permission notice appear in all copies.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27  */
28 
29 /*
30  * Intel(R) WiFi Link 6000 Driver
31  */
32 
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac_provider.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/varargs.h>
56 #include <sys/policy.h>
57 #include <sys/pci.h>
58 
59 #include "iwp_calibration.h"
60 #include "iwp_hw.h"
61 #include "iwp_eeprom.h"
62 #include "iwp_var.h"
63 #include <inet/wifi_ioctl.h>
64 
65 #ifdef DEBUG
66 #define	IWP_DEBUG_80211		(1 << 0)
67 #define	IWP_DEBUG_CMD		(1 << 1)
68 #define	IWP_DEBUG_DMA		(1 << 2)
69 #define	IWP_DEBUG_EEPROM	(1 << 3)
70 #define	IWP_DEBUG_FW		(1 << 4)
71 #define	IWP_DEBUG_HW		(1 << 5)
72 #define	IWP_DEBUG_INTR		(1 << 6)
73 #define	IWP_DEBUG_MRR		(1 << 7)
74 #define	IWP_DEBUG_PIO		(1 << 8)
75 #define	IWP_DEBUG_RX		(1 << 9)
76 #define	IWP_DEBUG_SCAN		(1 << 10)
77 #define	IWP_DEBUG_TX		(1 << 11)
78 #define	IWP_DEBUG_RATECTL	(1 << 12)
79 #define	IWP_DEBUG_RADIO		(1 << 13)
80 #define	IWP_DEBUG_RESUME	(1 << 14)
81 #define	IWP_DEBUG_CALIBRATION	(1 << 15)
82 /*
83  * if want to see debug message of a given section,
84  * please set this flag to one of above values
85  */
86 uint32_t iwp_dbg_flags = 0;
87 #define	IWP_DBG(x) \
88 	iwp_dbg x
89 #else
90 #define	IWP_DBG(x)
91 #endif
92 
93 static void	*iwp_soft_state_p = NULL;
94 
95 /*
96  * ucode will be compiled into driver image
97  */
98 static uint8_t iwp_fw_bin [] = {
99 #include "fw-iw/iwp.ucode"
100 };
101 
102 /*
103  * DMA attributes for a shared page
104  */
105 static ddi_dma_attr_t sh_dma_attr = {
106 	DMA_ATTR_V0,	/* version of this structure */
107 	0,		/* lowest usable address */
108 	0xffffffffU,	/* highest usable address */
109 	0xffffffffU,	/* maximum DMAable byte count */
110 	0x1000,		/* alignment in bytes */
111 	0x1000,		/* burst sizes (any?) */
112 	1,		/* minimum transfer */
113 	0xffffffffU,	/* maximum transfer */
114 	0xffffffffU,	/* maximum segment length */
115 	1,		/* maximum number of segments */
116 	1,		/* granularity */
117 	0,		/* flags (reserved) */
118 };
119 
120 /*
121  * DMA attributes for a keep warm DRAM descriptor
122  */
123 static ddi_dma_attr_t kw_dma_attr = {
124 	DMA_ATTR_V0,	/* version of this structure */
125 	0,		/* lowest usable address */
126 	0xffffffffU,	/* highest usable address */
127 	0xffffffffU,	/* maximum DMAable byte count */
128 	0x1000,		/* alignment in bytes */
129 	0x1000,		/* burst sizes (any?) */
130 	1,		/* minimum transfer */
131 	0xffffffffU,	/* maximum transfer */
132 	0xffffffffU,	/* maximum segment length */
133 	1,		/* maximum number of segments */
134 	1,		/* granularity */
135 	0,		/* flags (reserved) */
136 };
137 
138 /*
139  * DMA attributes for a ring descriptor
140  */
141 static ddi_dma_attr_t ring_desc_dma_attr = {
142 	DMA_ATTR_V0,	/* version of this structure */
143 	0,		/* lowest usable address */
144 	0xffffffffU,	/* highest usable address */
145 	0xffffffffU,	/* maximum DMAable byte count */
146 	0x100,		/* alignment in bytes */
147 	0x100,		/* burst sizes (any?) */
148 	1,		/* minimum transfer */
149 	0xffffffffU,	/* maximum transfer */
150 	0xffffffffU,	/* maximum segment length */
151 	1,		/* maximum number of segments */
152 	1,		/* granularity */
153 	0,		/* flags (reserved) */
154 };
155 
156 /*
157  * DMA attributes for a cmd
158  */
159 static ddi_dma_attr_t cmd_dma_attr = {
160 	DMA_ATTR_V0,	/* version of this structure */
161 	0,		/* lowest usable address */
162 	0xffffffffU,	/* highest usable address */
163 	0xffffffffU,	/* maximum DMAable byte count */
164 	4,		/* alignment in bytes */
165 	0x100,		/* burst sizes (any?) */
166 	1,		/* minimum transfer */
167 	0xffffffffU,	/* maximum transfer */
168 	0xffffffffU,	/* maximum segment length */
169 	1,		/* maximum number of segments */
170 	1,		/* granularity */
171 	0,		/* flags (reserved) */
172 };
173 
174 /*
175  * DMA attributes for a rx buffer
176  */
177 static ddi_dma_attr_t rx_buffer_dma_attr = {
178 	DMA_ATTR_V0,	/* version of this structure */
179 	0,		/* lowest usable address */
180 	0xffffffffU,	/* highest usable address */
181 	0xffffffffU,	/* maximum DMAable byte count */
182 	0x100,		/* alignment in bytes */
183 	0x100,		/* burst sizes (any?) */
184 	1,		/* minimum transfer */
185 	0xffffffffU,	/* maximum transfer */
186 	0xffffffffU,	/* maximum segment length */
187 	1,		/* maximum number of segments */
188 	1,		/* granularity */
189 	0,		/* flags (reserved) */
190 };
191 
192 /*
193  * DMA attributes for a tx buffer.
194  * the maximum number of segments is 4 for the hardware.
195  * now all the wifi drivers put the whole frame in a single
196  * descriptor, so we define the maximum  number of segments 1,
197  * just the same as the rx_buffer. we consider leverage the HW
198  * ability in the future, that is why we don't define rx and tx
199  * buffer_dma_attr as the same.
200  */
201 static ddi_dma_attr_t tx_buffer_dma_attr = {
202 	DMA_ATTR_V0,	/* version of this structure */
203 	0,		/* lowest usable address */
204 	0xffffffffU,	/* highest usable address */
205 	0xffffffffU,	/* maximum DMAable byte count */
206 	4,		/* alignment in bytes */
207 	0x100,		/* burst sizes (any?) */
208 	1,		/* minimum transfer */
209 	0xffffffffU,	/* maximum transfer */
210 	0xffffffffU,	/* maximum segment length */
211 	1,		/* maximum number of segments */
212 	1,		/* granularity */
213 	0,		/* flags (reserved) */
214 };
215 
216 /*
217  * DMA attributes for text and data part in the firmware
218  */
219 static ddi_dma_attr_t fw_dma_attr = {
220 	DMA_ATTR_V0,	/* version of this structure */
221 	0,		/* lowest usable address */
222 	0xffffffffU,	/* highest usable address */
223 	0x7fffffff,	/* maximum DMAable byte count */
224 	0x10,		/* alignment in bytes */
225 	0x100,		/* burst sizes (any?) */
226 	1,		/* minimum transfer */
227 	0xffffffffU,	/* maximum transfer */
228 	0xffffffffU,	/* maximum segment length */
229 	1,		/* maximum number of segments */
230 	1,		/* granularity */
231 	0,		/* flags (reserved) */
232 };
233 
234 /*
235  * regs access attributes
236  */
237 static ddi_device_acc_attr_t iwp_reg_accattr = {
238 	DDI_DEVICE_ATTR_V0,
239 	DDI_STRUCTURE_LE_ACC,
240 	DDI_STRICTORDER_ACC,
241 	DDI_DEFAULT_ACC
242 };
243 
244 /*
245  * DMA access attributes for descriptor
246  */
247 static ddi_device_acc_attr_t iwp_dma_descattr = {
248 	DDI_DEVICE_ATTR_V0,
249 	DDI_STRUCTURE_LE_ACC,
250 	DDI_STRICTORDER_ACC,
251 	DDI_DEFAULT_ACC
252 };
253 
254 /*
255  * DMA access attributes
256  */
257 static ddi_device_acc_attr_t iwp_dma_accattr = {
258 	DDI_DEVICE_ATTR_V0,
259 	DDI_NEVERSWAP_ACC,
260 	DDI_STRICTORDER_ACC,
261 	DDI_DEFAULT_ACC
262 };
263 
264 static int	iwp_ring_init(iwp_sc_t *);
265 static void	iwp_ring_free(iwp_sc_t *);
266 static int	iwp_alloc_shared(iwp_sc_t *);
267 static void	iwp_free_shared(iwp_sc_t *);
268 static int	iwp_alloc_kw(iwp_sc_t *);
269 static void	iwp_free_kw(iwp_sc_t *);
270 static int	iwp_alloc_fw_dma(iwp_sc_t *);
271 static void	iwp_free_fw_dma(iwp_sc_t *);
272 static int	iwp_alloc_rx_ring(iwp_sc_t *);
273 static void	iwp_reset_rx_ring(iwp_sc_t *);
274 static void	iwp_free_rx_ring(iwp_sc_t *);
275 static int	iwp_alloc_tx_ring(iwp_sc_t *, iwp_tx_ring_t *,
276     int, int);
277 static void	iwp_reset_tx_ring(iwp_sc_t *, iwp_tx_ring_t *);
278 static void	iwp_free_tx_ring(iwp_tx_ring_t *);
279 static ieee80211_node_t *iwp_node_alloc(ieee80211com_t *);
280 static void	iwp_node_free(ieee80211_node_t *);
281 static int	iwp_newstate(ieee80211com_t *, enum ieee80211_state, int);
282 static void	iwp_mac_access_enter(iwp_sc_t *);
283 static void	iwp_mac_access_exit(iwp_sc_t *);
284 static uint32_t	iwp_reg_read(iwp_sc_t *, uint32_t);
285 static void	iwp_reg_write(iwp_sc_t *, uint32_t, uint32_t);
286 static int	iwp_load_init_firmware(iwp_sc_t *);
287 static int	iwp_load_run_firmware(iwp_sc_t *);
288 static void	iwp_tx_intr(iwp_sc_t *, iwp_rx_desc_t *);
289 static void	iwp_cmd_intr(iwp_sc_t *, iwp_rx_desc_t *);
290 static uint_t   iwp_intr(caddr_t, caddr_t);
291 static int	iwp_eep_load(iwp_sc_t *);
292 static void	iwp_get_mac_from_eep(iwp_sc_t *);
293 static int	iwp_eep_sem_down(iwp_sc_t *);
294 static void	iwp_eep_sem_up(iwp_sc_t *);
295 static uint_t   iwp_rx_softintr(caddr_t, caddr_t);
296 static uint8_t	iwp_rate_to_plcp(int);
297 static int	iwp_cmd(iwp_sc_t *, int, const void *, int, int);
298 static void	iwp_set_led(iwp_sc_t *, uint8_t, uint8_t, uint8_t);
299 static int	iwp_hw_set_before_auth(iwp_sc_t *);
300 static int	iwp_scan(iwp_sc_t *);
301 static int	iwp_config(iwp_sc_t *);
302 static void	iwp_stop_master(iwp_sc_t *);
303 static int	iwp_power_up(iwp_sc_t *);
304 static int	iwp_preinit(iwp_sc_t *);
305 static int	iwp_init(iwp_sc_t *);
306 static void	iwp_stop(iwp_sc_t *);
307 static int	iwp_quiesce(dev_info_t *t);
308 static void	iwp_amrr_init(iwp_amrr_t *);
309 static void	iwp_amrr_timeout(iwp_sc_t *);
310 static void	iwp_amrr_ratectl(void *, ieee80211_node_t *);
311 static void	iwp_ucode_alive(iwp_sc_t *, iwp_rx_desc_t *);
312 static void	iwp_rx_phy_intr(iwp_sc_t *, iwp_rx_desc_t *);
313 static void	iwp_rx_mpdu_intr(iwp_sc_t *, iwp_rx_desc_t *);
314 static void	iwp_release_calib_buffer(iwp_sc_t *);
315 static int	iwp_init_common(iwp_sc_t *);
316 static uint8_t	*iwp_eep_addr_trans(iwp_sc_t *, uint32_t);
317 static int	iwp_put_seg_fw(iwp_sc_t *, uint32_t, uint32_t, uint32_t);
318 static	int	iwp_alive_common(iwp_sc_t *);
319 static void	iwp_save_calib_result(iwp_sc_t *, iwp_rx_desc_t *);
320 static int	iwp_attach(dev_info_t *, ddi_attach_cmd_t);
321 static int	iwp_detach(dev_info_t *, ddi_detach_cmd_t);
322 static void	iwp_destroy_locks(iwp_sc_t *);
323 static int	iwp_send(ieee80211com_t *, mblk_t *, uint8_t);
324 static void	iwp_thread(iwp_sc_t *);
325 static int	iwp_run_state_config(iwp_sc_t *);
326 static int	iwp_fast_recover(iwp_sc_t *);
327 static void	iwp_overwrite_ic_default(iwp_sc_t *);
328 static int	iwp_add_ap_sta(iwp_sc_t *);
329 static int	iwp_alloc_dma_mem(iwp_sc_t *, size_t,
330     ddi_dma_attr_t *, ddi_device_acc_attr_t *,
331     uint_t, iwp_dma_t *);
332 static void	iwp_free_dma_mem(iwp_dma_t *);
333 static int	iwp_eep_ver_chk(iwp_sc_t *);
334 static void	iwp_set_chip_param(iwp_sc_t *);
335 
336 /*
337  * GLD specific operations
338  */
339 static int	iwp_m_stat(void *, uint_t, uint64_t *);
340 static int	iwp_m_start(void *);
341 static void	iwp_m_stop(void *);
342 static int	iwp_m_unicst(void *, const uint8_t *);
343 static int	iwp_m_multicst(void *, boolean_t, const uint8_t *);
344 static int	iwp_m_promisc(void *, boolean_t);
345 static mblk_t	*iwp_m_tx(void *, mblk_t *);
346 static void	iwp_m_ioctl(void *, queue_t *, mblk_t *);
347 static int	iwp_m_setprop(void *arg, const char *pr_name,
348     mac_prop_id_t wldp_pr_num, uint_t wldp_length, const void *wldp_buf);
349 static int	iwp_m_getprop(void *arg, const char *pr_name,
350     mac_prop_id_t wldp_pr_num, uint_t wldp_length, void *wldp_buf);
351 static void	iwp_m_propinfo(void *, const char *, mac_prop_id_t,
352     mac_prop_info_handle_t);
353 
354 /*
355  * Supported rates for 802.11b/g modes (in 500Kbps unit).
356  */
357 static const struct ieee80211_rateset iwp_rateset_11b =
358 	{ 4, { 2, 4, 11, 22 } };
359 
360 static const struct ieee80211_rateset iwp_rateset_11g =
361 	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
362 
363 /*
364  * For mfthread only
365  */
366 extern pri_t minclsyspri;
367 
368 #define	DRV_NAME_SP	"iwp"
369 
370 /*
371  * Module Loading Data & Entry Points
372  */
373 DDI_DEFINE_STREAM_OPS(iwp_devops, nulldev, nulldev, iwp_attach,
374     iwp_detach, nodev, NULL, D_MP, NULL, iwp_quiesce);
375 
376 static struct modldrv iwp_modldrv = {
377 	&mod_driverops,
378 	"Intel(R) PumaPeak driver(N)",
379 	&iwp_devops
380 };
381 
382 static struct modlinkage iwp_modlinkage = {
383 	MODREV_1,
384 	&iwp_modldrv,
385 	NULL
386 };
387 
388 int
_init(void)389 _init(void)
390 {
391 	int	status;
392 
393 	status = ddi_soft_state_init(&iwp_soft_state_p,
394 	    sizeof (iwp_sc_t), 1);
395 	if (status != DDI_SUCCESS) {
396 		return (status);
397 	}
398 
399 	mac_init_ops(&iwp_devops, DRV_NAME_SP);
400 	status = mod_install(&iwp_modlinkage);
401 	if (status != DDI_SUCCESS) {
402 		mac_fini_ops(&iwp_devops);
403 		ddi_soft_state_fini(&iwp_soft_state_p);
404 	}
405 
406 	return (status);
407 }
408 
409 int
_fini(void)410 _fini(void)
411 {
412 	int status;
413 
414 	status = mod_remove(&iwp_modlinkage);
415 	if (DDI_SUCCESS == status) {
416 		mac_fini_ops(&iwp_devops);
417 		ddi_soft_state_fini(&iwp_soft_state_p);
418 	}
419 
420 	return (status);
421 }
422 
/*
 * _info(9E): report module information via the module linkage.
 */
int
_info(struct modinfo *mip)
{
	return (mod_info(&iwp_modlinkage, mip));
}
428 
429 /*
430  * Mac Call Back entries
431  */
432 mac_callbacks_t	iwp_m_callbacks = {
433 	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
434 	iwp_m_stat,
435 	iwp_m_start,
436 	iwp_m_stop,
437 	iwp_m_promisc,
438 	iwp_m_multicst,
439 	iwp_m_unicst,
440 	iwp_m_tx,
441 	NULL,
442 	iwp_m_ioctl,
443 	NULL,
444 	NULL,
445 	NULL,
446 	iwp_m_setprop,
447 	iwp_m_getprop,
448 	iwp_m_propinfo
449 };
450 
#ifdef DEBUG
/*
 * Emit a driver debug message, but only when the message's
 * section flag is enabled in the global iwp_dbg_flags mask.
 */
void
iwp_dbg(uint32_t flags, const char *fmt, ...)
{
	va_list	ap;

	if ((flags & iwp_dbg_flags) == 0) {
		return;
	}

	va_start(ap, fmt);
	vcmn_err(CE_NOTE, fmt, ap);
	va_end(ap);
}
#endif	/* DEBUG */
464 
465 /*
466  * device operations
467  */
468 int
iwp_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)469 iwp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
470 {
471 	iwp_sc_t		*sc;
472 	ieee80211com_t		*ic;
473 	int			instance, i;
474 	char			strbuf[32];
475 	wifi_data_t		wd = { 0 };
476 	mac_register_t		*macp;
477 	int			intr_type;
478 	int			intr_count;
479 	int			intr_actual;
480 	int			err = DDI_FAILURE;
481 
482 	switch (cmd) {
483 	case DDI_ATTACH:
484 		break;
485 	case DDI_RESUME:
486 		instance = ddi_get_instance(dip);
487 		sc = ddi_get_soft_state(iwp_soft_state_p,
488 		    instance);
489 		ASSERT(sc != NULL);
490 
491 		if (sc->sc_flags & IWP_F_RUNNING) {
492 			(void) iwp_init(sc);
493 		}
494 
495 		atomic_and_32(&sc->sc_flags, ~IWP_F_SUSPEND);
496 
497 		IWP_DBG((IWP_DEBUG_RESUME, "iwp_attach(): "
498 		    "resume\n"));
499 		return (DDI_SUCCESS);
500 	default:
501 		goto attach_fail1;
502 	}
503 
504 	instance = ddi_get_instance(dip);
505 	err = ddi_soft_state_zalloc(iwp_soft_state_p, instance);
506 	if (err != DDI_SUCCESS) {
507 		cmn_err(CE_WARN, "iwp_attach(): "
508 		    "failed to allocate soft state\n");
509 		goto attach_fail1;
510 	}
511 
512 	sc = ddi_get_soft_state(iwp_soft_state_p, instance);
513 	ASSERT(sc != NULL);
514 
515 	sc->sc_dip = dip;
516 
517 	/*
518 	 * map configure space
519 	 */
520 	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
521 	    &iwp_reg_accattr, &sc->sc_cfg_handle);
522 	if (err != DDI_SUCCESS) {
523 		cmn_err(CE_WARN, "iwp_attach(): "
524 		    "failed to map config spaces regs\n");
525 		goto attach_fail2;
526 	}
527 
528 	sc->sc_dev_id = ddi_get16(sc->sc_cfg_handle,
529 	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_DEVID));
530 	if ((sc->sc_dev_id != 0x422B) &&
531 	    (sc->sc_dev_id != 0x422C) &&
532 	    (sc->sc_dev_id != 0x4238) &&
533 	    (sc->sc_dev_id != 0x4239) &&
534 	    (sc->sc_dev_id != 0x008d) &&
535 	    (sc->sc_dev_id != 0x008e)) {
536 		cmn_err(CE_WARN, "iwp_attach(): "
537 		    "Do not support this device\n");
538 		goto attach_fail3;
539 	}
540 
541 	iwp_set_chip_param(sc);
542 
543 	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
544 	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
545 
546 	/*
547 	 * keep from disturbing C3 state of CPU
548 	 */
549 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base +
550 	    PCI_CFG_RETRY_TIMEOUT), 0);
551 
552 	/*
553 	 * determine the size of buffer for frame and command to ucode
554 	 */
555 	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
556 	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
557 	if (!sc->sc_clsz) {
558 		sc->sc_clsz = 16;
559 	}
560 	sc->sc_clsz = (sc->sc_clsz << 2);
561 
562 	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
563 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
564 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
565 	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);
566 
567 	/*
568 	 * Map operating registers
569 	 */
570 	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
571 	    0, 0, &iwp_reg_accattr, &sc->sc_handle);
572 	if (err != DDI_SUCCESS) {
573 		cmn_err(CE_WARN, "iwp_attach(): "
574 		    "failed to map device regs\n");
575 		goto attach_fail3;
576 	}
577 
578 	/*
579 	 * this is used to differentiate type of hardware
580 	 */
581 	sc->sc_hw_rev = IWP_READ(sc, CSR_HW_REV);
582 
583 	err = ddi_intr_get_supported_types(dip, &intr_type);
584 	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
585 		cmn_err(CE_WARN, "iwp_attach(): "
586 		    "fixed type interrupt is not supported\n");
587 		goto attach_fail4;
588 	}
589 
590 	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
591 	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
592 		cmn_err(CE_WARN, "iwp_attach(): "
593 		    "no fixed interrupts\n");
594 		goto attach_fail4;
595 	}
596 
597 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
598 
599 	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
600 	    intr_count, &intr_actual, 0);
601 	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
602 		cmn_err(CE_WARN, "iwp_attach(): "
603 		    "ddi_intr_alloc() failed 0x%x\n", err);
604 		goto attach_fail5;
605 	}
606 
607 	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
608 	if (err != DDI_SUCCESS) {
609 		cmn_err(CE_WARN, "iwp_attach(): "
610 		    "ddi_intr_get_pri() failed 0x%x\n", err);
611 		goto attach_fail6;
612 	}
613 
614 	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
615 	    DDI_INTR_PRI(sc->sc_intr_pri));
616 	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
617 	    DDI_INTR_PRI(sc->sc_intr_pri));
618 	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
619 	    DDI_INTR_PRI(sc->sc_intr_pri));
620 
621 	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
622 	cv_init(&sc->sc_put_seg_cv, NULL, CV_DRIVER, NULL);
623 	cv_init(&sc->sc_ucode_cv, NULL, CV_DRIVER, NULL);
624 
625 	/*
626 	 * initialize the mfthread
627 	 */
628 	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
629 	sc->sc_mf_thread = NULL;
630 	sc->sc_mf_thread_switch = 0;
631 
632 	/*
633 	 * Allocate shared buffer for communication between driver and ucode.
634 	 */
635 	err = iwp_alloc_shared(sc);
636 	if (err != DDI_SUCCESS) {
637 		cmn_err(CE_WARN, "iwp_attach(): "
638 		    "failed to allocate shared page\n");
639 		goto attach_fail7;
640 	}
641 
642 	(void) memset(sc->sc_shared, 0, sizeof (iwp_shared_t));
643 
644 	/*
645 	 * Allocate keep warm page.
646 	 */
647 	err = iwp_alloc_kw(sc);
648 	if (err != DDI_SUCCESS) {
649 		cmn_err(CE_WARN, "iwp_attach(): "
650 		    "failed to allocate keep warm page\n");
651 		goto attach_fail8;
652 	}
653 
654 	/*
655 	 * Do some necessary hardware initializations.
656 	 */
657 	err = iwp_preinit(sc);
658 	if (err != IWP_SUCCESS) {
659 		cmn_err(CE_WARN, "iwp_attach(): "
660 		    "failed to initialize hardware\n");
661 		goto attach_fail9;
662 	}
663 
664 	/*
665 	 * get hardware configurations from eeprom
666 	 */
667 	err = iwp_eep_load(sc);
668 	if (err != IWP_SUCCESS) {
669 		cmn_err(CE_WARN, "iwp_attach(): "
670 		    "failed to load eeprom\n");
671 		goto attach_fail9;
672 	}
673 
674 	/*
675 	 * calibration information from EEPROM
676 	 */
677 	sc->sc_eep_calib = (struct iwp_eep_calibration *)
678 	    iwp_eep_addr_trans(sc, EEP_CALIBRATION);
679 
680 	err = iwp_eep_ver_chk(sc);
681 	if (err != IWP_SUCCESS) {
682 		goto attach_fail9;
683 	}
684 
685 	/*
686 	 * get MAC address of this chipset
687 	 */
688 	iwp_get_mac_from_eep(sc);
689 
690 
691 	/*
692 	 * initialize TX and RX ring buffers
693 	 */
694 	err = iwp_ring_init(sc);
695 	if (err != DDI_SUCCESS) {
696 		cmn_err(CE_WARN, "iwp_attach(): "
697 		    "failed to allocate and initialize ring\n");
698 		goto attach_fail9;
699 	}
700 
701 	sc->sc_hdr = (iwp_firmware_hdr_t *)iwp_fw_bin;
702 
703 	/*
704 	 * copy ucode to dma buffer
705 	 */
706 	err = iwp_alloc_fw_dma(sc);
707 	if (err != DDI_SUCCESS) {
708 		cmn_err(CE_WARN, "iwp_attach(): "
709 		    "failed to allocate firmware dma\n");
710 		goto attach_fail10;
711 	}
712 
713 	/*
714 	 * Initialize the wifi part, which will be used by
715 	 * 802.11 module
716 	 */
717 	ic = &sc->sc_ic;
718 	ic->ic_phytype  = IEEE80211_T_OFDM;
719 	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
720 	ic->ic_state    = IEEE80211_S_INIT;
721 	ic->ic_maxrssi  = 100; /* experimental number */
722 	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
723 	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
724 
725 	/*
726 	 * Support WPA/WPA2
727 	 */
728 	ic->ic_caps |= IEEE80211_C_WPA;
729 
730 	/*
731 	 * set supported .11b and .11g rates
732 	 */
733 	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwp_rateset_11b;
734 	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwp_rateset_11g;
735 
736 	/*
737 	 * set supported .11b and .11g channels (1 through 11)
738 	 */
739 	for (i = 1; i <= 11; i++) {
740 		ic->ic_sup_channels[i].ich_freq =
741 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
742 		ic->ic_sup_channels[i].ich_flags =
743 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
744 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
745 		    IEEE80211_CHAN_PASSIVE;
746 	}
747 
748 	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
749 	ic->ic_xmit = iwp_send;
750 
751 	/*
752 	 * attach to 802.11 module
753 	 */
754 	ieee80211_attach(ic);
755 
756 	/*
757 	 * different instance has different WPA door
758 	 */
759 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
760 	    ddi_driver_name(dip),
761 	    ddi_get_instance(dip));
762 
763 	/*
764 	 * Overwrite 80211 default configurations.
765 	 */
766 	iwp_overwrite_ic_default(sc);
767 
768 	/*
769 	 * initialize 802.11 module
770 	 */
771 	ieee80211_media_init(ic);
772 
773 	/*
774 	 * initialize default tx key
775 	 */
776 	ic->ic_def_txkey = 0;
777 
778 	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
779 	    iwp_rx_softintr, (caddr_t)sc);
780 	if (err != DDI_SUCCESS) {
781 		cmn_err(CE_WARN, "iwp_attach(): "
782 		    "add soft interrupt failed\n");
783 		goto attach_fail12;
784 	}
785 
786 	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwp_intr,
787 	    (caddr_t)sc, NULL);
788 	if (err != DDI_SUCCESS) {
789 		cmn_err(CE_WARN, "iwp_attach(): "
790 		    "ddi_intr_add_handle() failed\n");
791 		goto attach_fail13;
792 	}
793 
794 	err = ddi_intr_enable(sc->sc_intr_htable[0]);
795 	if (err != DDI_SUCCESS) {
796 		cmn_err(CE_WARN, "iwp_attach(): "
797 		    "ddi_intr_enable() failed\n");
798 		goto attach_fail14;
799 	}
800 
801 	/*
802 	 * Initialize pointer to device specific functions
803 	 */
804 	wd.wd_secalloc = WIFI_SEC_NONE;
805 	wd.wd_opmode = ic->ic_opmode;
806 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
807 
808 	/*
809 	 * create relation to GLD
810 	 */
811 	macp = mac_alloc(MAC_VERSION);
812 	if (NULL == macp) {
813 		cmn_err(CE_WARN, "iwp_attach(): "
814 		    "failed to do mac_alloc()\n");
815 		goto attach_fail15;
816 	}
817 
818 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
819 	macp->m_driver		= sc;
820 	macp->m_dip		= dip;
821 	macp->m_src_addr	= ic->ic_macaddr;
822 	macp->m_callbacks	= &iwp_m_callbacks;
823 	macp->m_min_sdu		= 0;
824 	macp->m_max_sdu		= IEEE80211_MTU;
825 	macp->m_pdata		= &wd;
826 	macp->m_pdata_size	= sizeof (wd);
827 
828 	/*
829 	 * Register the macp to mac
830 	 */
831 	err = mac_register(macp, &ic->ic_mach);
832 	mac_free(macp);
833 	if (err != DDI_SUCCESS) {
834 		cmn_err(CE_WARN, "iwp_attach(): "
835 		    "failed to do mac_register()\n");
836 		goto attach_fail15;
837 	}
838 
839 	/*
840 	 * Create minor node of type DDI_NT_NET_WIFI
841 	 */
842 	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_SP"%d", instance);
843 	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
844 	    instance + 1, DDI_NT_NET_WIFI, 0);
845 	if (err != DDI_SUCCESS) {
846 		cmn_err(CE_WARN, "iwp_attach(): "
847 		    "failed to do ddi_create_minor_node()\n");
848 	}
849 
850 	/*
851 	 * Notify link is down now
852 	 */
853 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
854 
855 	/*
856 	 * create the mf thread to handle the link status,
857 	 * recovery fatal error, etc.
858 	 */
859 	sc->sc_mf_thread_switch = 1;
860 	if (NULL == sc->sc_mf_thread) {
861 		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
862 		    iwp_thread, sc, 0, &p0, TS_RUN, minclsyspri);
863 	}
864 
865 	atomic_or_32(&sc->sc_flags, IWP_F_ATTACHED);
866 
867 	return (DDI_SUCCESS);
868 
869 attach_fail15:
870 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
871 attach_fail14:
872 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
873 attach_fail13:
874 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
875 	sc->sc_soft_hdl = NULL;
876 attach_fail12:
877 	ieee80211_detach(ic);
878 attach_fail11:
879 	iwp_free_fw_dma(sc);
880 attach_fail10:
881 	iwp_ring_free(sc);
882 attach_fail9:
883 	iwp_free_kw(sc);
884 attach_fail8:
885 	iwp_free_shared(sc);
886 attach_fail7:
887 	iwp_destroy_locks(sc);
888 attach_fail6:
889 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
890 attach_fail5:
891 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
892 attach_fail4:
893 	ddi_regs_map_free(&sc->sc_handle);
894 attach_fail3:
895 	ddi_regs_map_free(&sc->sc_cfg_handle);
896 attach_fail2:
897 	ddi_soft_state_free(iwp_soft_state_p, instance);
898 attach_fail1:
899 	return (DDI_FAILURE);
900 }
901 
/*
 * iwp_detach() undoes iwp_attach().
 *
 *   DDI_DETACH: stop the service thread and the chipset, unregister
 *	from GLDv3 and net80211, then release DMA resources,
 *	interrupts, register mappings and locks.
 *
 *   DDI_SUSPEND: stop the hardware and mark the driver suspended;
 *	the matching DDI_RESUME in iwp_attach() restarts it.
 */
int
iwp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	iwp_sc_t *sc;
	ieee80211com_t	*ic;
	int err;

	sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
	ASSERT(sc != NULL);
	ic = &sc->sc_ic;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
		atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);

		atomic_or_32(&sc->sc_flags, IWP_F_SUSPEND);

		if (sc->sc_flags & IWP_F_RUNNING) {
			iwp_stop(sc);
			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

		}

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_detach(): "
		    "suspend\n"));
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (!(sc->sc_flags & IWP_F_ATTACHED)) {
		return (DDI_FAILURE);
	}

	/*
	 * Destroy the mf_thread: clear its run switch and wait until
	 * the thread signals its exit via sc_mt_cv
	 */
	sc->sc_mf_thread_switch = 0;

	mutex_enter(&sc->sc_mt_lock);
	while (sc->sc_mf_thread != NULL) {
		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) {
			break;
		}
	}
	mutex_exit(&sc->sc_mt_lock);

	/*
	 * mac_disable() fails (and so does detach) while clients
	 * still hold the device open
	 */
	err = mac_disable(sc->sc_ic.ic_mach);
	if (err != DDI_SUCCESS) {
		return (err);
	}

	/*
	 * stop chipset
	 */
	iwp_stop(sc);

	DELAY(500000);

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	/*
	 * Unregister from GLD
	 */
	(void) mac_unregister(sc->sc_ic.ic_mach);

	mutex_enter(&sc->sc_glock);
	iwp_free_fw_dma(sc);
	iwp_ring_free(sc);
	iwp_free_kw(sc);
	iwp_free_shared(sc);
	mutex_exit(&sc->sc_glock);

	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));

	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;

	/*
	 * detach from 80211 module
	 */
	ieee80211_detach(&sc->sc_ic);

	iwp_destroy_locks(sc);

	ddi_regs_map_free(&sc->sc_handle);
	ddi_regs_map_free(&sc->sc_cfg_handle);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(iwp_soft_state_p, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}
1003 
1004 /*
1005  * destroy all locks
1006  */
1007 static void
iwp_destroy_locks(iwp_sc_t * sc)1008 iwp_destroy_locks(iwp_sc_t *sc)
1009 {
1010 	cv_destroy(&sc->sc_mt_cv);
1011 	cv_destroy(&sc->sc_cmd_cv);
1012 	cv_destroy(&sc->sc_put_seg_cv);
1013 	cv_destroy(&sc->sc_ucode_cv);
1014 	mutex_destroy(&sc->sc_mt_lock);
1015 	mutex_destroy(&sc->sc_tx_lock);
1016 	mutex_destroy(&sc->sc_glock);
1017 }
1018 
1019 /*
1020  * Allocate an area of memory and a DMA handle for accessing it
1021  */
1022 static int
iwp_alloc_dma_mem(iwp_sc_t * sc,size_t memsize,ddi_dma_attr_t * dma_attr_p,ddi_device_acc_attr_t * acc_attr_p,uint_t dma_flags,iwp_dma_t * dma_p)1023 iwp_alloc_dma_mem(iwp_sc_t *sc, size_t memsize,
1024     ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
1025     uint_t dma_flags, iwp_dma_t *dma_p)
1026 {
1027 	caddr_t vaddr;
1028 	int err = DDI_FAILURE;
1029 
1030 	/*
1031 	 * Allocate handle
1032 	 */
1033 	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
1034 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
1035 	if (err != DDI_SUCCESS) {
1036 		dma_p->dma_hdl = NULL;
1037 		return (DDI_FAILURE);
1038 	}
1039 
1040 	/*
1041 	 * Allocate memory
1042 	 */
1043 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
1044 	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
1045 	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
1046 	if (err != DDI_SUCCESS) {
1047 		ddi_dma_free_handle(&dma_p->dma_hdl);
1048 		dma_p->dma_hdl = NULL;
1049 		dma_p->acc_hdl = NULL;
1050 		return (DDI_FAILURE);
1051 	}
1052 
1053 	/*
1054 	 * Bind the two together
1055 	 */
1056 	dma_p->mem_va = vaddr;
1057 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
1058 	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
1059 	    &dma_p->cookie, &dma_p->ncookies);
1060 	if (err != DDI_DMA_MAPPED) {
1061 		ddi_dma_mem_free(&dma_p->acc_hdl);
1062 		ddi_dma_free_handle(&dma_p->dma_hdl);
1063 		dma_p->acc_hdl = NULL;
1064 		dma_p->dma_hdl = NULL;
1065 		return (DDI_FAILURE);
1066 	}
1067 
1068 	dma_p->nslots = ~0U;
1069 	dma_p->size = ~0U;
1070 	dma_p->token = ~0U;
1071 	dma_p->offset = 0;
1072 	return (DDI_SUCCESS);
1073 }
1074 
1075 /*
1076  * Free one allocated area of DMAable memory
1077  */
1078 static void
iwp_free_dma_mem(iwp_dma_t * dma_p)1079 iwp_free_dma_mem(iwp_dma_t *dma_p)
1080 {
1081 	if (dma_p->dma_hdl != NULL) {
1082 		if (dma_p->ncookies) {
1083 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1084 			dma_p->ncookies = 0;
1085 		}
1086 		ddi_dma_free_handle(&dma_p->dma_hdl);
1087 		dma_p->dma_hdl = NULL;
1088 	}
1089 
1090 	if (dma_p->acc_hdl != NULL) {
1091 		ddi_dma_mem_free(&dma_p->acc_hdl);
1092 		dma_p->acc_hdl = NULL;
1093 	}
1094 }
1095 
1096 /*
1097  * copy ucode into dma buffers
1098  */
1099 static int
iwp_alloc_fw_dma(iwp_sc_t * sc)1100 iwp_alloc_fw_dma(iwp_sc_t *sc)
1101 {
1102 	int err = DDI_FAILURE;
1103 	iwp_dma_t *dma_p;
1104 	char *t;
1105 
1106 	/*
1107 	 * firmware image layout:
1108 	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
1109 	 */
1110 
1111 	/*
1112 	 * Check firmware image size.
1113 	 */
1114 	if (LE_32(sc->sc_hdr->init_textsz) > RTC_INST_SIZE) {
1115 		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1116 		    "firmware init text size 0x%x is too large\n",
1117 		    LE_32(sc->sc_hdr->init_textsz));
1118 
1119 		goto fail;
1120 	}
1121 
1122 	if (LE_32(sc->sc_hdr->init_datasz) > RTC_DATA_SIZE) {
1123 		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1124 		    "firmware init data size 0x%x is too large\n",
1125 		    LE_32(sc->sc_hdr->init_datasz));
1126 
1127 		goto fail;
1128 	}
1129 
1130 	if (LE_32(sc->sc_hdr->textsz) > RTC_INST_SIZE) {
1131 		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1132 		    "firmware text size 0x%x is too large\n",
1133 		    LE_32(sc->sc_hdr->textsz));
1134 
1135 		goto fail;
1136 	}
1137 
1138 	if (LE_32(sc->sc_hdr->datasz) > RTC_DATA_SIZE) {
1139 		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1140 		    "firmware data size 0x%x is too large\n",
1141 		    LE_32(sc->sc_hdr->datasz));
1142 
1143 		goto fail;
1144 	}
1145 
1146 	/*
1147 	 * copy text of runtime ucode
1148 	 */
1149 	t = (char *)(sc->sc_hdr + 1);
1150 	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1151 	    &fw_dma_attr, &iwp_dma_accattr,
1152 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1153 	    &sc->sc_dma_fw_text);
1154 	if (err != DDI_SUCCESS) {
1155 		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1156 		    "failed to allocate text dma memory.\n");
1157 		goto fail;
1158 	}
1159 
1160 	dma_p = &sc->sc_dma_fw_text;
1161 
1162 	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
1163 	    "text[ncookies:%d addr:%lx size:%lx]\n",
1164 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1165 	    dma_p->cookie.dmac_size));
1166 
1167 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
1168 
1169 	/*
1170 	 * copy data and bak-data of runtime ucode
1171 	 */
1172 	t += LE_32(sc->sc_hdr->textsz);
1173 	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1174 	    &fw_dma_attr, &iwp_dma_accattr,
1175 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1176 	    &sc->sc_dma_fw_data);
1177 	if (err != DDI_SUCCESS) {
1178 		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1179 		    "failed to allocate data dma memory\n");
1180 		goto fail;
1181 	}
1182 
1183 	dma_p = &sc->sc_dma_fw_data;
1184 
1185 	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
1186 	    "data[ncookies:%d addr:%lx size:%lx]\n",
1187 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1188 	    dma_p->cookie.dmac_size));
1189 
1190 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1191 
1192 	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1193 	    &fw_dma_attr, &iwp_dma_accattr,
1194 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1195 	    &sc->sc_dma_fw_data_bak);
1196 	if (err != DDI_SUCCESS) {
1197 		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1198 		    "failed to allocate data bakup dma memory\n");
1199 		goto fail;
1200 	}
1201 
1202 	dma_p = &sc->sc_dma_fw_data_bak;
1203 
1204 	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
1205 	    "data_bak[ncookies:%d addr:%lx "
1206 	    "size:%lx]\n",
1207 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1208 	    dma_p->cookie.dmac_size));
1209 
1210 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1211 
1212 	/*
1213 	 * copy text of init ucode
1214 	 */
1215 	t += LE_32(sc->sc_hdr->datasz);
1216 	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1217 	    &fw_dma_attr, &iwp_dma_accattr,
1218 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1219 	    &sc->sc_dma_fw_init_text);
1220 	if (err != DDI_SUCCESS) {
1221 		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1222 		    "failed to allocate init text dma memory\n");
1223 		goto fail;
1224 	}
1225 
1226 	dma_p = &sc->sc_dma_fw_init_text;
1227 
1228 	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
1229 	    "init_text[ncookies:%d addr:%lx "
1230 	    "size:%lx]\n",
1231 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1232 	    dma_p->cookie.dmac_size));
1233 
1234 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
1235 
1236 	/*
1237 	 * copy data of init ucode
1238 	 */
1239 	t += LE_32(sc->sc_hdr->init_textsz);
1240 	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1241 	    &fw_dma_attr, &iwp_dma_accattr,
1242 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1243 	    &sc->sc_dma_fw_init_data);
1244 	if (err != DDI_SUCCESS) {
1245 		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1246 		    "failed to allocate init data dma memory\n");
1247 		goto fail;
1248 	}
1249 
1250 	dma_p = &sc->sc_dma_fw_init_data;
1251 
1252 	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
1253 	    "init_data[ncookies:%d addr:%lx "
1254 	    "size:%lx]\n",
1255 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1256 	    dma_p->cookie.dmac_size));
1257 
1258 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
1259 
1260 	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1261 fail:
1262 	return (err);
1263 }
1264 
1265 static void
iwp_free_fw_dma(iwp_sc_t * sc)1266 iwp_free_fw_dma(iwp_sc_t *sc)
1267 {
1268 	iwp_free_dma_mem(&sc->sc_dma_fw_text);
1269 	iwp_free_dma_mem(&sc->sc_dma_fw_data);
1270 	iwp_free_dma_mem(&sc->sc_dma_fw_data_bak);
1271 	iwp_free_dma_mem(&sc->sc_dma_fw_init_text);
1272 	iwp_free_dma_mem(&sc->sc_dma_fw_init_data);
1273 }
1274 
1275 /*
1276  * Allocate a shared buffer between host and NIC.
1277  */
1278 static int
iwp_alloc_shared(iwp_sc_t * sc)1279 iwp_alloc_shared(iwp_sc_t *sc)
1280 {
1281 #ifdef	DEBUG
1282 	iwp_dma_t *dma_p;
1283 #endif
1284 	int err = DDI_FAILURE;
1285 
1286 	/*
1287 	 * must be aligned on a 4K-page boundary
1288 	 */
1289 	err = iwp_alloc_dma_mem(sc, sizeof (iwp_shared_t),
1290 	    &sh_dma_attr, &iwp_dma_descattr,
1291 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1292 	    &sc->sc_dma_sh);
1293 	if (err != DDI_SUCCESS) {
1294 		goto fail;
1295 	}
1296 
1297 	sc->sc_shared = (iwp_shared_t *)sc->sc_dma_sh.mem_va;
1298 
1299 #ifdef	DEBUG
1300 	dma_p = &sc->sc_dma_sh;
1301 #endif
1302 	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_shared(): "
1303 	    "sh[ncookies:%d addr:%lx size:%lx]\n",
1304 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1305 	    dma_p->cookie.dmac_size));
1306 
1307 	return (err);
1308 fail:
1309 	iwp_free_shared(sc);
1310 	return (err);
1311 }
1312 
/*
 * Release the host/NIC shared communication area.
 */
static void
iwp_free_shared(iwp_sc_t *sc)
{
	iwp_free_dma_mem(&sc->sc_dma_sh);
}
1318 
1319 /*
1320  * Allocate a keep warm page.
1321  */
1322 static int
iwp_alloc_kw(iwp_sc_t * sc)1323 iwp_alloc_kw(iwp_sc_t *sc)
1324 {
1325 #ifdef	DEBUG
1326 	iwp_dma_t *dma_p;
1327 #endif
1328 	int err = DDI_FAILURE;
1329 
1330 	/*
1331 	 * must be aligned on a 4K-page boundary
1332 	 */
1333 	err = iwp_alloc_dma_mem(sc, IWP_KW_SIZE,
1334 	    &kw_dma_attr, &iwp_dma_descattr,
1335 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1336 	    &sc->sc_dma_kw);
1337 	if (err != DDI_SUCCESS) {
1338 		goto fail;
1339 	}
1340 
1341 #ifdef	DEBUG
1342 	dma_p = &sc->sc_dma_kw;
1343 #endif
1344 	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_kw(): "
1345 	    "kw[ncookies:%d addr:%lx size:%lx]\n",
1346 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1347 	    dma_p->cookie.dmac_size));
1348 
1349 	return (err);
1350 fail:
1351 	iwp_free_kw(sc);
1352 	return (err);
1353 }
1354 
/*
 * Release the keep warm page.
 */
static void
iwp_free_kw(iwp_sc_t *sc)
{
	iwp_free_dma_mem(&sc->sc_dma_kw);
}
1360 
1361 /*
1362  * initialize RX ring buffers
1363  */
1364 static int
iwp_alloc_rx_ring(iwp_sc_t * sc)1365 iwp_alloc_rx_ring(iwp_sc_t *sc)
1366 {
1367 	iwp_rx_ring_t *ring;
1368 	iwp_rx_data_t *data;
1369 #ifdef	DEBUG
1370 	iwp_dma_t *dma_p;
1371 #endif
1372 	int i, err = DDI_FAILURE;
1373 
1374 	ring = &sc->sc_rxq;
1375 	ring->cur = 0;
1376 
1377 	/*
1378 	 * allocate RX description ring buffer
1379 	 */
1380 	err = iwp_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
1381 	    &ring_desc_dma_attr, &iwp_dma_descattr,
1382 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1383 	    &ring->dma_desc);
1384 	if (err != DDI_SUCCESS) {
1385 		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
1386 		    "dma alloc rx ring desc "
1387 		    "failed\n"));
1388 		goto fail;
1389 	}
1390 
1391 	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
1392 #ifdef	DEBUG
1393 	dma_p = &ring->dma_desc;
1394 #endif
1395 	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
1396 	    "rx bd[ncookies:%d addr:%lx size:%lx]\n",
1397 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1398 	    dma_p->cookie.dmac_size));
1399 
1400 	/*
1401 	 * Allocate Rx frame buffers.
1402 	 */
1403 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1404 		data = &ring->data[i];
1405 		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1406 		    &rx_buffer_dma_attr, &iwp_dma_accattr,
1407 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1408 		    &data->dma_data);
1409 		if (err != DDI_SUCCESS) {
1410 			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
1411 			    "dma alloc rx ring "
1412 			    "buf[%d] failed\n", i));
1413 			goto fail;
1414 		}
1415 		/*
1416 		 * the physical address bit [8-36] are used,
1417 		 * instead of bit [0-31] in 3945.
1418 		 */
1419 		ring->desc[i] = (uint32_t)
1420 		    (data->dma_data.cookie.dmac_address >> 8);
1421 	}
1422 
1423 #ifdef	DEBUG
1424 	dma_p = &ring->data[0].dma_data;
1425 #endif
1426 	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
1427 	    "rx buffer[0][ncookies:%d addr:%lx "
1428 	    "size:%lx]\n",
1429 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1430 	    dma_p->cookie.dmac_size));
1431 
1432 	IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1433 
1434 	return (err);
1435 
1436 fail:
1437 	iwp_free_rx_ring(sc);
1438 	return (err);
1439 }
1440 
1441 /*
1442  * disable RX ring
1443  */
1444 static void
iwp_reset_rx_ring(iwp_sc_t * sc)1445 iwp_reset_rx_ring(iwp_sc_t *sc)
1446 {
1447 	int n;
1448 
1449 	iwp_mac_access_enter(sc);
1450 	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1451 	for (n = 0; n < 2000; n++) {
1452 		if (IWP_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) {
1453 			break;
1454 		}
1455 		DELAY(1000);
1456 	}
1457 #ifdef DEBUG
1458 	if (2000 == n) {
1459 		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_rx_ring(): "
1460 		    "timeout resetting Rx ring\n"));
1461 	}
1462 #endif
1463 	iwp_mac_access_exit(sc);
1464 
1465 	sc->sc_rxq.cur = 0;
1466 }
1467 
1468 static void
iwp_free_rx_ring(iwp_sc_t * sc)1469 iwp_free_rx_ring(iwp_sc_t *sc)
1470 {
1471 	int i;
1472 
1473 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1474 		if (sc->sc_rxq.data[i].dma_data.dma_hdl) {
1475 			IWP_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1476 			    DDI_DMA_SYNC_FORCPU);
1477 		}
1478 
1479 		iwp_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1480 	}
1481 
1482 	if (sc->sc_rxq.dma_desc.dma_hdl) {
1483 		IWP_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1484 	}
1485 
1486 	iwp_free_dma_mem(&sc->sc_rxq.dma_desc);
1487 }
1488 
1489 /*
1490  * initialize TX ring buffers
1491  */
1492 static int
iwp_alloc_tx_ring(iwp_sc_t * sc,iwp_tx_ring_t * ring,int slots,int qid)1493 iwp_alloc_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring,
1494     int slots, int qid)
1495 {
1496 	iwp_tx_data_t *data;
1497 	iwp_tx_desc_t *desc_h;
1498 	uint32_t paddr_desc_h;
1499 	iwp_cmd_t *cmd_h;
1500 	uint32_t paddr_cmd_h;
1501 #ifdef	DEBUG
1502 	iwp_dma_t *dma_p;
1503 #endif
1504 	int i, err = DDI_FAILURE;
1505 	ring->qid = qid;
1506 	ring->count = TFD_QUEUE_SIZE_MAX;
1507 	ring->window = slots;
1508 	ring->queued = 0;
1509 	ring->cur = 0;
1510 	ring->desc_cur = 0;
1511 
1512 	/*
1513 	 * allocate buffer for TX descriptor ring
1514 	 */
1515 	err = iwp_alloc_dma_mem(sc,
1516 	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_tx_desc_t),
1517 	    &ring_desc_dma_attr, &iwp_dma_descattr,
1518 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1519 	    &ring->dma_desc);
1520 	if (err != DDI_SUCCESS) {
1521 		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
1522 		    "dma alloc tx ring desc[%d] "
1523 		    "failed\n", qid));
1524 		goto fail;
1525 	}
1526 
1527 #ifdef	DEBUG
1528 	dma_p = &ring->dma_desc;
1529 #endif
1530 	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
1531 	    "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1532 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1533 	    dma_p->cookie.dmac_size));
1534 
1535 	desc_h = (iwp_tx_desc_t *)ring->dma_desc.mem_va;
1536 	paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1537 
1538 	/*
1539 	 * allocate buffer for ucode command
1540 	 */
1541 	err = iwp_alloc_dma_mem(sc,
1542 	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_cmd_t),
1543 	    &cmd_dma_attr, &iwp_dma_accattr,
1544 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1545 	    &ring->dma_cmd);
1546 	if (err != DDI_SUCCESS) {
1547 		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
1548 		    "dma alloc tx ring cmd[%d]"
1549 		    " failed\n", qid));
1550 		goto fail;
1551 	}
1552 
1553 #ifdef	DEBUG
1554 	dma_p = &ring->dma_cmd;
1555 #endif
1556 	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
1557 	    "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1558 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1559 	    dma_p->cookie.dmac_size));
1560 
1561 	cmd_h = (iwp_cmd_t *)ring->dma_cmd.mem_va;
1562 	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1563 
1564 	/*
1565 	 * Allocate Tx frame buffers.
1566 	 */
1567 	ring->data = kmem_zalloc(sizeof (iwp_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1568 	    KM_NOSLEEP);
1569 	if (NULL == ring->data) {
1570 		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
1571 		    "could not allocate "
1572 		    "tx data slots\n"));
1573 		goto fail;
1574 	}
1575 
1576 	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1577 		data = &ring->data[i];
1578 		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1579 		    &tx_buffer_dma_attr, &iwp_dma_accattr,
1580 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
1581 		    &data->dma_data);
1582 		if (err != DDI_SUCCESS) {
1583 			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
1584 			    "dma alloc tx "
1585 			    "ring buf[%d] failed\n", i));
1586 			goto fail;
1587 		}
1588 
1589 		data->desc = desc_h + i;
1590 		data->paddr_desc = paddr_desc_h +
1591 		    _PTRDIFF(data->desc, desc_h);
1592 		data->cmd = cmd_h +  i;
1593 		data->paddr_cmd = paddr_cmd_h +
1594 		    _PTRDIFF(data->cmd, cmd_h);
1595 	}
1596 #ifdef	DEBUG
1597 	dma_p = &ring->data[0].dma_data;
1598 #endif
1599 	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
1600 	    "tx buffer[0][ncookies:%d addr:%lx "
1601 	    "size:%lx]\n",
1602 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1603 	    dma_p->cookie.dmac_size));
1604 
1605 	return (err);
1606 
1607 fail:
1608 	iwp_free_tx_ring(ring);
1609 
1610 	return (err);
1611 }
1612 
1613 /*
1614  * disable TX ring
1615  */
1616 static void
iwp_reset_tx_ring(iwp_sc_t * sc,iwp_tx_ring_t * ring)1617 iwp_reset_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring)
1618 {
1619 	iwp_tx_data_t *data;
1620 	int i, n;
1621 
1622 	iwp_mac_access_enter(sc);
1623 
1624 	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1625 	for (n = 0; n < 200; n++) {
1626 		if (IWP_READ(sc, IWP_FH_TSSR_TX_STATUS_REG) &
1627 		    IWP_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) {
1628 			break;
1629 		}
1630 		DELAY(10);
1631 	}
1632 
1633 #ifdef	DEBUG
1634 	if (200 == n) {
1635 		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_tx_ring(): "
1636 		    "timeout reset tx ring %d\n",
1637 		    ring->qid));
1638 	}
1639 #endif
1640 
1641 	iwp_mac_access_exit(sc);
1642 
1643 	/* by pass, if it's quiesce */
1644 	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
1645 		for (i = 0; i < ring->count; i++) {
1646 			data = &ring->data[i];
1647 			IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1648 		}
1649 	}
1650 
1651 	ring->queued = 0;
1652 	ring->cur = 0;
1653 	ring->desc_cur = 0;
1654 }
1655 
/*
 * Tear down one TX queue: the descriptor ring, the command buffer
 * ring, and every per-slot frame buffer (plus the slot array itself).
 * Bound buffers are synced for the device before being freed.  Safe
 * on a partially constructed ring.
 */
static void
iwp_free_tx_ring(iwp_tx_ring_t *ring)
{
	int i;

	if (ring->dma_desc.dma_hdl != NULL) {
		IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
	}
	iwp_free_dma_mem(&ring->dma_desc);

	if (ring->dma_cmd.dma_hdl != NULL) {
		IWP_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
	}
	iwp_free_dma_mem(&ring->dma_cmd);

	if (ring->data != NULL) {
		for (i = 0; i < ring->count; i++) {
			if (ring->data[i].dma_data.dma_hdl) {
				IWP_DMA_SYNC(ring->data[i].dma_data,
				    DDI_DMA_SYNC_FORDEV);
			}
			iwp_free_dma_mem(&ring->data[i].dma_data);
		}
		kmem_free(ring->data, ring->count * sizeof (iwp_tx_data_t));
	}
}
1682 
1683 /*
1684  * initialize TX and RX ring
1685  */
1686 static int
iwp_ring_init(iwp_sc_t * sc)1687 iwp_ring_init(iwp_sc_t *sc)
1688 {
1689 	int i, err = DDI_FAILURE;
1690 
1691 	for (i = 0; i < IWP_NUM_QUEUES; i++) {
1692 		if (IWP_CMD_QUEUE_NUM == i) {
1693 			continue;
1694 		}
1695 
1696 		err = iwp_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1697 		    i);
1698 		if (err != DDI_SUCCESS) {
1699 			goto fail;
1700 		}
1701 	}
1702 
1703 	/*
1704 	 * initialize command queue
1705 	 */
1706 	err = iwp_alloc_tx_ring(sc, &sc->sc_txq[IWP_CMD_QUEUE_NUM],
1707 	    TFD_CMD_SLOTS, IWP_CMD_QUEUE_NUM);
1708 	if (err != DDI_SUCCESS) {
1709 		goto fail;
1710 	}
1711 
1712 	err = iwp_alloc_rx_ring(sc);
1713 	if (err != DDI_SUCCESS) {
1714 		goto fail;
1715 	}
1716 
1717 fail:
1718 	return (err);
1719 }
1720 
1721 static void
iwp_ring_free(iwp_sc_t * sc)1722 iwp_ring_free(iwp_sc_t *sc)
1723 {
1724 	int i = IWP_NUM_QUEUES;
1725 
1726 	iwp_free_rx_ring(sc);
1727 	while (--i >= 0) {
1728 		iwp_free_tx_ring(&sc->sc_txq[i]);
1729 	}
1730 }
1731 
1732 /* ARGSUSED */
1733 static ieee80211_node_t *
iwp_node_alloc(ieee80211com_t * ic)1734 iwp_node_alloc(ieee80211com_t *ic)
1735 {
1736 	iwp_amrr_t *amrr;
1737 
1738 	amrr = kmem_zalloc(sizeof (iwp_amrr_t), KM_SLEEP);
1739 	if (NULL == amrr) {
1740 		cmn_err(CE_WARN, "iwp_node_alloc(): "
1741 		    "failed to allocate memory for amrr structure\n");
1742 		return (NULL);
1743 	}
1744 
1745 	iwp_amrr_init(amrr);
1746 
1747 	return (&amrr->in);
1748 }
1749 
1750 static void
iwp_node_free(ieee80211_node_t * in)1751 iwp_node_free(ieee80211_node_t *in)
1752 {
1753 	ieee80211com_t *ic;
1754 
1755 	if ((NULL == in) ||
1756 	    (NULL == in->in_ic)) {
1757 		cmn_err(CE_WARN, "iwp_node_free() "
1758 		    "Got a NULL point from Net80211 module\n");
1759 		return;
1760 	}
1761 	ic = in->in_ic;
1762 
1763 	if (ic->ic_node_cleanup != NULL) {
1764 		ic->ic_node_cleanup(in);
1765 	}
1766 
1767 	if (in->in_wpa_ie != NULL) {
1768 		ieee80211_free(in->in_wpa_ie);
1769 	}
1770 
1771 	if (in->in_wme_ie != NULL) {
1772 		ieee80211_free(in->in_wme_ie);
1773 	}
1774 
1775 	if (in->in_htcap_ie != NULL) {
1776 		ieee80211_free(in->in_htcap_ie);
1777 	}
1778 
1779 	kmem_free(in, sizeof (iwp_amrr_t));
1780 }
1781 
1782 
1783 /*
1784  * change station's state. this function will be invoked by 80211 module
1785  * when need to change staton's state.
1786  */
1787 static int
iwp_newstate(ieee80211com_t * ic,enum ieee80211_state nstate,int arg)1788 iwp_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1789 {
1790 	iwp_sc_t *sc;
1791 	ieee80211_node_t *in;
1792 	enum ieee80211_state ostate;
1793 	iwp_add_sta_t node;
1794 	int i, err = IWP_FAIL;
1795 
1796 	if (NULL == ic) {
1797 		return (err);
1798 	}
1799 	sc = (iwp_sc_t *)ic;
1800 	in = ic->ic_bss;
1801 	ostate = ic->ic_state;
1802 
1803 	mutex_enter(&sc->sc_glock);
1804 
1805 	switch (nstate) {
1806 	case IEEE80211_S_SCAN:
1807 		switch (ostate) {
1808 		case IEEE80211_S_INIT:
1809 			atomic_or_32(&sc->sc_flags, IWP_F_SCANNING);
1810 			iwp_set_led(sc, 2, 10, 2);
1811 
1812 			/*
1813 			 * clear association to receive beacons from
1814 			 * all BSS'es
1815 			 */
1816 			sc->sc_config.assoc_id = 0;
1817 			sc->sc_config.filter_flags &=
1818 			    ~LE_32(RXON_FILTER_ASSOC_MSK);
1819 
1820 			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
1821 			    "config chan %d "
1822 			    "flags %x filter_flags %x\n",
1823 			    LE_16(sc->sc_config.chan),
1824 			    LE_32(sc->sc_config.flags),
1825 			    LE_32(sc->sc_config.filter_flags)));
1826 
1827 			err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
1828 			    sizeof (iwp_rxon_cmd_t), 1);
1829 			if (err != IWP_SUCCESS) {
1830 				cmn_err(CE_WARN, "iwp_newstate(): "
1831 				    "could not clear association\n");
1832 				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
1833 				mutex_exit(&sc->sc_glock);
1834 				return (err);
1835 			}
1836 
1837 			/* add broadcast node to send probe request */
1838 			(void) memset(&node, 0, sizeof (node));
1839 			(void) memset(&node.sta.addr, 0xff, IEEE80211_ADDR_LEN);
1840 			node.sta.sta_id = IWP_BROADCAST_ID;
1841 			err = iwp_cmd(sc, REPLY_ADD_STA, &node,
1842 			    sizeof (node), 1);
1843 			if (err != IWP_SUCCESS) {
1844 				cmn_err(CE_WARN, "iwp_newstate(): "
1845 				    "could not add broadcast node\n");
1846 				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
1847 				mutex_exit(&sc->sc_glock);
1848 				return (err);
1849 			}
1850 			break;
1851 		case IEEE80211_S_SCAN:
1852 			mutex_exit(&sc->sc_glock);
1853 			/* step to next channel before actual FW scan */
1854 			err = sc->sc_newstate(ic, nstate, arg);
1855 			mutex_enter(&sc->sc_glock);
1856 			if ((err != 0) || ((err = iwp_scan(sc)) != 0)) {
1857 				cmn_err(CE_WARN, "iwp_newstate(): "
1858 				    "could not initiate scan\n");
1859 				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
1860 				ieee80211_cancel_scan(ic);
1861 			}
1862 			mutex_exit(&sc->sc_glock);
1863 			return (err);
1864 		default:
1865 			break;
1866 		}
1867 		sc->sc_clk = 0;
1868 		break;
1869 
1870 	case IEEE80211_S_AUTH:
1871 		if (ostate == IEEE80211_S_SCAN) {
1872 			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
1873 		}
1874 
1875 		/*
1876 		 * reset state to handle reassociations correctly
1877 		 */
1878 		sc->sc_config.assoc_id = 0;
1879 		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1880 
1881 		/*
1882 		 * before sending authentication and association request frame,
1883 		 * we need do something in the hardware, such as setting the
1884 		 * channel same to the target AP...
1885 		 */
1886 		if ((err = iwp_hw_set_before_auth(sc)) != 0) {
1887 			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
1888 			    "could not send authentication request\n"));
1889 			mutex_exit(&sc->sc_glock);
1890 			return (err);
1891 		}
1892 		break;
1893 
1894 	case IEEE80211_S_RUN:
1895 		if (ostate == IEEE80211_S_SCAN) {
1896 			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
1897 		}
1898 
1899 		if (IEEE80211_M_MONITOR == ic->ic_opmode) {
1900 			/* let LED blink when monitoring */
1901 			iwp_set_led(sc, 2, 10, 10);
1902 			break;
1903 		}
1904 
1905 		IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
1906 		    "associated.\n"));
1907 
1908 		err = iwp_run_state_config(sc);
1909 		if (err != IWP_SUCCESS) {
1910 			cmn_err(CE_WARN, "iwp_newstate(): "
1911 			    "failed to set up association\n");
1912 			mutex_exit(&sc->sc_glock);
1913 			return (err);
1914 		}
1915 
1916 		/*
1917 		 * start automatic rate control
1918 		 */
1919 		if (IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) {
1920 			atomic_or_32(&sc->sc_flags, IWP_F_RATE_AUTO_CTL);
1921 
1922 			/*
1923 			 * set rate to some reasonable initial value
1924 			 */
1925 			i = in->in_rates.ir_nrates - 1;
1926 			while (i > 0 && IEEE80211_RATE(i) > 72) {
1927 				i--;
1928 			}
1929 			in->in_txrate = i;
1930 
1931 		} else {
1932 			atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
1933 		}
1934 
1935 		/*
1936 		 * set LED on after associated
1937 		 */
1938 		iwp_set_led(sc, 2, 0, 1);
1939 		break;
1940 
1941 	case IEEE80211_S_INIT:
1942 		if (ostate == IEEE80211_S_SCAN) {
1943 			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
1944 		}
1945 		/*
1946 		 * set LED off after init
1947 		 */
1948 		iwp_set_led(sc, 2, 1, 0);
1949 		break;
1950 
1951 	case IEEE80211_S_ASSOC:
1952 		if (ostate == IEEE80211_S_SCAN) {
1953 			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
1954 		}
1955 		break;
1956 	}
1957 
1958 	mutex_exit(&sc->sc_glock);
1959 
1960 	return (sc->sc_newstate(ic, nstate, arg));
1961 }
1962 
1963 /*
1964  * exclusive access to mac begin.
1965  */
1966 static void
iwp_mac_access_enter(iwp_sc_t * sc)1967 iwp_mac_access_enter(iwp_sc_t *sc)
1968 {
1969 	uint32_t tmp;
1970 	int n;
1971 
1972 	tmp = IWP_READ(sc, CSR_GP_CNTRL);
1973 	IWP_WRITE(sc, CSR_GP_CNTRL,
1974 	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1975 
1976 	/* wait until we succeed */
1977 	for (n = 0; n < 1000; n++) {
1978 		if ((IWP_READ(sc, CSR_GP_CNTRL) &
1979 		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1980 		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1981 		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) {
1982 			break;
1983 		}
1984 		DELAY(10);
1985 	}
1986 
1987 #ifdef	DEBUG
1988 	if (1000 == n) {
1989 		IWP_DBG((IWP_DEBUG_PIO, "iwp_mac_access_enter(): "
1990 		    "could not lock memory\n"));
1991 	}
1992 #endif
1993 }
1994 
1995 /*
1996  * exclusive access to mac end.
1997  */
1998 static void
iwp_mac_access_exit(iwp_sc_t * sc)1999 iwp_mac_access_exit(iwp_sc_t *sc)
2000 {
2001 	uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
2002 	IWP_WRITE(sc, CSR_GP_CNTRL,
2003 	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2004 }
2005 
2006 /*
2007  * this function defined here for future use.
2008  * static uint32_t
2009  * iwp_mem_read(iwp_sc_t *sc, uint32_t addr)
2010  * {
2011  * 	IWP_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
2012  * 	return (IWP_READ(sc, HBUS_TARG_MEM_RDAT));
2013  * }
2014  */
2015 
2016 /*
2017  * write mac memory
2018  */
2019 static void
iwp_mem_write(iwp_sc_t * sc,uint32_t addr,uint32_t data)2020 iwp_mem_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
2021 {
2022 	IWP_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
2023 	IWP_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
2024 }
2025 
2026 /*
2027  * read mac register
2028  */
2029 static uint32_t
iwp_reg_read(iwp_sc_t * sc,uint32_t addr)2030 iwp_reg_read(iwp_sc_t *sc, uint32_t addr)
2031 {
2032 	IWP_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
2033 	return (IWP_READ(sc, HBUS_TARG_PRPH_RDAT));
2034 }
2035 
2036 /*
2037  * write mac register
2038  */
2039 static void
iwp_reg_write(iwp_sc_t * sc,uint32_t addr,uint32_t data)2040 iwp_reg_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
2041 {
2042 	IWP_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
2043 	IWP_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
2044 }
2045 
2046 
2047 /*
2048  * steps of loading ucode:
2049  * load init ucode=>init alive=>calibrate=>
2050  * receive calibration result=>reinitialize NIC=>
2051  * load runtime ucode=>runtime alive=>
2052  * send calibration result=>running.
2053  */
2054 static int
iwp_load_init_firmware(iwp_sc_t * sc)2055 iwp_load_init_firmware(iwp_sc_t *sc)
2056 {
2057 	int	err = IWP_FAIL;
2058 	clock_t	clk;
2059 
2060 	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);
2061 
2062 	/*
2063 	 * load init_text section of uCode to hardware
2064 	 */
2065 	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_text.cookie.dmac_address,
2066 	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_init_text.cookie.dmac_size);
2067 	if (err != IWP_SUCCESS) {
2068 		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
2069 		    "failed to write init uCode.\n");
2070 		return (err);
2071 	}
2072 
2073 	clk = ddi_get_lbolt() + drv_usectohz(1000000);
2074 
2075 	/* wait loading init_text until completed or timeout */
2076 	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2077 		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2078 			break;
2079 		}
2080 	}
2081 
2082 	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2083 		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
2084 		    "timeout waiting for init uCode load.\n");
2085 		return (IWP_FAIL);
2086 	}
2087 
2088 	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);
2089 
2090 	/*
2091 	 * load init_data section of uCode to hardware
2092 	 */
2093 	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_data.cookie.dmac_address,
2094 	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_init_data.cookie.dmac_size);
2095 	if (err != IWP_SUCCESS) {
2096 		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
2097 		    "failed to write init_data uCode.\n");
2098 		return (err);
2099 	}
2100 
2101 	clk = ddi_get_lbolt() + drv_usectohz(1000000);
2102 
2103 	/*
2104 	 * wait loading init_data until completed or timeout
2105 	 */
2106 	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2107 		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2108 			break;
2109 		}
2110 	}
2111 
2112 	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2113 		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
2114 		    "timeout waiting for init_data uCode load.\n");
2115 		return (IWP_FAIL);
2116 	}
2117 
2118 	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);
2119 
2120 	return (err);
2121 }
2122 
/*
 * Load the runtime ucode (text and data sections) into device SRAM.
 * Same segment-upload/cv_timedwait protocol as
 * iwp_load_init_firmware(); must be called with sc_glock held.
 */
static int
iwp_load_run_firmware(iwp_sc_t *sc)
{
	int	err = IWP_FAIL;
	clock_t	clk;

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load text section of runtime uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_text.cookie.dmac_address,
	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_text.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "failed to write run uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/* wait loading run_text until completed or timeout */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "timeout waiting for run uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load run_data section of uCode to hardware.
	 * The pristine backup image (fw_data_bak) is uploaded; its
	 * size is taken from fw_data — both buffers were allocated
	 * with the same datasz in iwp_alloc_fw_dma().
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_data_bak.cookie.dmac_address,
	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_data.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "failed to write run_data uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/*
	 * wait loading run_data until completed or timeout
	 */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "timeout waiting for run_data uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	return (err);
}
2191 
2192 /*
2193  * this function will be invoked to receive phy information
2194  * when a frame is received.
2195  */
2196 static void
iwp_rx_phy_intr(iwp_sc_t * sc,iwp_rx_desc_t * desc)2197 iwp_rx_phy_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2198 {
2199 
2200 	sc->sc_rx_phy_res.flag = 1;
2201 
2202 	(void) memcpy(sc->sc_rx_phy_res.buf, (uint8_t *)(desc + 1),
2203 	    sizeof (iwp_rx_phy_res_t));
2204 }
2205 
/*
 * this function will be invoked to receive body of frame when
 * a frame is received.
 *
 * The descriptor payload starts with a struct iwp_rx_mpdu_body_size
 * followed by the raw 802.11 frame and a trailing 32-bit status word.
 * The PHY statistics for this frame were saved earlier by
 * iwp_rx_phy_intr() in sc->sc_rx_phy_res.
 */
static void
iwp_rx_mpdu_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	ieee80211com_t	*ic = &sc->sc_ic;
#ifdef	DEBUG
	iwp_rx_ring_t	*ring = &sc->sc_rxq;
#endif
	struct ieee80211_frame		*wh;
	struct iwp_rx_non_cfg_phy	*phyinfo;
	struct	iwp_rx_mpdu_body_size	*mpdu_size;

	mblk_t			*mp;
	int16_t			t;
	uint16_t		len, rssi, agc;
	uint32_t		temp, crc, *tail;
	uint32_t		arssi, brssi, crssi, mrssi;
	iwp_rx_phy_res_t	*stat;
	ieee80211_node_t	*in;

	/*
	 * assuming not 11n here. cope with 11n in phase-II
	 */
	mpdu_size = (struct iwp_rx_mpdu_body_size *)(desc + 1);
	stat = (iwp_rx_phy_res_t *)sc->sc_rx_phy_res.buf;
	if (stat->cfg_phy_cnt > 20) {
		return;
	}

	/*
	 * extract the AGC setting and the per-chain (A/B/C) RSSI
	 * readings from the non-configuration PHY data.
	 */
	phyinfo = (struct iwp_rx_non_cfg_phy *)stat->non_cfg_phy;
	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_AGC_IDX]);
	agc = (temp & IWP_OFDM_AGC_MSK) >> IWP_OFDM_AGC_BIT_POS;

	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_AB_IDX]);
	arssi = (temp & IWP_OFDM_RSSI_A_MSK) >> IWP_OFDM_RSSI_A_BIT_POS;
	brssi = (temp & IWP_OFDM_RSSI_B_MSK) >> IWP_OFDM_RSSI_B_BIT_POS;

	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_C_IDX]);
	crssi = (temp & IWP_OFDM_RSSI_C_MSK) >> IWP_OFDM_RSSI_C_BIT_POS;

	/* report the strongest of the three receive chains */
	mrssi = MAX(arssi, brssi);
	mrssi = MAX(mrssi, crssi);

	t = mrssi - agc - IWP_RSSI_OFFSET;
	/*
	 * convert dBm to percentage
	 */
	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
	    / (75 * 75);
	if (rssi > 100) {
		rssi = 100;
	}
	if (rssi < 1) {
		rssi = 1;
	}

	/*
	 * size of frame, not include FCS
	 */
	len = LE_16(mpdu_size->byte_count);
	/* the 32-bit RX status word follows the frame body */
	tail = (uint32_t *)((uint8_t *)(desc + 1) +
	    sizeof (struct iwp_rx_mpdu_body_size) + len);
	bcopy(tail, &crc, 4);

	IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
	    "rx intr: idx=%d phy_len=%x len=%d "
	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
	    len, stat->rate.r.s.rate, stat->channel,
	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
	    stat->cfg_phy_cnt, LE_32(crc)));

	/* sanity check the frame length against the DMA buffer size */
	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx frame oversize\n"));
		return;
	}

	/*
	 * discard Rx frames with bad CRC
	 */
	if ((LE_32(crc) &
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx crc error tail: %x\n",
		    LE_32(crc)));
		sc->sc_rx_err++;
		return;
	}

	wh = (struct ieee80211_frame *)
	    ((uint8_t *)(desc + 1)+ sizeof (struct iwp_rx_mpdu_body_size));

	/* remember the association ID from association responses */
	if (IEEE80211_FC0_SUBTYPE_ASSOC_RESP == *(uint8_t *)wh) {
		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx : association id = %x\n",
		    sc->sc_assoc_id));
	}

#ifdef DEBUG
	if (iwp_dbg_flags & IWP_DEBUG_RX) {
		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
	}
#endif

	in = ieee80211_find_rxnode(ic, wh);
	mp = allocb(len, BPRI_MED);
	if (mp) {
		(void) memcpy(mp->b_wptr, wh, len);
		mp->b_wptr += len;

		/*
		 * send the frame to the 802.11 layer
		 */
		(void) ieee80211_input(ic, mp, in, rssi, 0);
	} else {
		sc->sc_rx_nobuf++;
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "alloc rx buf failed\n"));
	}

	/*
	 * release node reference
	 */
	ieee80211_free_node(in);
}
2337 
2338 /*
2339  * process correlative affairs after a frame is sent.
2340  */
2341 static void
iwp_tx_intr(iwp_sc_t * sc,iwp_rx_desc_t * desc)2342 iwp_tx_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2343 {
2344 	ieee80211com_t *ic = &sc->sc_ic;
2345 	iwp_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2346 	iwp_tx_stat_t *stat = (iwp_tx_stat_t *)(desc + 1);
2347 	iwp_amrr_t *amrr;
2348 
2349 	if (NULL == ic->ic_bss) {
2350 		return;
2351 	}
2352 
2353 	amrr = (iwp_amrr_t *)ic->ic_bss;
2354 
2355 	amrr->txcnt++;
2356 	IWP_DBG((IWP_DEBUG_RATECTL, "iwp_tx_intr(): "
2357 	    "tx: %d cnt\n", amrr->txcnt));
2358 
2359 	if (stat->ntries > 0) {
2360 		amrr->retrycnt++;
2361 		sc->sc_tx_retries++;
2362 		IWP_DBG((IWP_DEBUG_TX, "iwp_tx_intr(): "
2363 		    "tx: %d retries\n",
2364 		    sc->sc_tx_retries));
2365 	}
2366 
2367 	mutex_enter(&sc->sc_mt_lock);
2368 	sc->sc_tx_timer = 0;
2369 	mutex_exit(&sc->sc_mt_lock);
2370 
2371 	mutex_enter(&sc->sc_tx_lock);
2372 
2373 	ring->queued--;
2374 	if (ring->queued < 0) {
2375 		ring->queued = 0;
2376 	}
2377 
2378 	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count >> 3))) {
2379 		sc->sc_need_reschedule = 0;
2380 		mutex_exit(&sc->sc_tx_lock);
2381 		mac_tx_update(ic->ic_mach);
2382 		mutex_enter(&sc->sc_tx_lock);
2383 	}
2384 
2385 	mutex_exit(&sc->sc_tx_lock);
2386 }
2387 
2388 /*
2389  * inform a given command has been executed
2390  */
2391 static void
iwp_cmd_intr(iwp_sc_t * sc,iwp_rx_desc_t * desc)2392 iwp_cmd_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2393 {
2394 	if ((desc->hdr.qid & 7) != 4) {
2395 		return;
2396 	}
2397 
2398 	if (sc->sc_cmd_accum > 0) {
2399 		sc->sc_cmd_accum--;
2400 		return;
2401 	}
2402 
2403 	mutex_enter(&sc->sc_glock);
2404 
2405 	sc->sc_cmd_flag = SC_CMD_FLG_DONE;
2406 
2407 	cv_signal(&sc->sc_cmd_cv);
2408 
2409 	mutex_exit(&sc->sc_glock);
2410 
2411 	IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd_intr(): "
2412 	    "qid=%x idx=%d flags=%x type=0x%x\n",
2413 	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2414 	    desc->hdr.type));
2415 }
2416 
2417 /*
2418  * this function will be invoked when alive notification occur.
2419  */
2420 static void
iwp_ucode_alive(iwp_sc_t * sc,iwp_rx_desc_t * desc)2421 iwp_ucode_alive(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2422 {
2423 	uint32_t rv;
2424 	struct iwp_calib_cfg_cmd cmd;
2425 	struct iwp_alive_resp *ar =
2426 	    (struct iwp_alive_resp *)(desc + 1);
2427 	struct iwp_calib_results *res_p = &sc->sc_calib_results;
2428 
2429 	/*
2430 	 * the microcontroller is ready
2431 	 */
2432 	IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2433 	    "microcode alive notification minor: %x major: %x type: "
2434 	    "%x subtype: %x\n",
2435 	    ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype));
2436 
2437 #ifdef	DEBUG
2438 	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2439 		IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2440 		    "microcontroller initialization failed\n"));
2441 	}
2442 #endif
2443 
2444 	/*
2445 	 * determine if init alive or runtime alive.
2446 	 */
2447 	if (INITIALIZE_SUBTYPE == ar->ver_subtype) {
2448 		IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2449 		    "initialization alive received.\n"));
2450 
2451 		(void) memcpy(&sc->sc_card_alive_init, ar,
2452 		    sizeof (struct iwp_init_alive_resp));
2453 
2454 		/*
2455 		 * necessary configuration to NIC
2456 		 */
2457 		mutex_enter(&sc->sc_glock);
2458 
2459 		rv = iwp_alive_common(sc);
2460 		if (rv != IWP_SUCCESS) {
2461 			cmn_err(CE_WARN, "iwp_ucode_alive(): "
2462 			    "common alive process failed in init alive.\n");
2463 			mutex_exit(&sc->sc_glock);
2464 			return;
2465 		}
2466 
2467 		(void) memset(&cmd, 0, sizeof (cmd));
2468 
2469 		cmd.ucd_calib_cfg.once.is_enable = IWP_CALIB_INIT_CFG_ALL;
2470 		cmd.ucd_calib_cfg.once.start = IWP_CALIB_INIT_CFG_ALL;
2471 		cmd.ucd_calib_cfg.once.send_res = IWP_CALIB_INIT_CFG_ALL;
2472 		cmd.ucd_calib_cfg.flags = IWP_CALIB_INIT_CFG_ALL;
2473 
2474 		/*
2475 		 * require ucode execute calibration
2476 		 */
2477 		rv = iwp_cmd(sc, CALIBRATION_CFG_CMD, &cmd, sizeof (cmd), 1);
2478 		if (rv != IWP_SUCCESS) {
2479 			cmn_err(CE_WARN, "iwp_ucode_alive(): "
2480 			    "failed to send calibration configure command.\n");
2481 			mutex_exit(&sc->sc_glock);
2482 			return;
2483 		}
2484 
2485 		mutex_exit(&sc->sc_glock);
2486 
2487 	} else {	/* runtime alive */
2488 
2489 		IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2490 		    "runtime alive received.\n"));
2491 
2492 		(void) memcpy(&sc->sc_card_alive_run, ar,
2493 		    sizeof (struct iwp_alive_resp));
2494 
2495 		mutex_enter(&sc->sc_glock);
2496 
2497 		/*
2498 		 * necessary configuration to NIC
2499 		 */
2500 		rv = iwp_alive_common(sc);
2501 		if (rv != IWP_SUCCESS) {
2502 			cmn_err(CE_WARN, "iwp_ucode_alive(): "
2503 			    "common alive process failed in run alive.\n");
2504 			mutex_exit(&sc->sc_glock);
2505 			return;
2506 		}
2507 
2508 		/*
2509 		 * send the result of local oscilator calibration to uCode.
2510 		 */
2511 		if (res_p->lo_res != NULL) {
2512 			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2513 			    res_p->lo_res, res_p->lo_res_len, 1);
2514 			if (rv != IWP_SUCCESS) {
2515 				cmn_err(CE_WARN, "iwp_ucode_alive(): "
2516 				    "failed to send local"
2517 				    "oscilator calibration command.\n");
2518 				mutex_exit(&sc->sc_glock);
2519 				return;
2520 			}
2521 
2522 			DELAY(1000);
2523 		}
2524 
2525 		/*
2526 		 * send the result of TX IQ calibration to uCode.
2527 		 */
2528 		if (res_p->tx_iq_res != NULL) {
2529 			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2530 			    res_p->tx_iq_res, res_p->tx_iq_res_len, 1);
2531 			if (rv != IWP_SUCCESS) {
2532 				cmn_err(CE_WARN, "iwp_ucode_alive(): "
2533 				    "failed to send TX IQ"
2534 				    "calibration command.\n");
2535 				mutex_exit(&sc->sc_glock);
2536 				return;
2537 			}
2538 
2539 			DELAY(1000);
2540 		}
2541 
2542 		/*
2543 		 * send the result of TX IQ perd calibration to uCode.
2544 		 */
2545 		if (res_p->tx_iq_perd_res != NULL) {
2546 			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2547 			    res_p->tx_iq_perd_res,
2548 			    res_p->tx_iq_perd_res_len, 1);
2549 			if (rv != IWP_SUCCESS) {
2550 				cmn_err(CE_WARN, "iwp_ucode_alive(): "
2551 				    "failed to send TX IQ perd"
2552 				    "calibration command.\n");
2553 				mutex_exit(&sc->sc_glock);
2554 				return;
2555 			}
2556 
2557 			DELAY(1000);
2558 		}
2559 
2560 		/*
2561 		 * send the result of Base Band calibration to uCode.
2562 		 */
2563 		if (res_p->base_band_res != NULL) {
2564 			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2565 			    res_p->base_band_res,
2566 			    res_p->base_band_res_len, 1);
2567 			if (rv != IWP_SUCCESS) {
2568 				cmn_err(CE_WARN, "iwp_ucode_alive(): "
2569 				    "failed to send Base Band"
2570 				    "calibration command.\n");
2571 				mutex_exit(&sc->sc_glock);
2572 				return;
2573 			}
2574 
2575 			DELAY(1000);
2576 		}
2577 
2578 		atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
2579 		cv_signal(&sc->sc_ucode_cv);
2580 
2581 		mutex_exit(&sc->sc_glock);
2582 	}
2583 
2584 }
2585 
/*
 * deal with receiving frames, command response
 * and all notifications from ucode.
 *
 * Soft interrupt handler: drains the RX ring from sc_rxq.cur up to the
 * index the firmware last wrote (sc_shared->val0), dispatching each
 * notification by type, then tells the firmware how far the driver has
 * consumed and re-enables hardware interrupts.
 */
/* ARGSUSED */
static uint_t
iwp_rx_softintr(caddr_t arg, caddr_t unused)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	iwp_rx_desc_t *desc;
	iwp_rx_data_t *data;
	uint32_t index;

	if (NULL == arg) {
		return (DDI_INTR_UNCLAIMED);
	}
	sc = (iwp_sc_t *)arg;
	ic = &sc->sc_ic;

	/*
	 * firmware has moved the index of the rx queue, driver get it,
	 * and deal with it.
	 */
	index = (sc->sc_shared->val0) & 0xfff;

	while (sc->sc_rxq.cur != index) {
		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
		desc = (iwp_rx_desc_t *)data->dma_data.mem_va;

		IWP_DBG((IWP_DEBUG_INTR, "iwp_rx_softintr(): "
		    "rx notification index = %d"
		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));

		/*
		 * a command other than a tx need to be replied
		 */
		if (!(desc->hdr.qid & 0x80) &&
		    (desc->hdr.type != REPLY_SCAN_CMD) &&
		    (desc->hdr.type != REPLY_TX)) {
			iwp_cmd_intr(sc, desc);
		}

		switch (desc->hdr.type) {
		case REPLY_RX_PHY_CMD:
			iwp_rx_phy_intr(sc, desc);
			break;

		case REPLY_RX_MPDU_CMD:
			iwp_rx_mpdu_intr(sc, desc);
			break;

		case REPLY_TX:
			iwp_tx_intr(sc, desc);
			break;

		case REPLY_ALIVE:
			iwp_ucode_alive(sc, desc);
			break;

		case CARD_STATE_NOTIFICATION:
		{
			uint32_t *status = (uint32_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_RADIO, "iwp_rx_softintr(): "
			    "state changed to %x\n",
			    LE_32(*status)));

			if (LE_32(*status) & 1) {
				/*
				 * the radio button has to be pushed(OFF). It
				 * is considered as a hw error, the
				 * iwp_thread() tries to recover it after the
				 * button is pushed again(ON)
				 */
				cmn_err(CE_NOTE, "iwp_rx_softintr(): "
				    "radio transmitter is off\n");
				sc->sc_ostate = sc->sc_ic.ic_state;
				ieee80211_new_state(&sc->sc_ic,
				    IEEE80211_S_INIT, -1);
				atomic_or_32(&sc->sc_flags,
				    IWP_F_HW_ERR_RECOVER | IWP_F_RADIO_OFF);
			}

			break;
		}

		case SCAN_START_NOTIFICATION:
		{
			iwp_start_scan_t *scan =
			    (iwp_start_scan_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
			    "scanning channel %d status %x\n",
			    scan->chan, LE_32(scan->status)));

			/* track the channel the firmware is scanning */
			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
			break;
		}

		case SCAN_COMPLETE_NOTIFICATION:
		{
#ifdef	DEBUG
			iwp_stop_scan_t *scan =
			    (iwp_stop_scan_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
			    "completed channel %d (burst of %d) status %02x\n",
			    scan->chan, scan->nchan, scan->status));
#endif

			/* iwp_thread() advances the scan state machine */
			sc->sc_scan_pending++;
			break;
		}

		case STATISTICS_NOTIFICATION:
		{
			/*
			 * handle statistics notification
			 */
			break;
		}

		case CALIBRATION_RES_NOTIFICATION:
			iwp_save_calib_result(sc, desc);
			break;

		case CALIBRATION_COMPLETE_NOTIFICATION:
			/* wake the thread waiting for init calibration */
			mutex_enter(&sc->sc_glock);
			atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
			cv_signal(&sc->sc_ucode_cv);
			mutex_exit(&sc->sc_glock);
			break;

		case MISSED_BEACONS_NOTIFICATION:
		{
			struct iwp_beacon_missed *miss =
			    (struct iwp_beacon_missed *)(desc + 1);

			/* too many consecutive missed beacons: drop the BSS */
			if ((ic->ic_state == IEEE80211_S_RUN) &&
			    (LE_32(miss->consecutive) > 50)) {
				cmn_err(CE_NOTE, "iwp: iwp_rx_softintr(): "
				    "beacon missed %d/%d\n",
				    LE_32(miss->consecutive),
				    LE_32(miss->total));
				(void) ieee80211_new_state(ic,
				    IEEE80211_S_INIT, -1);
			}
			break;
		}
		}

		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
	}

	/*
	 * driver dealt with what received in rx queue and tell the information
	 * to the firmware.
	 */
	index = (0 == index) ? RX_QUEUE_SIZE - 1 : index - 1;
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));

	/*
	 * re-enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	return (DDI_INTR_CLAIMED);
}
2757 
/*
 * the handle of interrupt
 *
 * Primary hardware interrupt handler: reads and acknowledges the
 * interrupt status registers, handles fatal errors and RF-kill
 * directly, and hands RX work off to the iwp_rx_softintr() soft
 * interrupt.
 */
/* ARGSUSED */
static uint_t
iwp_intr(caddr_t arg, caddr_t unused)
{
	iwp_sc_t *sc;
	uint32_t r, rfh;

	if (NULL == arg) {
		return (DDI_INTR_UNCLAIMED);
	}
	sc = (iwp_sc_t *)arg;

	/* 0xffffffff indicates the device is no longer present */
	r = IWP_READ(sc, CSR_INT);
	if (0 == r || 0xffffffff == r) {
		return (DDI_INTR_UNCLAIMED);
	}

	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
	    "interrupt reg %x\n", r));

	rfh = IWP_READ(sc, CSR_FH_INT_STATUS);

	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
	    "FH interrupt reg %x\n", rfh));

	/*
	 * disable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, 0);

	/*
	 * ack interrupts
	 */
	IWP_WRITE(sc, CSR_INT, r);
	IWP_WRITE(sc, CSR_FH_INT_STATUS, rfh);

	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
		IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
		    "fatal firmware error\n"));
		/* stop the chip; iwp_thread() performs the recovery */
		iwp_stop(sc);
		sc->sc_ostate = sc->sc_ic.ic_state;

		/* notify upper layer */
		if (!IWP_CHK_FAST_RECOVER(sc)) {
			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
		}

		atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
		return (DDI_INTR_CLAIMED);
	}

	if (r & BIT_INT_RF_KILL) {
		uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
		if (tmp & (1 << 27)) {
			cmn_err(CE_NOTE, "RF switch: radio on\n");
		}
	}

	/*
	 * RX work is deferred to the soft interrupt, which also
	 * re-enables interrupts when it is done.
	 */
	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
	    (rfh & FH_INT_RX_MASK)) {
		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
		return (DDI_INTR_CLAIMED);
	}

	/* a firmware segment finished loading; wake the loader */
	if (r & BIT_INT_FH_TX) {
		mutex_enter(&sc->sc_glock);
		atomic_or_32(&sc->sc_flags, IWP_F_PUT_SEG);
		cv_signal(&sc->sc_put_seg_cv);
		mutex_exit(&sc->sc_glock);
	}

#ifdef	DEBUG
	if (r & BIT_INT_ALIVE)	{
		IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
		    "firmware initialized.\n"));
	}
#endif

	/*
	 * re-enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	return (DDI_INTR_CLAIMED);
}
2846 
/*
 * Map a net80211 rate (in 500kb/s units) to the PLCP signal value
 * expected by the firmware TX command.  Unknown rates map to 0.
 */
static uint8_t
iwp_rate_to_plcp(int rate)
{
	static const struct {
		int	rate;	/* 500kb/s units */
		uint8_t	plcp;
	} rate2plcp[] = {
		/*
		 * CCK rates
		 */
		{ 2,	0xa },
		{ 4,	0x14 },
		{ 11,	0x37 },
		{ 22,	0x6e },

		/*
		 * OFDM rates
		 */
		{ 12,	0xd },
		{ 18,	0xf },
		{ 24,	0x5 },
		{ 36,	0x7 },
		{ 48,	0x9 },
		{ 72,	0xb },
		{ 96,	0x1 },
		{ 108,	0x3 }
	};
	int i;

	for (i = 0; i < (int)(sizeof (rate2plcp) / sizeof (rate2plcp[0]));
	    i++) {
		if (rate2plcp[i].rate == rate) {
			return (rate2plcp[i].plcp);
		}
	}

	return (0);
}
2914 
2915 /*
2916  * invoked by GLD send frames
2917  */
2918 static mblk_t *
iwp_m_tx(void * arg,mblk_t * mp)2919 iwp_m_tx(void *arg, mblk_t *mp)
2920 {
2921 	iwp_sc_t	*sc;
2922 	ieee80211com_t	*ic;
2923 	mblk_t		*next;
2924 
2925 	if (NULL == arg) {
2926 		return (NULL);
2927 	}
2928 	sc = (iwp_sc_t *)arg;
2929 	ic = &sc->sc_ic;
2930 
2931 	if (sc->sc_flags & IWP_F_SUSPEND) {
2932 		freemsgchain(mp);
2933 		return (NULL);
2934 	}
2935 
2936 	if (ic->ic_state != IEEE80211_S_RUN) {
2937 		freemsgchain(mp);
2938 		return (NULL);
2939 	}
2940 
2941 	if ((sc->sc_flags & IWP_F_HW_ERR_RECOVER) &&
2942 	    IWP_CHK_FAST_RECOVER(sc)) {
2943 		IWP_DBG((IWP_DEBUG_FW, "iwp_m_tx(): "
2944 		    "hold queue\n"));
2945 		return (mp);
2946 	}
2947 
2948 
2949 	while (mp != NULL) {
2950 		next = mp->b_next;
2951 		mp->b_next = NULL;
2952 		if (iwp_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2953 			mp->b_next = next;
2954 			break;
2955 		}
2956 		mp = next;
2957 	}
2958 
2959 	return (mp);
2960 }
2961 
/*
 * send frames
 *
 * Flattens the mblk chain into one buffer, builds a REPLY_TX command
 * around it and queues both on TX ring 0 for the firmware.  Returns
 * IWP_SUCCESS when the frame was consumed (sent or dropped) and
 * IWP_FAIL when the caller should hold and retry (suspended or ring
 * full) -- see the requeue handling in iwp_m_tx().
 */
static int
iwp_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
{
	iwp_sc_t *sc;
	iwp_tx_ring_t *ring;
	iwp_tx_desc_t *desc;
	iwp_tx_data_t *data;
	iwp_tx_data_t *desc_data;
	iwp_cmd_t *cmd;
	iwp_tx_cmd_t *tx;
	ieee80211_node_t *in;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	mblk_t *m, *m0;
	int hdrlen, len, len0, mblen, off, err = IWP_SUCCESS;
	uint16_t masks = 0;
	uint32_t rate, s_id = 0;

	if (NULL == ic) {
		return (IWP_FAIL);
	}
	sc = (iwp_sc_t *)ic;

	/* while suspended, only data frames are kept for retry */
	if (sc->sc_flags & IWP_F_SUSPEND) {
		if ((type & IEEE80211_FC0_TYPE_MASK) !=
		    IEEE80211_FC0_TYPE_DATA) {
			freemsg(mp);
		}
		err = IWP_FAIL;
		goto exit;
	}

	mutex_enter(&sc->sc_tx_lock);
	ring = &sc->sc_txq[0];
	data = &ring->data[ring->cur];
	cmd = data->cmd;
	bzero(cmd, sizeof (*cmd));

	ring->cur = (ring->cur + 1) % ring->count;

	/*
	 * Need reschedule TX if TX buffer is full.
	 */
	if (ring->queued > ring->count - IWP_MAX_WIN_SIZE) {
		IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
		"no txbuf\n"));

		sc->sc_need_reschedule = 1;
		mutex_exit(&sc->sc_tx_lock);

		if ((type & IEEE80211_FC0_TYPE_MASK) !=
		    IEEE80211_FC0_TYPE_DATA) {
			freemsg(mp);
		}
		sc->sc_tx_nobuf++;
		err = IWP_FAIL;
		goto exit;
	}

	ring->queued++;

	mutex_exit(&sc->sc_tx_lock);

	hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);

	/* flatten the chain; extra room for encapsulation/crypto */
	m = allocb(msgdsize(mp) + 32, BPRI_MED);
	if (NULL == m) { /* can not alloc buf, drop this package */
		cmn_err(CE_WARN, "iwp_send(): "
		    "failed to allocate msgbuf\n");
		freemsg(mp);

		mutex_enter(&sc->sc_tx_lock);
		ring->queued--;
		if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
			sc->sc_need_reschedule = 0;
			mutex_exit(&sc->sc_tx_lock);
			mac_tx_update(ic->ic_mach);
			mutex_enter(&sc->sc_tx_lock);
		}
		mutex_exit(&sc->sc_tx_lock);

		/* frame was consumed (dropped), so report success */
		err = IWP_SUCCESS;
		goto exit;
	}

	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
		mblen = MBLKL(m0);
		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
		off += mblen;
	}

	m->b_wptr += off;

	wh = (struct ieee80211_frame *)m->b_rptr;

	/*
	 * determine send which AP or station in IBSS
	 */
	in = ieee80211_find_txnode(ic, wh->i_addr1);
	if (NULL == in) {
		cmn_err(CE_WARN, "iwp_send(): "
		    "failed to find tx node\n");
		freemsg(mp);
		freemsg(m);
		sc->sc_tx_err++;

		mutex_enter(&sc->sc_tx_lock);
		ring->queued--;
		if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
			sc->sc_need_reschedule = 0;
			mutex_exit(&sc->sc_tx_lock);
			mac_tx_update(ic->ic_mach);
			mutex_enter(&sc->sc_tx_lock);
		}
		mutex_exit(&sc->sc_tx_lock);

		err = IWP_SUCCESS;
		goto exit;
	}

	/*
	 * Net80211 module encapsulate outbound data frames.
	 * Add some feilds of 80211 frame.
	 */
	if ((type & IEEE80211_FC0_TYPE_MASK) ==
	    IEEE80211_FC0_TYPE_DATA) {
		(void) ieee80211_encap(ic, m, in);
	}

	/* the original chain has been copied; release it */
	freemsg(mp);

	cmd->hdr.type = REPLY_TX;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;

	tx = (iwp_tx_cmd_t *)cmd->data;
	tx->tx_flags = 0;

	/* no ACK expected for multicast/broadcast frames */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
	} else {
		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
	}

	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, m);
		if (NULL == k) {
			freemsg(m);
			sc->sc_tx_err++;

			mutex_enter(&sc->sc_tx_lock);
			ring->queued--;
			if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
				sc->sc_need_reschedule = 0;
				mutex_exit(&sc->sc_tx_lock);
				mac_tx_update(ic->ic_mach);
				mutex_enter(&sc->sc_tx_lock);
			}
			mutex_exit(&sc->sc_tx_lock);

			err = IWP_SUCCESS;
			goto exit;
		}

		/* packet header may have moved, reset our local pointer */
		wh = (struct ieee80211_frame *)m->b_rptr;
	}

	len = msgdsize(m);

#ifdef DEBUG
	if (iwp_dbg_flags & IWP_DEBUG_TX) {
		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
	}
#endif

	tx->rts_retry_limit = IWP_TX_RTS_RETRY_LIMIT;
	tx->data_retry_limit = IWP_TX_DATA_RETRY_LIMIT;

	/*
	 * specific TX parameters for management frames
	 */
	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	    IEEE80211_FC0_TYPE_MGT) {
		/*
		 * mgmt frames are sent at 1M
		 */
		if ((in->in_rates.ir_rates[0] &
		    IEEE80211_RATE_VAL) != 0) {
			rate = in->in_rates.ir_rates[0] & IEEE80211_RATE_VAL;
		} else {
			rate = 2;
		}

		tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);

		/*
		 * tell h/w to set timestamp in probe responses
		 */
		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);

			tx->data_retry_limit = 3;
			if (tx->data_retry_limit < tx->rts_retry_limit) {
				tx->rts_retry_limit = tx->data_retry_limit;
			}
		}

		/* association requests get a slightly longer timeout */
		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) {
			tx->timeout.pm_frame_timeout = LE_16(3);
		} else {
			tx->timeout.pm_frame_timeout = LE_16(2);
		}

	} else {
		/*
		 * do it here for the software way rate scaling.
		 * later for rate scaling in hardware.
		 *
		 * now the txrate is determined in tx cmd flags, set to the
		 * max value 54M for 11g and 11M for 11b originally.
		 */
		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
			rate = ic->ic_fixed_rate;
		} else {
			if ((in->in_rates.ir_rates[in->in_txrate] &
			    IEEE80211_RATE_VAL) != 0) {
				rate = in->in_rates.
				    ir_rates[in->in_txrate] &
				    IEEE80211_RATE_VAL;
			}
		}

		tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);

		tx->timeout.pm_frame_timeout = 0;
	}

	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
	    "tx rate[%d of %d] = %x",
	    in->in_txrate, in->in_rates.ir_nrates, rate));

	/* flag the firmware when header padding is required */
	len0 = roundup(4 + sizeof (iwp_tx_cmd_t) + hdrlen, 4);
	if (len0 != (4 + sizeof (iwp_tx_cmd_t) + hdrlen)) {
		tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK);
	}

	/*
	 * retrieve destination node's id
	 */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		tx->sta_id = IWP_BROADCAST_ID;
	} else {
		tx->sta_id = IWP_AP_ID;
	}

	/* 1/2/5.5/11 Mb/s are the CCK modulation rates */
	if (2 == rate || 4 == rate || 11 == rate || 22 == rate) {
		masks |= RATE_MCS_CCK_MSK;
	}

	masks |= RATE_MCS_ANT_B_MSK;
	tx->rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(rate) | masks);

	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
	    "tx flag = %x",
	    tx->tx_flags));

	tx->stop_time.life_time  = LE_32(0xffffffff);

	tx->len = LE_16(len);

	tx->dram_lsb_ptr =
	    LE_32(data->paddr_cmd + 4 + offsetof(iwp_tx_cmd_t, scratch));
	tx->dram_msb_ptr = 0;
	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	/* 802.11 header travels inside the command, body via DMA buffer */
	(void) memcpy(tx + 1, m->b_rptr, hdrlen);
	m->b_rptr += hdrlen;
	(void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);

	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
	    "sending data: qid=%d idx=%d len=%d",
	    ring->qid, ring->cur, len));

	/*
	 * first segment includes the tx cmd plus the 802.11 header,
	 * the second includes the remaining of the 802.11 frame.
	 */
	mutex_enter(&sc->sc_tx_lock);

	cmd->hdr.idx = ring->desc_cur;

	desc_data = &ring->data[ring->desc_cur];
	desc = desc_data->desc;
	bzero(desc, sizeof (*desc));
	desc->val0 = 2 << 24;
	desc->pa[0].tb1_addr = data->paddr_cmd;
	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
	desc->pa[0].val2 =
	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
	    ((len - hdrlen) << 20);
	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
	    "phy addr1 = 0x%x phy addr2 = 0x%x "
	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));

	/*
	 * kick ring
	 */
	s_id = tx->sta_id;

	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
	    tfd_offset[ring->desc_cur].val =
	    (8 + len) | (s_id << 12);
	if (ring->desc_cur < IWP_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWP_QUEUE_SIZE + ring->desc_cur].val =
		    (8 + len) | (s_id << 12);
	}

	IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
	IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	ring->desc_cur = (ring->desc_cur + 1) % ring->count;
	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->desc_cur);

	mutex_exit(&sc->sc_tx_lock);
	freemsg(m);

	/*
	 * release node reference
	 */
	ieee80211_free_node(in);

	ic->ic_stats.is_tx_bytes += len;
	ic->ic_stats.is_tx_frags++;

	/* arm the TX watchdog; iwp_tx_intr() clears it on completion */
	mutex_enter(&sc->sc_mt_lock);
	if (0 == sc->sc_tx_timer) {
		sc->sc_tx_timer = 4;
	}
	mutex_exit(&sc->sc_mt_lock);

exit:
	return (err);
}
3318 
3319 /*
3320  * invoked by GLD to deal with IOCTL affaires
3321  */
3322 static void
iwp_m_ioctl(void * arg,queue_t * wq,mblk_t * mp)3323 iwp_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
3324 {
3325 	iwp_sc_t	*sc;
3326 	ieee80211com_t	*ic;
3327 	int		err = EINVAL;
3328 
3329 	if (NULL == arg) {
3330 		return;
3331 	}
3332 	sc = (iwp_sc_t *)arg;
3333 	ic = &sc->sc_ic;
3334 
3335 	err = ieee80211_ioctl(ic, wq, mp);
3336 	if (ENETRESET == err) {
3337 		/*
3338 		 * This is special for the hidden AP connection.
3339 		 * In any case, we should make sure only one 'scan'
3340 		 * in the driver for a 'connect' CLI command. So
3341 		 * when connecting to a hidden AP, the scan is just
3342 		 * sent out to the air when we know the desired
3343 		 * essid of the AP we want to connect.
3344 		 */
3345 		if (ic->ic_des_esslen) {
3346 			if (sc->sc_flags & IWP_F_RUNNING) {
3347 				iwp_m_stop(sc);
3348 				(void) iwp_m_start(sc);
3349 				(void) ieee80211_new_state(ic,
3350 				    IEEE80211_S_SCAN, -1);
3351 			}
3352 		}
3353 	}
3354 }
3355 
3356 /*
3357  * Call back functions for get/set proporty
3358  */
3359 static int
iwp_m_getprop(void * arg,const char * pr_name,mac_prop_id_t wldp_pr_num,uint_t wldp_length,void * wldp_buf)3360 iwp_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3361     uint_t wldp_length, void *wldp_buf)
3362 {
3363 	iwp_sc_t	*sc;
3364 	int		err = EINVAL;
3365 
3366 	if (NULL == arg) {
3367 		return (EINVAL);
3368 	}
3369 	sc = (iwp_sc_t *)arg;
3370 
3371 	err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3372 	    wldp_length, wldp_buf);
3373 
3374 	return (err);
3375 }
3376 
3377 static void
iwp_m_propinfo(void * arg,const char * pr_name,mac_prop_id_t wldp_pr_num,mac_prop_info_handle_t prh)3378 iwp_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3379     mac_prop_info_handle_t prh)
3380 {
3381 	iwp_sc_t	*sc;
3382 
3383 	sc = (iwp_sc_t *)arg;
3384 	ieee80211_propinfo(&sc->sc_ic, pr_name, wldp_pr_num, prh);
3385 }
3386 
3387 static int
iwp_m_setprop(void * arg,const char * pr_name,mac_prop_id_t wldp_pr_num,uint_t wldp_length,const void * wldp_buf)3388 iwp_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3389     uint_t wldp_length, const void *wldp_buf)
3390 {
3391 	iwp_sc_t		*sc;
3392 	ieee80211com_t		*ic;
3393 	int			err = EINVAL;
3394 
3395 	if (NULL == arg) {
3396 		return (EINVAL);
3397 	}
3398 	sc = (iwp_sc_t *)arg;
3399 	ic = &sc->sc_ic;
3400 
3401 	err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3402 	    wldp_buf);
3403 
3404 	if (err == ENETRESET) {
3405 		if (ic->ic_des_esslen) {
3406 			if (sc->sc_flags & IWP_F_RUNNING) {
3407 				iwp_m_stop(sc);
3408 				(void) iwp_m_start(sc);
3409 				(void) ieee80211_new_state(ic,
3410 				    IEEE80211_S_SCAN, -1);
3411 			}
3412 		}
3413 		err = 0;
3414 	}
3415 	return (err);
3416 }
3417 
3418 /*
 * invoked by GLD to supply NIC and driver statistics
3420  */
3421 static int
iwp_m_stat(void * arg,uint_t stat,uint64_t * val)3422 iwp_m_stat(void *arg, uint_t stat, uint64_t *val)
3423 {
3424 	iwp_sc_t	*sc;
3425 	ieee80211com_t	*ic;
3426 	ieee80211_node_t *in;
3427 
3428 	if (NULL == arg) {
3429 		return (EINVAL);
3430 	}
3431 	sc = (iwp_sc_t *)arg;
3432 	ic = &sc->sc_ic;
3433 
3434 	mutex_enter(&sc->sc_glock);
3435 
3436 	switch (stat) {
3437 	case MAC_STAT_IFSPEED:
3438 		in = ic->ic_bss;
3439 		*val = ((IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) ?
3440 		    IEEE80211_RATE(in->in_txrate) :
3441 		    ic->ic_fixed_rate) / 2 * 1000000;
3442 		break;
3443 	case MAC_STAT_NOXMTBUF:
3444 		*val = sc->sc_tx_nobuf;
3445 		break;
3446 	case MAC_STAT_NORCVBUF:
3447 		*val = sc->sc_rx_nobuf;
3448 		break;
3449 	case MAC_STAT_IERRORS:
3450 		*val = sc->sc_rx_err;
3451 		break;
3452 	case MAC_STAT_RBYTES:
3453 		*val = ic->ic_stats.is_rx_bytes;
3454 		break;
3455 	case MAC_STAT_IPACKETS:
3456 		*val = ic->ic_stats.is_rx_frags;
3457 		break;
3458 	case MAC_STAT_OBYTES:
3459 		*val = ic->ic_stats.is_tx_bytes;
3460 		break;
3461 	case MAC_STAT_OPACKETS:
3462 		*val = ic->ic_stats.is_tx_frags;
3463 		break;
3464 	case MAC_STAT_OERRORS:
3465 	case WIFI_STAT_TX_FAILED:
3466 		*val = sc->sc_tx_err;
3467 		break;
3468 	case WIFI_STAT_TX_RETRANS:
3469 		*val = sc->sc_tx_retries;
3470 		break;
3471 	case WIFI_STAT_FCS_ERRORS:
3472 	case WIFI_STAT_WEP_ERRORS:
3473 	case WIFI_STAT_TX_FRAGS:
3474 	case WIFI_STAT_MCAST_TX:
3475 	case WIFI_STAT_RTS_SUCCESS:
3476 	case WIFI_STAT_RTS_FAILURE:
3477 	case WIFI_STAT_ACK_FAILURE:
3478 	case WIFI_STAT_RX_FRAGS:
3479 	case WIFI_STAT_MCAST_RX:
3480 	case WIFI_STAT_RX_DUPS:
3481 		mutex_exit(&sc->sc_glock);
3482 		return (ieee80211_stat(ic, stat, val));
3483 	default:
3484 		mutex_exit(&sc->sc_glock);
3485 		return (ENOTSUP);
3486 	}
3487 
3488 	mutex_exit(&sc->sc_glock);
3489 
3490 	return (IWP_SUCCESS);
3491 
3492 }
3493 
3494 /*
3495  * invoked by GLD to start or open NIC
3496  */
3497 static int
iwp_m_start(void * arg)3498 iwp_m_start(void *arg)
3499 {
3500 	iwp_sc_t *sc;
3501 	ieee80211com_t	*ic;
3502 	int err = IWP_FAIL;
3503 
3504 	if (NULL == arg) {
3505 		return (EINVAL);
3506 	}
3507 	sc = (iwp_sc_t *)arg;
3508 	ic = &sc->sc_ic;
3509 
3510 	err = iwp_init(sc);
3511 	if (err != IWP_SUCCESS) {
3512 		/*
3513 		 * The hw init err(eg. RF is OFF). Return Success to make
3514 		 * the 'plumb' succeed. The iwp_thread() tries to re-init
3515 		 * background.
3516 		 */
3517 		atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
3518 		return (IWP_SUCCESS);
3519 	}
3520 
3521 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3522 
3523 	atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
3524 
3525 	return (IWP_SUCCESS);
3526 }
3527 
3528 /*
3529  * invoked by GLD to stop or down NIC
3530  */
3531 static void
iwp_m_stop(void * arg)3532 iwp_m_stop(void *arg)
3533 {
3534 	iwp_sc_t *sc;
3535 	ieee80211com_t	*ic;
3536 
3537 	if (NULL == arg) {
3538 		return;
3539 	}
3540 	sc = (iwp_sc_t *)arg;
3541 	ic = &sc->sc_ic;
3542 
3543 	iwp_stop(sc);
3544 
3545 	/*
3546 	 * release buffer for calibration
3547 	 */
3548 	iwp_release_calib_buffer(sc);
3549 
3550 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3551 
3552 	atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
3553 	atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
3554 
3555 	atomic_and_32(&sc->sc_flags, ~IWP_F_RUNNING);
3556 	atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
3557 }
3558 
3559 /*
3560  * invoked by GLD to configure NIC
3561  */
3562 static int
iwp_m_unicst(void * arg,const uint8_t * macaddr)3563 iwp_m_unicst(void *arg, const uint8_t *macaddr)
3564 {
3565 	iwp_sc_t *sc;
3566 	ieee80211com_t	*ic;
3567 	int err = IWP_SUCCESS;
3568 
3569 	if (NULL == arg) {
3570 		return (EINVAL);
3571 	}
3572 	sc = (iwp_sc_t *)arg;
3573 	ic = &sc->sc_ic;
3574 
3575 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3576 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3577 		mutex_enter(&sc->sc_glock);
3578 		err = iwp_config(sc);
3579 		mutex_exit(&sc->sc_glock);
3580 		if (err != IWP_SUCCESS) {
3581 			cmn_err(CE_WARN, "iwp_m_unicst(): "
3582 			    "failed to configure device\n");
3583 			goto fail;
3584 		}
3585 	}
3586 
3587 	return (err);
3588 
3589 fail:
3590 	return (err);
3591 }
3592 
3593 /* ARGSUSED */
3594 static int
iwp_m_multicst(void * arg,boolean_t add,const uint8_t * m)3595 iwp_m_multicst(void *arg, boolean_t add, const uint8_t *m)
3596 {
3597 	return (IWP_SUCCESS);
3598 }
3599 
3600 /* ARGSUSED */
3601 static int
iwp_m_promisc(void * arg,boolean_t on)3602 iwp_m_promisc(void *arg, boolean_t on)
3603 {
3604 	return (IWP_SUCCESS);
3605 }
3606 
3607 /*
3608  * kernel thread to deal with exceptional situation
3609  */
3610 static void
iwp_thread(iwp_sc_t * sc)3611 iwp_thread(iwp_sc_t *sc)
3612 {
3613 	ieee80211com_t	*ic = &sc->sc_ic;
3614 	clock_t clk;
3615 	int err, n = 0, timeout = 0;
3616 	uint32_t tmp;
3617 #ifdef	DEBUG
3618 	int times = 0;
3619 #endif
3620 
3621 	while (sc->sc_mf_thread_switch) {
3622 		tmp = IWP_READ(sc, CSR_GP_CNTRL);
3623 		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
3624 			atomic_and_32(&sc->sc_flags, ~IWP_F_RADIO_OFF);
3625 		} else {
3626 			atomic_or_32(&sc->sc_flags, IWP_F_RADIO_OFF);
3627 		}
3628 
3629 		/*
3630 		 * If  in SUSPEND or the RF is OFF, do nothing.
3631 		 */
3632 		if (sc->sc_flags & IWP_F_RADIO_OFF) {
3633 			delay(drv_usectohz(100000));
3634 			continue;
3635 		}
3636 
3637 		/*
3638 		 * recovery fatal error
3639 		 */
3640 		if (ic->ic_mach &&
3641 		    (sc->sc_flags & IWP_F_HW_ERR_RECOVER)) {
3642 
3643 			IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
3644 			    "try to recover fatal hw error: %d\n", times++));
3645 
3646 			iwp_stop(sc);
3647 
3648 			if (IWP_CHK_FAST_RECOVER(sc)) {
3649 				/* save runtime configuration */
3650 				bcopy(&sc->sc_config, &sc->sc_config_save,
3651 				    sizeof (sc->sc_config));
3652 			} else {
3653 				ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3654 				delay(drv_usectohz(2000000 + n*500000));
3655 			}
3656 
3657 			err = iwp_init(sc);
3658 			if (err != IWP_SUCCESS) {
3659 				n++;
3660 				if (n < 20) {
3661 					continue;
3662 				}
3663 			}
3664 
3665 			n = 0;
3666 			if (!err) {
3667 				atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
3668 			}
3669 
3670 
3671 			if (!IWP_CHK_FAST_RECOVER(sc) ||
3672 			    iwp_fast_recover(sc) != IWP_SUCCESS) {
3673 				atomic_and_32(&sc->sc_flags,
3674 				    ~IWP_F_HW_ERR_RECOVER);
3675 
3676 				delay(drv_usectohz(2000000));
3677 				if (sc->sc_ostate != IEEE80211_S_INIT) {
3678 					ieee80211_new_state(ic,
3679 					    IEEE80211_S_SCAN, 0);
3680 				}
3681 			}
3682 		}
3683 
3684 		if (ic->ic_mach &&
3685 		    (sc->sc_flags & IWP_F_SCANNING) && sc->sc_scan_pending) {
3686 			IWP_DBG((IWP_DEBUG_SCAN, "iwp_thread(): "
3687 			    "wait for probe response\n"));
3688 
3689 			sc->sc_scan_pending--;
3690 			delay(drv_usectohz(200000));
3691 			ieee80211_next_scan(ic);
3692 		}
3693 
3694 		/*
3695 		 * rate ctl
3696 		 */
3697 		if (ic->ic_mach &&
3698 		    (sc->sc_flags & IWP_F_RATE_AUTO_CTL)) {
3699 			clk = ddi_get_lbolt();
3700 			if (clk > sc->sc_clk + drv_usectohz(1000000)) {
3701 				iwp_amrr_timeout(sc);
3702 			}
3703 		}
3704 
3705 		delay(drv_usectohz(100000));
3706 
3707 		mutex_enter(&sc->sc_mt_lock);
3708 		if (sc->sc_tx_timer) {
3709 			timeout++;
3710 			if (10 == timeout) {
3711 				sc->sc_tx_timer--;
3712 				if (0 == sc->sc_tx_timer) {
3713 					atomic_or_32(&sc->sc_flags,
3714 					    IWP_F_HW_ERR_RECOVER);
3715 					sc->sc_ostate = IEEE80211_S_RUN;
3716 					IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
3717 					    "try to recover from "
3718 					    "send fail\n"));
3719 				}
3720 				timeout = 0;
3721 			}
3722 		}
3723 		mutex_exit(&sc->sc_mt_lock);
3724 	}
3725 
3726 	mutex_enter(&sc->sc_mt_lock);
3727 	sc->sc_mf_thread = NULL;
3728 	cv_signal(&sc->sc_mt_cv);
3729 	mutex_exit(&sc->sc_mt_lock);
3730 }
3731 
3732 
3733 /*
3734  * Send a command to the ucode.
3735  */
3736 static int
iwp_cmd(iwp_sc_t * sc,int code,const void * buf,int size,int async)3737 iwp_cmd(iwp_sc_t *sc, int code, const void *buf, int size, int async)
3738 {
3739 	iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
3740 	iwp_tx_desc_t *desc;
3741 	iwp_cmd_t *cmd;
3742 
3743 	ASSERT(size <= sizeof (cmd->data));
3744 	ASSERT(mutex_owned(&sc->sc_glock));
3745 
3746 	IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd() "
3747 	    "code[%d]", code));
3748 	desc = ring->data[ring->cur].desc;
3749 	cmd = ring->data[ring->cur].cmd;
3750 
3751 	cmd->hdr.type = (uint8_t)code;
3752 	cmd->hdr.flags = 0;
3753 	cmd->hdr.qid = ring->qid;
3754 	cmd->hdr.idx = ring->cur;
3755 	(void) memcpy(cmd->data, buf, size);
3756 	(void) memset(desc, 0, sizeof (*desc));
3757 
3758 	desc->val0 = 1 << 24;
3759 	desc->pa[0].tb1_addr =
3760 	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
3761 	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
3762 
3763 	if (async) {
3764 		sc->sc_cmd_accum++;
3765 	}
3766 
3767 	/*
3768 	 * kick cmd ring XXX
3769 	 */
3770 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3771 	    tfd_offset[ring->cur].val = 8;
3772 	if (ring->cur < IWP_MAX_WIN_SIZE) {
3773 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3774 		    tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
3775 	}
3776 	ring->cur = (ring->cur + 1) % ring->count;
3777 	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3778 
3779 	if (async) {
3780 		return (IWP_SUCCESS);
3781 	} else {
3782 		clock_t clk;
3783 
3784 		clk = ddi_get_lbolt() + drv_usectohz(2000000);
3785 		while (sc->sc_cmd_flag != SC_CMD_FLG_DONE) {
3786 			if (cv_timedwait(&sc->sc_cmd_cv,
3787 			    &sc->sc_glock, clk) < 0) {
3788 				break;
3789 			}
3790 		}
3791 
3792 		if (SC_CMD_FLG_DONE == sc->sc_cmd_flag) {
3793 			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
3794 			return (IWP_SUCCESS);
3795 		} else {
3796 			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
3797 			return (IWP_FAIL);
3798 		}
3799 	}
3800 }
3801 
3802 /*
 * request that the ucode set the LED of the NIC
3804  */
3805 static void
iwp_set_led(iwp_sc_t * sc,uint8_t id,uint8_t off,uint8_t on)3806 iwp_set_led(iwp_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3807 {
3808 	iwp_led_cmd_t led;
3809 
3810 	led.interval = LE_32(100000);	/* unit: 100ms */
3811 	led.id = id;
3812 	led.off = off;
3813 	led.on = on;
3814 
3815 	(void) iwp_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3816 }
3817 
3818 /*
3819  * necessary setting to NIC before authentication
3820  */
3821 static int
iwp_hw_set_before_auth(iwp_sc_t * sc)3822 iwp_hw_set_before_auth(iwp_sc_t *sc)
3823 {
3824 	ieee80211com_t *ic = &sc->sc_ic;
3825 	ieee80211_node_t *in = ic->ic_bss;
3826 	int err = IWP_FAIL;
3827 
3828 	/*
3829 	 * update adapter's configuration according
3830 	 * the info of target AP
3831 	 */
3832 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
3833 	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan));
3834 
3835 		sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0;
3836 		sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0;
3837 		sc->sc_config.ofdm_ht_single_stream_basic_rates = 0;
3838 
3839 		if (IEEE80211_MODE_11B == ic->ic_curmode) {
3840 			sc->sc_config.cck_basic_rates  = 0x03;
3841 			sc->sc_config.ofdm_basic_rates = 0;
3842 		} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
3843 		    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
3844 			sc->sc_config.cck_basic_rates  = 0;
3845 			sc->sc_config.ofdm_basic_rates = 0x15;
3846 		} else { /* assume 802.11b/g */
3847 			sc->sc_config.cck_basic_rates  = 0x0f;
3848 			sc->sc_config.ofdm_basic_rates = 0xff;
3849 		}
3850 
3851 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3852 	    RXON_FLG_SHORT_SLOT_MSK);
3853 
3854 	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
3855 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3856 	} else {
3857 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3858 	}
3859 
3860 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
3861 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3862 	} else {
3863 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3864 	}
3865 
3866 	IWP_DBG((IWP_DEBUG_80211, "iwp_hw_set_before_auth(): "
3867 	    "config chan %d flags %x "
3868 	    "filter_flags %x  cck %x ofdm %x"
3869 	    " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
3870 	    LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags),
3871 	    LE_32(sc->sc_config.filter_flags),
3872 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3873 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3874 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3875 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3876 
3877 	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
3878 	    sizeof (iwp_rxon_cmd_t), 1);
3879 	if (err != IWP_SUCCESS) {
3880 		cmn_err(CE_WARN, "iwp_hw_set_before_auth(): "
3881 		    "failed to config chan%d\n", sc->sc_config.chan);
3882 		return (err);
3883 	}
3884 
3885 	/*
3886 	 * add default AP node
3887 	 */
3888 	err = iwp_add_ap_sta(sc);
3889 	if (err != IWP_SUCCESS) {
3890 		return (err);
3891 	}
3892 
3893 
3894 	return (err);
3895 }
3896 
3897 /*
3898  * Send a scan request(assembly scan cmd) to the firmware.
3899  */
3900 static int
iwp_scan(iwp_sc_t * sc)3901 iwp_scan(iwp_sc_t *sc)
3902 {
3903 	ieee80211com_t *ic = &sc->sc_ic;
3904 	iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
3905 	iwp_tx_desc_t *desc;
3906 	iwp_tx_data_t *data;
3907 	iwp_cmd_t *cmd;
3908 	iwp_scan_hdr_t *hdr;
3909 	iwp_scan_chan_t chan;
3910 	struct ieee80211_frame *wh;
3911 	ieee80211_node_t *in = ic->ic_bss;
3912 	uint8_t essid[IEEE80211_NWID_LEN+1];
3913 	struct ieee80211_rateset *rs;
3914 	enum ieee80211_phymode mode;
3915 	uint8_t *frm;
3916 	int i, pktlen, nrates;
3917 
3918 	data = &ring->data[ring->cur];
3919 	desc = data->desc;
3920 	cmd = (iwp_cmd_t *)data->dma_data.mem_va;
3921 
3922 	cmd->hdr.type = REPLY_SCAN_CMD;
3923 	cmd->hdr.flags = 0;
3924 	cmd->hdr.qid = ring->qid;
3925 	cmd->hdr.idx = ring->cur | 0x40;
3926 
3927 	hdr = (iwp_scan_hdr_t *)cmd->data;
3928 	(void) memset(hdr, 0, sizeof (iwp_scan_hdr_t));
3929 	hdr->nchan = 1;
3930 	hdr->quiet_time = LE_16(50);
3931 	hdr->quiet_plcp_th = LE_16(1);
3932 
3933 	hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK);
3934 	hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3935 	    (0x7 << RXON_RX_CHAIN_VALID_POS) |
3936 	    (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3937 	    (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3938 
3939 	hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3940 	hdr->tx_cmd.sta_id = IWP_BROADCAST_ID;
3941 	hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff);
3942 	hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(2));
3943 	hdr->tx_cmd.rate.r.rate_n_flags |=
3944 	    LE_32(RATE_MCS_ANT_B_MSK |RATE_MCS_CCK_MSK);
3945 	hdr->direct_scan[0].len = ic->ic_des_esslen;
3946 	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;
3947 
3948 	hdr->filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3949 	    RXON_FILTER_BCON_AWARE_MSK);
3950 
3951 	if (ic->ic_des_esslen) {
3952 		bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
3953 		essid[ic->ic_des_esslen] = '\0';
3954 		IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
3955 		    "directed scan %s\n", essid));
3956 
3957 		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
3958 		    ic->ic_des_esslen);
3959 	} else {
3960 		bzero(hdr->direct_scan[0].ssid,
3961 		    sizeof (hdr->direct_scan[0].ssid));
3962 	}
3963 
3964 	/*
3965 	 * a probe request frame is required after the REPLY_SCAN_CMD
3966 	 */
3967 	wh = (struct ieee80211_frame *)(hdr + 1);
3968 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
3969 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
3970 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
3971 	(void) memset(wh->i_addr1, 0xff, 6);
3972 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
3973 	(void) memset(wh->i_addr3, 0xff, 6);
3974 	*(uint16_t *)&wh->i_dur[0] = 0;
3975 	*(uint16_t *)&wh->i_seq[0] = 0;
3976 
3977 	frm = (uint8_t *)(wh + 1);
3978 
3979 	/*
3980 	 * essid IE
3981 	 */
3982 	if (in->in_esslen) {
3983 		bcopy(in->in_essid, essid, in->in_esslen);
3984 		essid[in->in_esslen] = '\0';
3985 		IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
3986 		    "probe with ESSID %s\n",
3987 		    essid));
3988 	}
3989 	*frm++ = IEEE80211_ELEMID_SSID;
3990 	*frm++ = in->in_esslen;
3991 	(void) memcpy(frm, in->in_essid, in->in_esslen);
3992 	frm += in->in_esslen;
3993 
3994 	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
3995 	rs = &ic->ic_sup_rates[mode];
3996 
3997 	/*
3998 	 * supported rates IE
3999 	 */
4000 	*frm++ = IEEE80211_ELEMID_RATES;
4001 	nrates = rs->ir_nrates;
4002 	if (nrates > IEEE80211_RATE_SIZE) {
4003 		nrates = IEEE80211_RATE_SIZE;
4004 	}
4005 
4006 	*frm++ = (uint8_t)nrates;
4007 	(void) memcpy(frm, rs->ir_rates, nrates);
4008 	frm += nrates;
4009 
4010 	/*
4011 	 * supported xrates IE
4012 	 */
4013 	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
4014 		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
4015 		*frm++ = IEEE80211_ELEMID_XRATES;
4016 		*frm++ = (uint8_t)nrates;
4017 		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
4018 		frm += nrates;
4019 	}
4020 
4021 	/*
4022 	 * optionnal IE (usually for wpa)
4023 	 */
4024 	if (ic->ic_opt_ie != NULL) {
4025 		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
4026 		frm += ic->ic_opt_ie_len;
4027 	}
4028 
4029 	/* setup length of probe request */
4030 	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
4031 	hdr->len = LE_16(hdr->nchan * sizeof (iwp_scan_chan_t) +
4032 	    LE_16(hdr->tx_cmd.len) + sizeof (iwp_scan_hdr_t));
4033 
4034 	/*
4035 	 * the attribute of the scan channels are required after the probe
4036 	 * request frame.
4037 	 */
4038 	for (i = 1; i <= hdr->nchan; i++) {
4039 		if (ic->ic_des_esslen) {
4040 			chan.type = LE_32(3);
4041 		} else {
4042 			chan.type = LE_32(1);
4043 		}
4044 
4045 		chan.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
4046 		chan.tpc.tx_gain = 0x28;
4047 		chan.tpc.dsp_atten = 110;
4048 		chan.active_dwell = LE_16(50);
4049 		chan.passive_dwell = LE_16(120);
4050 
4051 		bcopy(&chan, frm, sizeof (iwp_scan_chan_t));
4052 		frm += sizeof (iwp_scan_chan_t);
4053 	}
4054 
4055 	pktlen = _PTRDIFF(frm, cmd);
4056 
4057 	(void) memset(desc, 0, sizeof (*desc));
4058 	desc->val0 = 1 << 24;
4059 	desc->pa[0].tb1_addr =
4060 	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
4061 	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
4062 
4063 	/*
4064 	 * maybe for cmd, filling the byte cnt table is not necessary.
4065 	 * anyway, we fill it here.
4066 	 */
4067 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
4068 	    .tfd_offset[ring->cur].val = 8;
4069 	if (ring->cur < IWP_MAX_WIN_SIZE) {
4070 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
4071 		    tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
4072 	}
4073 
4074 	/*
4075 	 * kick cmd ring
4076 	 */
4077 	ring->cur = (ring->cur + 1) % ring->count;
4078 	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4079 
4080 	return (IWP_SUCCESS);
4081 }
4082 
4083 /*
4084  * configure NIC by using ucode commands after loading ucode.
4085  */
4086 static int
iwp_config(iwp_sc_t * sc)4087 iwp_config(iwp_sc_t *sc)
4088 {
4089 	ieee80211com_t *ic = &sc->sc_ic;
4090 	iwp_powertable_cmd_t powertable;
4091 	iwp_bt_cmd_t bt;
4092 	iwp_add_sta_t node;
4093 	iwp_rem_sta_t	rm_sta;
4094 	const uint8_t bcast[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
4095 	int err = IWP_FAIL;
4096 
4097 	/*
4098 	 * set power mode. Disable power management at present, do it later
4099 	 */
4100 	(void) memset(&powertable, 0, sizeof (powertable));
4101 	powertable.flags = LE_16(0x8);
4102 	err = iwp_cmd(sc, POWER_TABLE_CMD, &powertable,
4103 	    sizeof (powertable), 0);
4104 	if (err != IWP_SUCCESS) {
4105 		cmn_err(CE_WARN, "iwp_config(): "
4106 		    "failed to set power mode\n");
4107 		return (err);
4108 	}
4109 
4110 	/*
4111 	 * configure bt coexistence
4112 	 */
4113 	(void) memset(&bt, 0, sizeof (bt));
4114 	bt.flags = 3;
4115 	bt.lead_time = 0xaa;
4116 	bt.max_kill = 1;
4117 	err = iwp_cmd(sc, REPLY_BT_CONFIG, &bt,
4118 	    sizeof (bt), 0);
4119 	if (err != IWP_SUCCESS) {
4120 		cmn_err(CE_WARN, "iwp_config(): "
4121 		    "failed to configurate bt coexistence\n");
4122 		return (err);
4123 	}
4124 
4125 	/*
4126 	 * configure rxon
4127 	 */
4128 	(void) memset(&sc->sc_config, 0, sizeof (iwp_rxon_cmd_t));
4129 	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
4130 	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
4131 	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
4132 	sc->sc_config.flags = LE_32(RXON_FLG_BAND_24G_MSK);
4133 	sc->sc_config.flags &= LE_32(~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4134 	    RXON_FLG_CHANNEL_MODE_PURE_40_MSK));
4135 
4136 	switch (ic->ic_opmode) {
4137 	case IEEE80211_M_STA:
4138 		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
4139 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4140 		    RXON_FILTER_DIS_DECRYPT_MSK |
4141 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
4142 		break;
4143 	case IEEE80211_M_IBSS:
4144 	case IEEE80211_M_AHDEMO:
4145 		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
4146 
4147 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
4148 		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4149 		    RXON_FILTER_DIS_DECRYPT_MSK |
4150 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
4151 		break;
4152 	case IEEE80211_M_HOSTAP:
4153 		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
4154 		break;
4155 	case IEEE80211_M_MONITOR:
4156 		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
4157 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4158 		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
4159 		break;
4160 	}
4161 
4162 	/*
4163 	 * Support all CCK rates.
4164 	 */
4165 	sc->sc_config.cck_basic_rates  = 0x0f;
4166 
4167 	/*
4168 	 * Support all OFDM rates.
4169 	 */
4170 	sc->sc_config.ofdm_basic_rates = 0xff;
4171 
4172 	sc->sc_config.rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
4173 	    (0x7 << RXON_RX_CHAIN_VALID_POS) |
4174 	    (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
4175 	    (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
4176 
4177 	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
4178 	    sizeof (iwp_rxon_cmd_t), 0);
4179 	if (err != IWP_SUCCESS) {
4180 		cmn_err(CE_WARN, "iwp_config(): "
4181 		    "failed to set configure command\n");
4182 		return (err);
4183 	}
4184 
4185 	/*
4186 	 * remove all nodes in NIC
4187 	 */
4188 	(void) memset(&rm_sta, 0, sizeof (rm_sta));
4189 	rm_sta.num_sta = 1;
4190 	(void) memcpy(rm_sta.addr, bcast, 6);
4191 
4192 	err = iwp_cmd(sc, REPLY_REMOVE_STA, &rm_sta, sizeof (iwp_rem_sta_t), 0);
4193 	if (err != IWP_SUCCESS) {
4194 		cmn_err(CE_WARN, "iwp_config(): "
4195 		    "failed to remove broadcast node in hardware.\n");
4196 		return (err);
4197 	}
4198 
4199 	/*
4200 	 * add broadcast node so that we can send broadcast frame
4201 	 */
4202 	(void) memset(&node, 0, sizeof (node));
4203 	(void) memset(node.sta.addr, 0xff, 6);
4204 	node.mode = 0;
4205 	node.sta.sta_id = IWP_BROADCAST_ID;
4206 	node.station_flags = 0;
4207 
4208 	err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
4209 	if (err != IWP_SUCCESS) {
4210 		cmn_err(CE_WARN, "iwp_config(): "
4211 		    "failed to add broadcast node\n");
4212 		return (err);
4213 	}
4214 
4215 	return (err);
4216 }
4217 
4218 /*
4219  * quiesce(9E) entry point.
4220  * This function is called when the system is single-threaded at high
4221  * PIL with preemption disabled. Therefore, this function must not be
4222  * blocked.
4223  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
4224  * DDI_FAILURE indicates an error condition and should almost never happen.
4225  */
4226 static int
iwp_quiesce(dev_info_t * dip)4227 iwp_quiesce(dev_info_t *dip)
4228 {
4229 	iwp_sc_t *sc;
4230 
4231 	sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
4232 	if (NULL == sc) {
4233 		return (DDI_FAILURE);
4234 	}
4235 
4236 #ifdef DEBUG
4237 	/* by pass any messages, if it's quiesce */
4238 	iwp_dbg_flags = 0;
4239 #endif
4240 
4241 	/*
4242 	 * No more blocking is allowed while we are in the
4243 	 * quiesce(9E) entry point.
4244 	 */
4245 	atomic_or_32(&sc->sc_flags, IWP_F_QUIESCED);
4246 
4247 	/*
4248 	 * Disable and mask all interrupts.
4249 	 */
4250 	iwp_stop(sc);
4251 
4252 	return (DDI_SUCCESS);
4253 }
4254 
4255 static void
iwp_stop_master(iwp_sc_t * sc)4256 iwp_stop_master(iwp_sc_t *sc)
4257 {
4258 	uint32_t tmp;
4259 	int n;
4260 
4261 	tmp = IWP_READ(sc, CSR_RESET);
4262 	IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
4263 
4264 	tmp = IWP_READ(sc, CSR_GP_CNTRL);
4265 	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
4266 	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) {
4267 		return;
4268 	}
4269 
4270 	for (n = 0; n < 2000; n++) {
4271 		if (IWP_READ(sc, CSR_RESET) &
4272 		    CSR_RESET_REG_FLAG_MASTER_DISABLED) {
4273 			break;
4274 		}
4275 		DELAY(1000);
4276 	}
4277 
4278 #ifdef	DEBUG
4279 	if (2000 == n) {
4280 		IWP_DBG((IWP_DEBUG_HW, "iwp_stop_master(): "
4281 		    "timeout waiting for master stop\n"));
4282 	}
4283 #endif
4284 }
4285 
4286 static int
iwp_power_up(iwp_sc_t * sc)4287 iwp_power_up(iwp_sc_t *sc)
4288 {
4289 	uint32_t tmp;
4290 
4291 	iwp_mac_access_enter(sc);
4292 	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
4293 	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
4294 	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
4295 	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4296 	iwp_mac_access_exit(sc);
4297 
4298 	DELAY(5000);
4299 	return (IWP_SUCCESS);
4300 }
4301 
4302 /*
4303  * hardware initialization
4304  */
4305 static int
iwp_preinit(iwp_sc_t * sc)4306 iwp_preinit(iwp_sc_t *sc)
4307 {
4308 	int		n;
4309 	uint8_t		vlink;
4310 	uint16_t	radio_cfg;
4311 	uint32_t	tmp;
4312 
4313 	/*
4314 	 * clear any pending interrupts
4315 	 */
4316 	IWP_WRITE(sc, CSR_INT, 0xffffffff);
4317 
4318 	tmp = IWP_READ(sc, CSR_GIO_CHICKEN_BITS);
4319 	IWP_WRITE(sc, CSR_GIO_CHICKEN_BITS,
4320 	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
4321 
4322 	tmp = IWP_READ(sc, CSR_GP_CNTRL);
4323 	IWP_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4324 
4325 	/*
4326 	 * wait for clock ready
4327 	 */
4328 	for (n = 0; n < 1000; n++) {
4329 		if (IWP_READ(sc, CSR_GP_CNTRL) &
4330 		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
4331 			break;
4332 		}
4333 		DELAY(10);
4334 	}
4335 
4336 	if (1000 == n) {
4337 		return (ETIMEDOUT);
4338 	}
4339 
4340 	iwp_mac_access_enter(sc);
4341 
4342 	iwp_reg_write(sc, ALM_APMG_CLK_EN, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4343 
4344 	DELAY(20);
4345 	tmp = iwp_reg_read(sc, ALM_APMG_PCIDEV_STT);
4346 	iwp_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
4347 	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
4348 	iwp_mac_access_exit(sc);
4349 
4350 	radio_cfg = IWP_READ_EEP_SHORT(sc, EEP_SP_RADIO_CONFIGURATION);
4351 	if (SP_RADIO_TYPE_MSK(radio_cfg) < SP_RADIO_TYPE_MAX) {
4352 		tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4353 		IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4354 		    tmp | SP_RADIO_TYPE_MSK(radio_cfg) |
4355 		    SP_RADIO_STEP_MSK(radio_cfg) |
4356 		    SP_RADIO_DASH_MSK(radio_cfg));
4357 	} else {
4358 		cmn_err(CE_WARN, "iwp_preinit(): "
4359 		    "radio configuration information in eeprom is wrong\n");
4360 		return (IWP_FAIL);
4361 	}
4362 
4363 
4364 	IWP_WRITE(sc, CSR_INT_COALESCING, 512 / 32);
4365 
4366 	(void) iwp_power_up(sc);
4367 
4368 	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
4369 		tmp = ddi_get32(sc->sc_cfg_handle,
4370 		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
4371 		ddi_put32(sc->sc_cfg_handle,
4372 		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
4373 		    tmp & ~(1 << 11));
4374 	}
4375 
4376 	vlink = ddi_get8(sc->sc_cfg_handle,
4377 	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
4378 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
4379 	    vlink & ~2);
4380 
4381 	tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4382 	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
4383 	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
4384 	IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG, tmp);
4385 
4386 	/*
4387 	 * make sure power supply on each part of the hardware
4388 	 */
4389 	iwp_mac_access_enter(sc);
4390 	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
4391 	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4392 	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4393 	DELAY(5);
4394 
4395 	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
4396 	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4397 	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4398 	iwp_mac_access_exit(sc);
4399 
4400 	if (PA_TYPE_MIX == sc->sc_chip_param.pa_type) {
4401 		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
4402 		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_MIX);
4403 	}
4404 
4405 	if (PA_TYPE_INTER == sc->sc_chip_param.pa_type) {
4406 
4407 		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
4408 		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
4409 	}
4410 
4411 	return (IWP_SUCCESS);
4412 }
4413 
4414 /*
 * set up semaphore flag to own EEPROM
4416  */
4417 static int
iwp_eep_sem_down(iwp_sc_t * sc)4418 iwp_eep_sem_down(iwp_sc_t *sc)
4419 {
4420 	int count1, count2;
4421 	uint32_t tmp;
4422 
4423 	for (count1 = 0; count1 < 1000; count1++) {
4424 		tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4425 		IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4426 		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4427 
4428 		for (count2 = 0; count2 < 2; count2++) {
4429 			if (IWP_READ(sc, CSR_HW_IF_CONFIG_REG) &
4430 			    CSR_HW_IF_CONFIG_REG_EEP_SEM) {
4431 				return (IWP_SUCCESS);
4432 			}
4433 			DELAY(10000);
4434 		}
4435 	}
4436 	return (IWP_FAIL);
4437 }
4438 
4439 /*
 * reset semaphore flag to release EEPROM
4441  */
4442 static void
iwp_eep_sem_up(iwp_sc_t * sc)4443 iwp_eep_sem_up(iwp_sc_t *sc)
4444 {
4445 	uint32_t tmp;
4446 
4447 	tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4448 	IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4449 	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4450 }
4451 
4452 /*
 * This function reads all information from the eeprom
4454  */
4455 static int
iwp_eep_load(iwp_sc_t * sc)4456 iwp_eep_load(iwp_sc_t *sc)
4457 {
4458 	int i, rr;
4459 	uint32_t rv, tmp, eep_gp;
4460 	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
4461 	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
4462 
4463 	/*
4464 	 * read eeprom gp register in CSR
4465 	 */
4466 	eep_gp = IWP_READ(sc, CSR_EEPROM_GP);
4467 	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
4468 	    CSR_EEPROM_GP_BAD_SIGNATURE) {
4469 		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
4470 		    "not find eeprom\n"));
4471 		return (IWP_FAIL);
4472 	}
4473 
4474 	rr = iwp_eep_sem_down(sc);
4475 	if (rr != 0) {
4476 		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
4477 		    "driver failed to own EEPROM\n"));
4478 		return (IWP_FAIL);
4479 	}
4480 
4481 	for (addr = 0; addr < eep_sz; addr += 2) {
4482 		IWP_WRITE(sc, CSR_EEPROM_REG, addr<<1);
4483 		tmp = IWP_READ(sc, CSR_EEPROM_REG);
4484 		IWP_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
4485 
4486 		for (i = 0; i < 10; i++) {
4487 			rv = IWP_READ(sc, CSR_EEPROM_REG);
4488 			if (rv & 1) {
4489 				break;
4490 			}
4491 			DELAY(10);
4492 		}
4493 
4494 		if (!(rv & 1)) {
4495 			IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
4496 			    "time out when read eeprome\n"));
4497 			iwp_eep_sem_up(sc);
4498 			return (IWP_FAIL);
4499 		}
4500 
4501 		eep_p[addr/2] = LE_16(rv >> 16);
4502 	}
4503 
4504 	iwp_eep_sem_up(sc);
4505 	return (IWP_SUCCESS);
4506 }
4507 
4508 /*
4509  * initialize mac address in ieee80211com_t struct
4510  */
4511 static void
iwp_get_mac_from_eep(iwp_sc_t * sc)4512 iwp_get_mac_from_eep(iwp_sc_t *sc)
4513 {
4514 	ieee80211com_t *ic = &sc->sc_ic;
4515 
4516 	IEEE80211_ADDR_COPY(ic->ic_macaddr, &sc->sc_eep_map[EEP_MAC_ADDRESS]);
4517 
4518 	IWP_DBG((IWP_DEBUG_EEPROM, "iwp_get_mac_from_eep(): "
4519 	    "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4520 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4521 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4522 }
4523 
/*
 * Main initialization function.  Brings the NIC up in two phases:
 * first the "init" firmware segment is loaded and run (which performs
 * hardware calibration), the chip is stopped, and then the "runtime"
 * firmware is loaded and run, after which the hardware is configured
 * via ucode commands (rxon, txpower, etc.).  Returns IWP_SUCCESS or
 * IWP_FAIL.
 */
static int
iwp_init(iwp_sc_t *sc)
{
	int err = IWP_FAIL;
	clock_t clk;

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	mutex_enter(&sc->sc_glock);
	/* clear the "firmware alive" flag before (re)loading firmware */
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	err = iwp_init_common(sc);
	if (err != IWP_SUCCESS) {
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * backup ucode data part for future use.
	 */
	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
	    sc->sc_dma_fw_data.mem_va,
	    sc->sc_dma_fw_data.alength);

	/* load firmware init segment into NIC */
	err = iwp_load_init_firmware(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to setup init firmware\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * now press "execute" start running
	 */
	IWP_WRITE(sc, CSR_RESET, 0);

	/*
	 * wait up to one second for the init firmware's "alive"
	 * notification (interrupt handler sets IWP_F_FW_INIT and
	 * signals sc_ucode_cv).
	 */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);
	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_ucode_cv,
		    &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_FW_INIT)) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to process init alive.\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	mutex_exit(&sc->sc_glock);

	/*
	 * stop chipset for initializing chipset again
	 */
	iwp_stop(sc);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	err = iwp_init_common(sc);
	if (err != IWP_SUCCESS) {
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * load firmware run segment into NIC
	 */
	err = iwp_load_run_firmware(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to setup run firmware\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * now press "execute" start running
	 */
	IWP_WRITE(sc, CSR_RESET, 0);

	/* as above: wait up to one second for the runtime "alive" */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);
	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_ucode_cv,
		    &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_FW_INIT)) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to process runtime alive.\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	mutex_exit(&sc->sc_glock);

	/* brief settle time after runtime alive before configuring */
	DELAY(1000);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	/*
	 * at this point, the firmware is loaded OK, then config the hardware
	 * with the ucode API, including rxon, txpower, etc.
	 */
	err = iwp_config(sc);
	if (err) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to configure device\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * at this point, hardware may receive beacons :)
	 */
	mutex_exit(&sc->sc_glock);
	return (IWP_SUCCESS);
}
4655 
/*
 * Stop/disable the NIC: reset the chip, mask interrupts, tear down all
 * Tx/Rx rings, and put the device into software reset.  When called
 * from the quiesce path (IWP_F_QUIESCED set) no mutexes may be taken,
 * so lock acquisition is skipped.
 */
static void
iwp_stop(iwp_sc_t *sc)
{
	uint32_t tmp;
	int i;

	/* by pass if it's quiesced */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		mutex_enter(&sc->sc_glock);
	}

	IWP_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
	/*
	 * disable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, 0);
	/* acknowledge/clear any pending interrupt causes */
	IWP_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
	IWP_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);

	/*
	 * reset all Tx rings
	 */
	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		iwp_reset_tx_ring(sc, &sc->sc_txq[i]);
	}

	/*
	 * reset Rx ring
	 */
	iwp_reset_rx_ring(sc);

	/* stop the DMA clock request (needs NIC register access grant) */
	iwp_mac_access_enter(sc);
	iwp_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
	iwp_mac_access_exit(sc);

	DELAY(5);

	iwp_stop_master(sc);

	/* cancel any pending Tx watchdog */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_tx_timer = 0;
	mutex_exit(&sc->sc_mt_lock);

	/* finally put the chip into software reset */
	tmp = IWP_READ(sc, CSR_RESET);
	IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);

	/* by pass if it's quiesced */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		mutex_exit(&sc->sc_glock);
	}
}
4710 
4711 /*
4712  * Naive implementation of the Adaptive Multi Rate Retry algorithm:
4713  * "IEEE 802.11 Rate Adaptation: A Practical Approach"
4714  * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
4715  * INRIA Sophia - Projet Planete
4716  * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
4717  */
4718 #define	is_success(amrr)	\
4719 	((amrr)->retrycnt < (amrr)->txcnt / 10)
4720 #define	is_failure(amrr)	\
4721 	((amrr)->retrycnt > (amrr)->txcnt / 3)
4722 #define	is_enough(amrr)		\
4723 	((amrr)->txcnt > 200)
4724 #define	not_very_few(amrr)	\
4725 	((amrr)->txcnt > 40)
4726 #define	is_min_rate(in)		\
4727 	(0 == (in)->in_txrate)
4728 #define	is_max_rate(in)		\
4729 	((in)->in_rates.ir_nrates - 1 == (in)->in_txrate)
4730 #define	increase_rate(in)	\
4731 	((in)->in_txrate++)
4732 #define	decrease_rate(in)	\
4733 	((in)->in_txrate--)
4734 #define	reset_cnt(amrr)		\
4735 	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }
4736 
4737 #define	IWP_AMRR_MIN_SUCCESS_THRESHOLD	 1
4738 #define	IWP_AMRR_MAX_SUCCESS_THRESHOLD	15
4739 
4740 static void
iwp_amrr_init(iwp_amrr_t * amrr)4741 iwp_amrr_init(iwp_amrr_t *amrr)
4742 {
4743 	amrr->success = 0;
4744 	amrr->recovery = 0;
4745 	amrr->txcnt = amrr->retrycnt = 0;
4746 	amrr->success_threshold = IWP_AMRR_MIN_SUCCESS_THRESHOLD;
4747 }
4748 
4749 static void
iwp_amrr_timeout(iwp_sc_t * sc)4750 iwp_amrr_timeout(iwp_sc_t *sc)
4751 {
4752 	ieee80211com_t *ic = &sc->sc_ic;
4753 
4754 	IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_timeout(): "
4755 	    "enter\n"));
4756 
4757 	if (IEEE80211_M_STA == ic->ic_opmode) {
4758 		iwp_amrr_ratectl(NULL, ic->ic_bss);
4759 	} else {
4760 		ieee80211_iterate_nodes(&ic->ic_sta, iwp_amrr_ratectl, NULL);
4761 	}
4762 
4763 	sc->sc_clk = ddi_get_lbolt();
4764 }
4765 
4766 /* ARGSUSED */
4767 static void
iwp_amrr_ratectl(void * arg,ieee80211_node_t * in)4768 iwp_amrr_ratectl(void *arg, ieee80211_node_t *in)
4769 {
4770 	iwp_amrr_t *amrr = (iwp_amrr_t *)in;
4771 	int need_change = 0;
4772 
4773 	if (is_success(amrr) && is_enough(amrr)) {
4774 		amrr->success++;
4775 		if (amrr->success >= amrr->success_threshold &&
4776 		    !is_max_rate(in)) {
4777 			amrr->recovery = 1;
4778 			amrr->success = 0;
4779 			increase_rate(in);
4780 			IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
4781 			    "AMRR increasing rate %d "
4782 			    "(txcnt=%d retrycnt=%d)\n",
4783 			    in->in_txrate, amrr->txcnt,
4784 			    amrr->retrycnt));
4785 			need_change = 1;
4786 		} else {
4787 			amrr->recovery = 0;
4788 		}
4789 	} else if (not_very_few(amrr) && is_failure(amrr)) {
4790 		amrr->success = 0;
4791 		if (!is_min_rate(in)) {
4792 			if (amrr->recovery) {
4793 				amrr->success_threshold++;
4794 				if (amrr->success_threshold >
4795 				    IWP_AMRR_MAX_SUCCESS_THRESHOLD) {
4796 					amrr->success_threshold =
4797 					    IWP_AMRR_MAX_SUCCESS_THRESHOLD;
4798 				}
4799 			} else {
4800 				amrr->success_threshold =
4801 				    IWP_AMRR_MIN_SUCCESS_THRESHOLD;
4802 			}
4803 			decrease_rate(in);
4804 			IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
4805 			    "AMRR decreasing rate %d "
4806 			    "(txcnt=%d retrycnt=%d)\n",
4807 			    in->in_txrate, amrr->txcnt,
4808 			    amrr->retrycnt));
4809 			need_change = 1;
4810 		}
4811 		amrr->recovery = 0;	/* paper is incorrect */
4812 	}
4813 
4814 	if (is_enough(amrr) || need_change) {
4815 		reset_cnt(amrr);
4816 	}
4817 }
4818 
4819 /*
4820  * translate indirect address in eeprom to direct address
4821  * in eeprom and return address of entry whos indirect address
4822  * is indi_addr
4823  */
4824 static uint8_t *
iwp_eep_addr_trans(iwp_sc_t * sc,uint32_t indi_addr)4825 iwp_eep_addr_trans(iwp_sc_t *sc, uint32_t indi_addr)
4826 {
4827 	uint32_t	di_addr;
4828 	uint16_t	temp;
4829 
4830 	if (!(indi_addr & INDIRECT_ADDRESS)) {
4831 		di_addr = indi_addr;
4832 		return (&sc->sc_eep_map[di_addr]);
4833 	}
4834 
4835 	switch (indi_addr & INDIRECT_TYPE_MSK) {
4836 	case INDIRECT_GENERAL:
4837 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_GENERAL);
4838 		break;
4839 	case	INDIRECT_HOST:
4840 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_HOST);
4841 		break;
4842 	case	INDIRECT_REGULATORY:
4843 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_REGULATORY);
4844 		break;
4845 	case	INDIRECT_CALIBRATION:
4846 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_CALIBRATION);
4847 		break;
4848 	case	INDIRECT_PROCESS_ADJST:
4849 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_PROCESS_ADJST);
4850 		break;
4851 	case	INDIRECT_OTHERS:
4852 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_OTHERS);
4853 		break;
4854 	default:
4855 		temp = 0;
4856 		cmn_err(CE_WARN, "iwp_eep_addr_trans(): "
4857 		    "incorrect indirect eeprom address.\n");
4858 		break;
4859 	}
4860 
4861 	di_addr = (indi_addr & ADDRESS_MSK) + (temp << 1);
4862 
4863 	return (&sc->sc_eep_map[di_addr]);
4864 }
4865 
/*
 * Load one segment of ucode into the NIC's SRAM by programming the
 * flow handler's service DMA channel: pause the channel, set source
 * (host DRAM) and destination (SRAM) addresses and length, mark the
 * transfer buffer valid, then re-enable the channel to start the DMA.
 * addr_s is the DMA (bus) address of the segment, addr_d the SRAM
 * destination, len the byte count.
 */
static int
iwp_put_seg_fw(iwp_sc_t *sc, uint32_t addr_s, uint32_t addr_d, uint32_t len)
{

	iwp_mac_access_enter(sc);

	/* pause the service channel while it is reprogrammed */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	IWP_WRITE(sc, IWP_FH_SRVC_CHNL_SRAM_ADDR_REG(IWP_FH_SRVC_CHNL), addr_d);

	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL0_REG(IWP_FH_SRVC_CHNL),
	    (addr_s & FH_MEM_TFDIB_DRAM_ADDR_LSB_MASK));

	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL1_REG(IWP_FH_SRVC_CHNL), len);

	/* one TB, index 1, marked valid */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_BUF_STS_REG(IWP_FH_SRVC_CHNL),
	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
	    IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* kick off the transfer */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwp_mac_access_exit(sc);

	return (IWP_SUCCESS);
}
4899 
/*
 * Post-"alive" setup common to both firmware images: initialize the
 * scheduler (SCD) SRAM and registers so Tx queues work, then send the
 * WiMAX-coexistence and crystal-frequency-calibration commands to the
 * ucode.  Returns IWP_SUCCESS, or the failing iwp_cmd() status.
 */
static int
iwp_alive_common(iwp_sc_t *sc)
{
	uint32_t	base;
	uint32_t	i;
	iwp_wimax_coex_cmd_t	w_cmd;
	iwp_calibration_crystal_cmd_t	c_cmd;
	uint32_t	rv = IWP_FAIL;

	/*
	 * initialize SCD related registers to make TX work.
	 */
	iwp_mac_access_enter(sc);

	/*
	 * read sram address of data base.
	 */
	sc->sc_scd_base = iwp_reg_read(sc, IWP_SCD_SRAM_BASE_ADDR);

	/* zero the SCD context area ... */
	for (base = sc->sc_scd_base + IWP_SCD_CONTEXT_DATA_OFFSET;
	    base < sc->sc_scd_base + IWP_SCD_TX_STTS_BITMAP_OFFSET;
	    base += 4) {
		iwp_mem_write(sc, base, 0);
	}

	/* ... the tx status bitmap area ... */
	for (; base < sc->sc_scd_base + IWP_SCD_TRANSLATE_TBL_OFFSET;
	    base += 4) {
		iwp_mem_write(sc, base, 0);
	}

	/* ... and the translate table (one uint16_t per queue) */
	for (i = 0; i < sizeof (uint16_t) * IWP_NUM_QUEUES; i += 4) {
		iwp_mem_write(sc, base + i, 0);
	}

	/* point the scheduler at the shared DRAM area (1KB units) */
	iwp_reg_write(sc, IWP_SCD_DRAM_BASE_ADDR,
	    sc->sc_dma_sh.cookie.dmac_address >> 10);

	iwp_reg_write(sc, IWP_SCD_QUEUECHAIN_SEL,
	    IWP_SCD_QUEUECHAIN_SEL_ALL(IWP_NUM_QUEUES));

	iwp_reg_write(sc, IWP_SCD_AGGR_SEL, 0);

	/*
	 * reset read pointers and per-queue context (window size and
	 * frame limit) for every Tx queue.
	 */
	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(i), 0);
		IWP_WRITE(sc, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwp_mem_write(sc, sc->sc_scd_base +
		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwp_mem_write(sc, sc->sc_scd_base +
		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i) +
		    sizeof (uint32_t),
		    ((SCD_WIN_SIZE << IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((SCD_FRAME_LIMIT <<
		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	/* enable scheduler interrupts for all queues */
	iwp_reg_write(sc, IWP_SCD_INTERRUPT_MASK, (1 << IWP_NUM_QUEUES) - 1);

	iwp_reg_write(sc, (IWP_SCD_BASE + 0x10),
	    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));

	IWP_WRITE(sc, HBUS_TARG_WRPTR, (IWP_CMD_QUEUE_NUM << 8));
	iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(IWP_CMD_QUEUE_NUM), 0);

	/*
	 * queue 0-7 map to FIFO 0-7 and
	 * all queues work under FIFO mode(none-scheduler_ack)
	 *
	 * NOTE(review): the (3-i) below actually maps queues 0-3 to
	 * FIFOs 3-0 (reversed), not 0-7 as the comment above says —
	 * confirm against the hardware programming guide.
	 */
	for (i = 0; i < 4; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    ((3-i) << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWP_SCD_QUEUE_STTS_REG_MSK);
	}

	/* the command queue gets its dedicated FIFO */
	iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(IWP_CMD_QUEUE_NUM),
	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (IWP_CMD_FIFO_NUM << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWP_SCD_QUEUE_STTS_REG_MSK);

	/* queues 5 and 6 map straight to FIFOs 5 and 6 */
	for (i = 5; i < 7; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (i << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWP_SCD_QUEUE_STTS_REG_MSK);
	}

	iwp_mac_access_exit(sc);

	/* an all-zero coexistence table disables WiMAX priorities */
	(void) memset(&w_cmd, 0, sizeof (w_cmd));

	rv = iwp_cmd(sc, COEX_PRIORITY_TABLE_CMD, &w_cmd, sizeof (w_cmd), 1);
	if (rv != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alive_common(): "
		    "failed to send wimax coexist command.\n");
		return (rv);
	}

	(void) memset(&c_cmd, 0, sizeof (c_cmd));

	/* crystal calibration values come from EEPROM */
	c_cmd.opCode = PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
	c_cmd.data.cap_pin1 = LE_16(sc->sc_eep_calib->xtal_calib[0]);
	c_cmd.data.cap_pin2 = LE_16(sc->sc_eep_calib->xtal_calib[1]);

	rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &c_cmd, sizeof (c_cmd), 1);
	if (rv != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alive_common(): "
		    "failed to send crystal frq calibration command.\n");
		return (rv);
	}

	/*
	 * make sure crystal frequency calibration ready
	 * before next operations.
	 */
	DELAY(1000);

	return (IWP_SUCCESS);
}
5026 
5027 /*
5028  * save results of calibration from ucode
5029  */
5030 static void
iwp_save_calib_result(iwp_sc_t * sc,iwp_rx_desc_t * desc)5031 iwp_save_calib_result(iwp_sc_t *sc, iwp_rx_desc_t *desc)
5032 {
5033 	struct iwp_calib_results *res_p = &sc->sc_calib_results;
5034 	struct iwp_calib_hdr *calib_hdr = (struct iwp_calib_hdr *)(desc + 1);
5035 	int len = LE_32(desc->len);
5036 
5037 	/*
5038 	 * ensure the size of buffer is not too big
5039 	 */
5040 	len = (len & FH_RSCSR_FRAME_SIZE_MASK) - 4;
5041 
5042 	switch (calib_hdr->op_code) {
5043 	case PHY_CALIBRATE_LO_CMD:
5044 		if (NULL == res_p->lo_res) {
5045 			res_p->lo_res = kmem_alloc(len, KM_NOSLEEP);
5046 		}
5047 
5048 		if (NULL == res_p->lo_res) {
5049 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5050 			    "failed to allocate memory.\n");
5051 			return;
5052 		}
5053 
5054 		res_p->lo_res_len = len;
5055 		(void) memcpy(res_p->lo_res, calib_hdr, len);
5056 		break;
5057 	case PHY_CALIBRATE_TX_IQ_CMD:
5058 		if (NULL == res_p->tx_iq_res) {
5059 			res_p->tx_iq_res = kmem_alloc(len, KM_NOSLEEP);
5060 		}
5061 
5062 		if (NULL == res_p->tx_iq_res) {
5063 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5064 			    "failed to allocate memory.\n");
5065 			return;
5066 		}
5067 
5068 		res_p->tx_iq_res_len = len;
5069 		(void) memcpy(res_p->tx_iq_res, calib_hdr, len);
5070 		break;
5071 	case PHY_CALIBRATE_TX_IQ_PERD_CMD:
5072 		if (NULL == res_p->tx_iq_perd_res) {
5073 			res_p->tx_iq_perd_res = kmem_alloc(len, KM_NOSLEEP);
5074 		}
5075 
5076 		if (NULL == res_p->tx_iq_perd_res) {
5077 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5078 			    "failed to allocate memory.\n");
5079 		}
5080 
5081 		res_p->tx_iq_perd_res_len = len;
5082 		(void) memcpy(res_p->tx_iq_perd_res, calib_hdr, len);
5083 		break;
5084 	case PHY_CALIBRATE_BASE_BAND_CMD:
5085 		if (NULL == res_p->base_band_res) {
5086 			res_p->base_band_res = kmem_alloc(len, KM_NOSLEEP);
5087 		}
5088 
5089 		if (NULL == res_p->base_band_res) {
5090 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5091 			    "failed to allocate memory.\n");
5092 		}
5093 
5094 		res_p->base_band_res_len = len;
5095 		(void) memcpy(res_p->base_band_res, calib_hdr, len);
5096 		break;
5097 	default:
5098 		cmn_err(CE_WARN, "iwp_save_calib_result(): "
5099 		    "incorrect calibration type(%d).\n", calib_hdr->op_code);
5100 		break;
5101 	}
5102 
5103 }
5104 
5105 static void
iwp_release_calib_buffer(iwp_sc_t * sc)5106 iwp_release_calib_buffer(iwp_sc_t *sc)
5107 {
5108 	if (sc->sc_calib_results.lo_res != NULL) {
5109 		kmem_free(sc->sc_calib_results.lo_res,
5110 		    sc->sc_calib_results.lo_res_len);
5111 		sc->sc_calib_results.lo_res = NULL;
5112 	}
5113 
5114 	if (sc->sc_calib_results.tx_iq_res != NULL) {
5115 		kmem_free(sc->sc_calib_results.tx_iq_res,
5116 		    sc->sc_calib_results.tx_iq_res_len);
5117 		sc->sc_calib_results.tx_iq_res = NULL;
5118 	}
5119 
5120 	if (sc->sc_calib_results.tx_iq_perd_res != NULL) {
5121 		kmem_free(sc->sc_calib_results.tx_iq_perd_res,
5122 		    sc->sc_calib_results.tx_iq_perd_res_len);
5123 		sc->sc_calib_results.tx_iq_perd_res = NULL;
5124 	}
5125 
5126 	if (sc->sc_calib_results.base_band_res != NULL) {
5127 		kmem_free(sc->sc_calib_results.base_band_res,
5128 		    sc->sc_calib_results.base_band_res_len);
5129 		sc->sc_calib_results.base_band_res = NULL;
5130 	}
5131 
5132 }
5133 
/*
 * Hardware initialization shared by both firmware-load phases: check
 * the RF-kill switch, program the Rx ring and Tx ring registers, clear
 * the "radio off"/"command blocked" bits, and enable interrupts.
 * Caller holds sc_glock.  Returns IWP_SUCCESS or IWP_FAIL.
 */
static int
iwp_init_common(iwp_sc_t *sc)
{
	int32_t	qid;
	uint32_t tmp;

	(void) iwp_preinit(sc);

	/* refuse to proceed if the hardware RF-kill switch is engaged */
	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
		cmn_err(CE_NOTE, "iwp_init_common(): "
		    "radio transmitter is off\n");
		return (IWP_FAIL);
	}

	/*
	 * init Rx ring
	 */
	iwp_mac_access_enter(sc);
	/* disable the Rx DMA channel while it is reprogrammed */
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
	/* Rx buffer descriptor base, in 256-byte units */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);

	/* status write-back address, in 16-byte units */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
	    offsetof(struct iwp_shared, val0)) >> 4));

	/* re-enable the channel: 4KB buffers, interrupt the host */
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWP_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    (RX_QUEUE_SIZE_LOG <<
	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
	iwp_mac_access_exit(sc);
	/* hand all but the last 8 RBDs to the hardware */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
	    (RX_QUEUE_SIZE - 1) & ~0x7);

	/*
	 * init Tx rings
	 */
	iwp_mac_access_enter(sc);
	iwp_reg_write(sc, IWP_SCD_TXFACT, 0);

	/*
	 * keep warm page
	 */
	IWP_WRITE(sc, IWP_FH_KW_MEM_ADDR_REG,
	    sc->sc_dma_kw.cookie.dmac_address >> 4);

	/* per-queue descriptor base and DMA channel enable */
	for (qid = 0; qid < IWP_NUM_QUEUES; qid++) {
		IWP_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
		IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	}

	iwp_mac_access_exit(sc);

	/*
	 * clear "radio off" and "disable command" bits
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/*
	 * clear any pending interrupts
	 */
	IWP_WRITE(sc, CSR_INT, 0xffffffff);

	/*
	 * enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	/*
	 * NOTE(review): the software RFKILL bit is cleared twice more
	 * below (three times total in this function) — possibly
	 * deliberate redundancy; confirm before changing.
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return (IWP_SUCCESS);
}
5220 
/*
 * Fast recovery from a hardware error while associated: restore the
 * saved runtime RXON configuration, redo the pre-auth hardware setup,
 * re-apply the association configuration, turn the LED back on, and
 * resume the MAC Tx path.  Returns IWP_SUCCESS or the failing step's
 * error code.
 */
static int
iwp_fast_recover(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	int err = IWP_FAIL;

	mutex_enter(&sc->sc_glock);

	/* restore runtime configuration */
	bcopy(&sc->sc_config_save, &sc->sc_config,
	    sizeof (sc->sc_config));

	/* drop association state for the pre-auth setup step */
	sc->sc_config.assoc_id = 0;
	sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);

	if ((err = iwp_hw_set_before_auth(sc)) != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_fast_recover(): "
		    "could not setup authentication\n");
		mutex_exit(&sc->sc_glock);
		return (err);
	}

	/*
	 * restore again — presumably iwp_hw_set_before_auth() modified
	 * sc_config; TODO confirm.
	 */
	bcopy(&sc->sc_config_save, &sc->sc_config,
	    sizeof (sc->sc_config));

	/* update adapter's configuration */
	err = iwp_run_state_config(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_fast_recover(): "
		    "failed to setup association\n");
		mutex_exit(&sc->sc_glock);
		return (err);
	}
	/* set LED on */
	iwp_set_led(sc, 2, 0, 1);

	mutex_exit(&sc->sc_glock);

	/* recovery complete; clear the error-recovery flag */
	atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);

	/* start queue */
	IWP_DBG((IWP_DEBUG_FW, "iwp_fast_recover(): "
	    "resume xmit\n"));
	mac_tx_update(ic->ic_mach);

	return (IWP_SUCCESS);
}
5268 
5269 static int
iwp_run_state_config(iwp_sc_t * sc)5270 iwp_run_state_config(iwp_sc_t *sc)
5271 {
5272 	struct ieee80211com *ic = &sc->sc_ic;
5273 	ieee80211_node_t *in = ic->ic_bss;
5274 	int err = IWP_FAIL;
5275 
5276 	/*
5277 	 * update adapter's configuration
5278 	 */
5279 	sc->sc_config.assoc_id = in->in_associd & 0x3fff;
5280 
5281 	/*
5282 	 * short preamble/slot time are
5283 	 * negotiated when associating
5284 	 */
5285 	sc->sc_config.flags &=
5286 	    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
5287 	    RXON_FLG_SHORT_SLOT_MSK);
5288 
5289 	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
5290 		sc->sc_config.flags |=
5291 		    LE_32(RXON_FLG_SHORT_SLOT_MSK);
5292 	}
5293 
5294 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
5295 		sc->sc_config.flags |=
5296 		    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
5297 	}
5298 
5299 	sc->sc_config.filter_flags |=
5300 	    LE_32(RXON_FILTER_ASSOC_MSK);
5301 
5302 	if (ic->ic_opmode != IEEE80211_M_STA) {
5303 		sc->sc_config.filter_flags |=
5304 		    LE_32(RXON_FILTER_BCON_AWARE_MSK);
5305 	}
5306 
5307 	IWP_DBG((IWP_DEBUG_80211, "iwp_run_state_config(): "
5308 	    "config chan %d flags %x"
5309 	    " filter_flags %x\n",
5310 	    sc->sc_config.chan, sc->sc_config.flags,
5311 	    sc->sc_config.filter_flags));
5312 
5313 	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
5314 	    sizeof (iwp_rxon_cmd_t), 1);
5315 	if (err != IWP_SUCCESS) {
5316 		cmn_err(CE_WARN, "iwp_run_state_config(): "
5317 		    "could not update configuration\n");
5318 		return (err);
5319 	}
5320 
5321 	return (err);
5322 }
5323 
5324 /*
5325  * This function overwrites default configurations of
5326  * ieee80211com structure in Net80211 module.
5327  */
5328 static void
iwp_overwrite_ic_default(iwp_sc_t * sc)5329 iwp_overwrite_ic_default(iwp_sc_t *sc)
5330 {
5331 	ieee80211com_t *ic = &sc->sc_ic;
5332 
5333 	sc->sc_newstate = ic->ic_newstate;
5334 	ic->ic_newstate = iwp_newstate;
5335 	ic->ic_node_alloc = iwp_node_alloc;
5336 	ic->ic_node_free = iwp_node_free;
5337 }
5338 
5339 
5340 /*
5341  * This function adds AP station into hardware.
5342  */
5343 static int
iwp_add_ap_sta(iwp_sc_t * sc)5344 iwp_add_ap_sta(iwp_sc_t *sc)
5345 {
5346 	ieee80211com_t *ic = &sc->sc_ic;
5347 	ieee80211_node_t *in = ic->ic_bss;
5348 	iwp_add_sta_t node;
5349 	int err = IWP_FAIL;
5350 
5351 	/*
5352 	 * Add AP node into hardware.
5353 	 */
5354 	(void) memset(&node, 0, sizeof (node));
5355 	IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
5356 	node.mode = STA_MODE_ADD_MSK;
5357 	node.sta.sta_id = IWP_AP_ID;
5358 
5359 	err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
5360 	if (err != IWP_SUCCESS) {
5361 		cmn_err(CE_WARN, "iwp_add_ap_sta(): "
5362 		    "failed to add AP node\n");
5363 		return (err);
5364 	}
5365 
5366 	return (err);
5367 }
5368 
5369 /*
5370  * Check EEPROM version and Calibration version.
5371  */
5372 static int
iwp_eep_ver_chk(iwp_sc_t * sc)5373 iwp_eep_ver_chk(iwp_sc_t *sc)
5374 {
5375 	if ((IWP_READ_EEP_SHORT(sc, EEP_VERSION) < 0x011a) ||
5376 	    (sc->sc_eep_calib->tx_pow_calib_hdr.calib_version < 4)) {
5377 		cmn_err(CE_WARN, "iwp_eep_ver_chk(): "
5378 		    "unsupported eeprom detected\n");
5379 		return (IWP_FAIL);
5380 	}
5381 
5382 	return (IWP_SUCCESS);
5383 }
5384 
5385 /*
5386  * Determine parameters for all supported chips.
5387  */
5388 static void
iwp_set_chip_param(iwp_sc_t * sc)5389 iwp_set_chip_param(iwp_sc_t *sc)
5390 {
5391 	if ((0x008d == sc->sc_dev_id) ||
5392 	    (0x008e == sc->sc_dev_id)) {
5393 		sc->sc_chip_param.phy_mode = PHY_MODE_G |
5394 		    PHY_MODE_A | PHY_MODE_N;
5395 
5396 		sc->sc_chip_param.tx_ant = ANT_A | ANT_B;
5397 		sc->sc_chip_param.rx_ant = ANT_A | ANT_B;
5398 
5399 		sc->sc_chip_param.pa_type = PA_TYPE_MIX;
5400 	}
5401 
5402 	if ((0x422c == sc->sc_dev_id) ||
5403 	    (0x4239 == sc->sc_dev_id)) {
5404 		sc->sc_chip_param.phy_mode = PHY_MODE_G |
5405 		    PHY_MODE_A | PHY_MODE_N;
5406 
5407 		sc->sc_chip_param.tx_ant = ANT_B | ANT_C;
5408 		sc->sc_chip_param.rx_ant = ANT_B | ANT_C;
5409 
5410 		sc->sc_chip_param.pa_type = PA_TYPE_INTER;
5411 	}
5412 
5413 	if ((0x422b == sc->sc_dev_id) ||
5414 	    (0x4238 == sc->sc_dev_id)) {
5415 		sc->sc_chip_param.phy_mode = PHY_MODE_G |
5416 		    PHY_MODE_A | PHY_MODE_N;
5417 
5418 		sc->sc_chip_param.tx_ant = ANT_A | ANT_B | ANT_C;
5419 		sc->sc_chip_param.rx_ant = ANT_A | ANT_B | ANT_C;
5420 
5421 		sc->sc_chip_param.pa_type = PA_TYPE_SYSTEM;
5422 	}
5423 }
5424