xref: /illumos-gate/usr/src/uts/common/io/iwp/iwp.c (revision 33efde4275d24731ef87927237b0ffb0630b6b2d)
1 /*
2  * Copyright (c) 2018, Joyent, Inc.
3  */
4 
5 /*
6  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
7  * Use is subject to license terms.
8  */
9 
10 /*
11  * Copyright (c) 2009, Intel Corporation
12  * All rights reserved.
13  */
14 
15 /*
16  * Copyright (c) 2006
17  * Copyright (c) 2007
18  *	Damien Bergamini <damien.bergamini@free.fr>
19  *
20  * Permission to use, copy, modify, and distribute this software for any
21  * purpose with or without fee is hereby granted, provided that the above
22  * copyright notice and this permission notice appear in all copies.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
25  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
26  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
27  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
28  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
29  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
30  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
31  */
32 
33 /*
34  * Intel(R) WiFi Link 6000 Driver
35  */
36 
37 #include <sys/types.h>
38 #include <sys/byteorder.h>
39 #include <sys/conf.h>
40 #include <sys/cmn_err.h>
41 #include <sys/stat.h>
42 #include <sys/ddi.h>
43 #include <sys/sunddi.h>
44 #include <sys/strsubr.h>
45 #include <sys/ethernet.h>
46 #include <inet/common.h>
47 #include <inet/nd.h>
48 #include <inet/mi.h>
49 #include <sys/note.h>
50 #include <sys/stream.h>
51 #include <sys/strsun.h>
52 #include <sys/modctl.h>
53 #include <sys/devops.h>
54 #include <sys/dlpi.h>
55 #include <sys/mac_provider.h>
56 #include <sys/mac_wifi.h>
57 #include <sys/net80211.h>
58 #include <sys/net80211_proto.h>
59 #include <sys/varargs.h>
60 #include <sys/policy.h>
61 #include <sys/pci.h>
62 
63 #include "iwp_calibration.h"
64 #include "iwp_hw.h"
65 #include "iwp_eeprom.h"
66 #include "iwp_var.h"
67 #include <inet/wifi_ioctl.h>
68 
69 #ifdef DEBUG
70 #define	IWP_DEBUG_80211		(1 << 0)
71 #define	IWP_DEBUG_CMD		(1 << 1)
72 #define	IWP_DEBUG_DMA		(1 << 2)
73 #define	IWP_DEBUG_EEPROM	(1 << 3)
74 #define	IWP_DEBUG_FW		(1 << 4)
75 #define	IWP_DEBUG_HW		(1 << 5)
76 #define	IWP_DEBUG_INTR		(1 << 6)
77 #define	IWP_DEBUG_MRR		(1 << 7)
78 #define	IWP_DEBUG_PIO		(1 << 8)
79 #define	IWP_DEBUG_RX		(1 << 9)
80 #define	IWP_DEBUG_SCAN		(1 << 10)
81 #define	IWP_DEBUG_TX		(1 << 11)
82 #define	IWP_DEBUG_RATECTL	(1 << 12)
83 #define	IWP_DEBUG_RADIO		(1 << 13)
84 #define	IWP_DEBUG_RESUME	(1 << 14)
85 #define	IWP_DEBUG_CALIBRATION	(1 << 15)
86 /*
87  * if want to see debug message of a given section,
88  * please set this flag to one of above values
89  */
90 uint32_t iwp_dbg_flags = 0;
91 #define	IWP_DBG(x) \
92 	iwp_dbg x
93 #else
94 #define	IWP_DBG(x)
95 #endif
96 
97 static void	*iwp_soft_state_p = NULL;
98 
/*
 * ucode will be compiled into driver image.
 * The image is later overlaid with iwp_firmware_hdr_t at attach
 * time (sc->sc_hdr) and split into text/data/init/boot segments.
 */
static uint8_t iwp_fw_bin [] = {
#include "fw-iw/iwp.ucode"
};
105 
/*
 * DMA attributes for the page shared between driver and ucode:
 * 32-bit addressing, page (4KB) aligned, single cookie.
 */
static ddi_dma_attr_t sh_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address (32-bit DMA only) */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes (4KB page) */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments (one cookie) */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
123 
/*
 * DMA attributes for the "keep warm" DRAM descriptor page:
 * same constraints as the shared page (32-bit, 4KB aligned, one cookie).
 */
static ddi_dma_attr_t kw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address (32-bit DMA only) */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes (4KB page) */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments (one cookie) */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
141 
/*
 * DMA attributes for a TX/RX ring descriptor array:
 * 32-bit addressing, 256-byte aligned, single cookie.
 */
static ddi_dma_attr_t ring_desc_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address (32-bit DMA only) */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes (256-byte) */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments (one cookie) */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
159 
/*
 * DMA attributes for a host command buffer:
 * only 4-byte alignment is required, single cookie.
 */
static ddi_dma_attr_t cmd_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address (32-bit DMA only) */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments (one cookie) */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
177 
/*
 * DMA attributes for an RX frame buffer:
 * 32-bit addressing, 256-byte aligned, single cookie.
 */
static ddi_dma_attr_t rx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address (32-bit DMA only) */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes (256-byte) */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments (one cookie) */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
195 
/*
 * DMA attributes for a tx buffer.
 * the maximum number of segments is 4 for the hardware.
 * now all the wifi drivers put the whole frame in a single
 * descriptor, so we define the maximum  number of segments 1,
 * just the same as the rx_buffer. we consider leverage the HW
 * ability in the future, that is why we don't define rx and tx
 * buffer_dma_attr as the same.
 */
static ddi_dma_attr_t tx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address (32-bit DMA only) */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments (see note above) */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
219 
/*
 * DMA attributes for the text and data segments of the firmware
 * image: 16-byte aligned, single cookie.
 */
static ddi_dma_attr_t fw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address (32-bit DMA only) */
	0x7fffffff,	/* maximum DMAable byte count */
	0x10,		/* alignment in bytes (16-byte) */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments (one cookie) */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
237 
/*
 * Access attributes for device registers: little-endian,
 * strictly ordered accesses.
 */
static ddi_device_acc_attr_t iwp_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,	/* device data is little-endian */
	DDI_STRICTORDER_ACC,	/* no reordering of register accesses */
	DDI_DEFAULT_ACC
};
247 
/*
 * DMA access attributes for descriptors: little-endian, since the
 * hardware interprets descriptor fields in LE byte order.
 */
static ddi_device_acc_attr_t iwp_dma_descattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,	/* descriptors are little-endian */
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
257 
/*
 * DMA access attributes for plain data buffers: no byte swapping,
 * the payload is passed through untouched.
 */
static ddi_device_acc_attr_t iwp_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,	/* raw payload, never byte-swapped */
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
267 
268 static int	iwp_ring_init(iwp_sc_t *);
269 static void	iwp_ring_free(iwp_sc_t *);
270 static int	iwp_alloc_shared(iwp_sc_t *);
271 static void	iwp_free_shared(iwp_sc_t *);
272 static int	iwp_alloc_kw(iwp_sc_t *);
273 static void	iwp_free_kw(iwp_sc_t *);
274 static int	iwp_alloc_fw_dma(iwp_sc_t *);
275 static void	iwp_free_fw_dma(iwp_sc_t *);
276 static int	iwp_alloc_rx_ring(iwp_sc_t *);
277 static void	iwp_reset_rx_ring(iwp_sc_t *);
278 static void	iwp_free_rx_ring(iwp_sc_t *);
279 static int	iwp_alloc_tx_ring(iwp_sc_t *, iwp_tx_ring_t *,
280     int, int);
281 static void	iwp_reset_tx_ring(iwp_sc_t *, iwp_tx_ring_t *);
282 static void	iwp_free_tx_ring(iwp_tx_ring_t *);
283 static ieee80211_node_t *iwp_node_alloc(ieee80211com_t *);
284 static void	iwp_node_free(ieee80211_node_t *);
285 static int	iwp_newstate(ieee80211com_t *, enum ieee80211_state, int);
286 static void	iwp_mac_access_enter(iwp_sc_t *);
287 static void	iwp_mac_access_exit(iwp_sc_t *);
288 static uint32_t	iwp_reg_read(iwp_sc_t *, uint32_t);
289 static void	iwp_reg_write(iwp_sc_t *, uint32_t, uint32_t);
290 static int	iwp_load_init_firmware(iwp_sc_t *);
291 static int	iwp_load_run_firmware(iwp_sc_t *);
292 static void	iwp_tx_intr(iwp_sc_t *, iwp_rx_desc_t *);
293 static void	iwp_cmd_intr(iwp_sc_t *, iwp_rx_desc_t *);
294 static uint_t   iwp_intr(caddr_t, caddr_t);
295 static int	iwp_eep_load(iwp_sc_t *);
296 static void	iwp_get_mac_from_eep(iwp_sc_t *);
297 static int	iwp_eep_sem_down(iwp_sc_t *);
298 static void	iwp_eep_sem_up(iwp_sc_t *);
299 static uint_t   iwp_rx_softintr(caddr_t, caddr_t);
300 static uint8_t	iwp_rate_to_plcp(int);
301 static int	iwp_cmd(iwp_sc_t *, int, const void *, int, int);
302 static void	iwp_set_led(iwp_sc_t *, uint8_t, uint8_t, uint8_t);
303 static int	iwp_hw_set_before_auth(iwp_sc_t *);
304 static int	iwp_scan(iwp_sc_t *);
305 static int	iwp_config(iwp_sc_t *);
306 static void	iwp_stop_master(iwp_sc_t *);
307 static int	iwp_power_up(iwp_sc_t *);
308 static int	iwp_preinit(iwp_sc_t *);
309 static int	iwp_init(iwp_sc_t *);
310 static void	iwp_stop(iwp_sc_t *);
311 static int	iwp_quiesce(dev_info_t *t);
312 static void	iwp_amrr_init(iwp_amrr_t *);
313 static void	iwp_amrr_timeout(iwp_sc_t *);
314 static void	iwp_amrr_ratectl(void *, ieee80211_node_t *);
315 static void	iwp_ucode_alive(iwp_sc_t *, iwp_rx_desc_t *);
316 static void	iwp_rx_phy_intr(iwp_sc_t *, iwp_rx_desc_t *);
317 static void	iwp_rx_mpdu_intr(iwp_sc_t *, iwp_rx_desc_t *);
318 static void	iwp_release_calib_buffer(iwp_sc_t *);
319 static int	iwp_init_common(iwp_sc_t *);
320 static uint8_t	*iwp_eep_addr_trans(iwp_sc_t *, uint32_t);
321 static int	iwp_put_seg_fw(iwp_sc_t *, uint32_t, uint32_t, uint32_t);
322 static	int	iwp_alive_common(iwp_sc_t *);
323 static void	iwp_save_calib_result(iwp_sc_t *, iwp_rx_desc_t *);
324 static int	iwp_attach(dev_info_t *, ddi_attach_cmd_t);
325 static int	iwp_detach(dev_info_t *, ddi_detach_cmd_t);
326 static void	iwp_destroy_locks(iwp_sc_t *);
327 static int	iwp_send(ieee80211com_t *, mblk_t *, uint8_t);
328 static void	iwp_thread(iwp_sc_t *);
329 static int	iwp_run_state_config(iwp_sc_t *);
330 static int	iwp_fast_recover(iwp_sc_t *);
331 static void	iwp_overwrite_ic_default(iwp_sc_t *);
332 static int	iwp_add_ap_sta(iwp_sc_t *);
333 static int	iwp_alloc_dma_mem(iwp_sc_t *, size_t,
334     ddi_dma_attr_t *, ddi_device_acc_attr_t *,
335     uint_t, iwp_dma_t *);
336 static void	iwp_free_dma_mem(iwp_dma_t *);
337 static int	iwp_eep_ver_chk(iwp_sc_t *);
338 static void	iwp_set_chip_param(iwp_sc_t *);
339 
340 /*
341  * GLD specific operations
342  */
343 static int	iwp_m_stat(void *, uint_t, uint64_t *);
344 static int	iwp_m_start(void *);
345 static void	iwp_m_stop(void *);
346 static int	iwp_m_unicst(void *, const uint8_t *);
347 static int	iwp_m_multicst(void *, boolean_t, const uint8_t *);
348 static int	iwp_m_promisc(void *, boolean_t);
349 static mblk_t	*iwp_m_tx(void *, mblk_t *);
350 static void	iwp_m_ioctl(void *, queue_t *, mblk_t *);
351 static int	iwp_m_setprop(void *arg, const char *pr_name,
352     mac_prop_id_t wldp_pr_num, uint_t wldp_length, const void *wldp_buf);
353 static int	iwp_m_getprop(void *arg, const char *pr_name,
354     mac_prop_id_t wldp_pr_num, uint_t wldp_length, void *wldp_buf);
355 static void	iwp_m_propinfo(void *, const char *, mac_prop_id_t,
356     mac_prop_info_handle_t);
357 
/*
 * Supported rates for 802.11b/g modes (in 500Kbps unit).
 * 11b: 1/2/5.5/11 Mbps; 11g adds the OFDM rates 6..54 Mbps.
 */
static const struct ieee80211_rateset iwp_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

static const struct ieee80211_rateset iwp_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
366 
367 /*
368  * For mfthread only
369  */
370 extern pri_t minclsyspri;
371 
372 #define	DRV_NAME_SP	"iwp"
373 
/*
 * Module Loading Data & Entry Points.
 * Standard STREAMS dev_ops plus the modldrv/modlinkage glue used
 * by _init/_fini/_info below.
 */
DDI_DEFINE_STREAM_OPS(iwp_devops, nulldev, nulldev, iwp_attach,
    iwp_detach, nodev, NULL, D_MP, NULL, iwp_quiesce);

static struct modldrv iwp_modldrv = {
	&mod_driverops,			/* this is a device driver */
	"Intel(R) PumaPeak driver(N)",	/* module description */
	&iwp_devops			/* driver dev_ops */
};

static struct modlinkage iwp_modlinkage = {
	MODREV_1,	/* module linkage revision */
	&iwp_modldrv,
	NULL		/* list terminator */
};
391 
392 int
_init(void)393 _init(void)
394 {
395 	int	status;
396 
397 	status = ddi_soft_state_init(&iwp_soft_state_p,
398 	    sizeof (iwp_sc_t), 1);
399 	if (status != DDI_SUCCESS) {
400 		return (status);
401 	}
402 
403 	mac_init_ops(&iwp_devops, DRV_NAME_SP);
404 	status = mod_install(&iwp_modlinkage);
405 	if (status != DDI_SUCCESS) {
406 		mac_fini_ops(&iwp_devops);
407 		ddi_soft_state_fini(&iwp_soft_state_p);
408 	}
409 
410 	return (status);
411 }
412 
413 int
_fini(void)414 _fini(void)
415 {
416 	int status;
417 
418 	status = mod_remove(&iwp_modlinkage);
419 	if (DDI_SUCCESS == status) {
420 		mac_fini_ops(&iwp_devops);
421 		ddi_soft_state_fini(&iwp_soft_state_p);
422 	}
423 
424 	return (status);
425 }
426 
427 int
_info(struct modinfo * mip)428 _info(struct modinfo *mip)
429 {
430 	return (mod_info(&iwp_modlinkage, mip));
431 }
432 
/*
 * Mac Call Back entries.
 * Field-by-field comments follow mac_callbacks(9S); the mask in the
 * first member advertises which optional entry points are present.
 */
mac_callbacks_t	iwp_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	iwp_m_stat,	/* mc_getstat */
	iwp_m_start,	/* mc_start */
	iwp_m_stop,	/* mc_stop */
	iwp_m_promisc,	/* mc_setpromisc */
	iwp_m_multicst,	/* mc_multicst */
	iwp_m_unicst,	/* mc_unicst */
	iwp_m_tx,	/* mc_tx */
	NULL,
	iwp_m_ioctl,	/* mc_ioctl */
	NULL,		/* mc_getcapab -- none advertised */
	NULL,
	NULL,
	iwp_m_setprop,	/* mc_setprop */
	iwp_m_getprop,	/* mc_getprop */
	iwp_m_propinfo	/* mc_propinfo */
};
454 
455 #ifdef DEBUG
456 void
iwp_dbg(uint32_t flags,const char * fmt,...)457 iwp_dbg(uint32_t flags, const char *fmt, ...)
458 {
459 	va_list	ap;
460 
461 	if (flags & iwp_dbg_flags) {
462 		va_start(ap, fmt);
463 		vcmn_err(CE_NOTE, fmt, ap);
464 		va_end(ap);
465 	}
466 }
467 #endif	/* DEBUG */
468 
/*
 * device operations
 */

/*
 * Attach entry point.
 *
 * DDI_RESUME: re-initialize the chip if it was running at suspend time
 * and clear the suspend flag.
 *
 * DDI_ATTACH: allocate soft state, map PCI config and device register
 * spaces, validate the device ID, set up the fixed interrupt, locks and
 * condition variables, allocate all DMA resources (shared page, keep
 * warm page, rings, firmware buffers), read the EEPROM, attach to the
 * net80211 and GLD frameworks, and finally start the monitor thread.
 * Failure unwinds through the attach_failN labels in strict reverse
 * order of setup.
 */
int
iwp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	iwp_sc_t		*sc;
	ieee80211com_t		*ic;
	int			instance, i;
	char			strbuf[32];
	wifi_data_t		wd = { 0 };
	mac_register_t		*macp;
	int			intr_type;
	int			intr_count;
	int			intr_actual;
	int			err = DDI_FAILURE;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		instance = ddi_get_instance(dip);
		sc = ddi_get_soft_state(iwp_soft_state_p,
		    instance);
		ASSERT(sc != NULL);

		/* restart the hardware only if it was up at suspend */
		if (sc->sc_flags & IWP_F_RUNNING) {
			(void) iwp_init(sc);
		}

		atomic_and_32(&sc->sc_flags, ~IWP_F_SUSPEND);

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_attach(): "
		    "resume\n"));
		return (DDI_SUCCESS);
	default:
		goto attach_fail1;
	}

	instance = ddi_get_instance(dip);
	err = ddi_soft_state_zalloc(iwp_soft_state_p, instance);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate soft state\n");
		goto attach_fail1;
	}

	sc = ddi_get_soft_state(iwp_soft_state_p, instance);
	ASSERT(sc != NULL);

	sc->sc_dip = dip;

	/*
	 * map configure space
	 */
	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
	    &iwp_reg_accattr, &sc->sc_cfg_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to map config spaces regs\n");
		goto attach_fail2;
	}

	/*
	 * reject any PCI device ID this driver was not written for
	 */
	sc->sc_dev_id = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_DEVID));
	if ((sc->sc_dev_id != 0x422B) &&
	    (sc->sc_dev_id != 0x422C) &&
	    (sc->sc_dev_id != 0x4238) &&
	    (sc->sc_dev_id != 0x4239) &&
	    (sc->sc_dev_id != 0x008d) &&
	    (sc->sc_dev_id != 0x008e)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "Do not support this device\n");
		goto attach_fail3;
	}

	iwp_set_chip_param(sc);

	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));

	/*
	 * keep from disturbing C3 state of CPU
	 */
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base +
	    PCI_CFG_RETRY_TIMEOUT), 0);

	/*
	 * determine the size of buffer for frame and command to ucode
	 * (cache line size is reported in 4-byte units; default to 64B)
	 */
	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
	if (!sc->sc_clsz) {
		sc->sc_clsz = 16;
	}
	sc->sc_clsz = (sc->sc_clsz << 2);

	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
	    IEEE80211_MTU + IEEE80211_CRC_LEN +
	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
	    0, 0, &iwp_reg_accattr, &sc->sc_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to map device regs\n");
		goto attach_fail3;
	}

	/*
	 * this is used to differentiate type of hardware
	 */
	sc->sc_hw_rev = IWP_READ(sc, CSR_HW_REV);

	err = ddi_intr_get_supported_types(dip, &intr_type);
	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "fixed type interrupt is not supported\n");
		goto attach_fail4;
	}

	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "no fixed interrupts\n");
		goto attach_fail4;
	}

	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);

	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
	    intr_count, &intr_actual, 0);
	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_alloc() failed 0x%x\n", err);
		goto attach_fail5;
	}

	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_get_pri() failed 0x%x\n", err);
		goto attach_fail6;
	}

	/*
	 * all driver mutexes are initialized at the interrupt priority
	 * so they can be taken from the interrupt handler
	 */
	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));

	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_put_seg_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_ucode_cv, NULL, CV_DRIVER, NULL);

	/*
	 * initialize the mfthread
	 */
	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
	sc->sc_mf_thread = NULL;
	sc->sc_mf_thread_switch = 0;

	/*
	 * Allocate shared buffer for communication between driver and ucode.
	 */
	err = iwp_alloc_shared(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate shared page\n");
		goto attach_fail7;
	}

	(void) memset(sc->sc_shared, 0, sizeof (iwp_shared_t));

	/*
	 * Allocate keep warm page.
	 */
	err = iwp_alloc_kw(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate keep warm page\n");
		goto attach_fail8;
	}

	/*
	 * Do some necessary hardware initializations.
	 */
	err = iwp_preinit(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to initialize hardware\n");
		goto attach_fail9;
	}

	/*
	 * get hardware configurations from eeprom
	 */
	err = iwp_eep_load(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to load eeprom\n");
		goto attach_fail9;
	}

	/*
	 * calibration information from EEPROM
	 */
	sc->sc_eep_calib = (struct iwp_eep_calibration *)
	    iwp_eep_addr_trans(sc, EEP_CALIBRATION);

	err = iwp_eep_ver_chk(sc);
	if (err != IWP_SUCCESS) {
		goto attach_fail9;
	}

	/*
	 * get MAC address of this chipset
	 */
	iwp_get_mac_from_eep(sc);


	/*
	 * initialize TX and RX ring buffers
	 */
	err = iwp_ring_init(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate and initialize ring\n");
		goto attach_fail9;
	}

	/* overlay the firmware header on the linked-in ucode image */
	sc->sc_hdr = (iwp_firmware_hdr_t *)iwp_fw_bin;

	/*
	 * copy ucode to dma buffer
	 */
	err = iwp_alloc_fw_dma(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate firmware dma\n");
		goto attach_fail10;
	}

	/*
	 * Initialize the wifi part, which will be used by
	 * 802.11 module
	 */
	ic = &sc->sc_ic;
	ic->ic_phytype  = IEEE80211_T_OFDM;
	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
	ic->ic_state    = IEEE80211_S_INIT;
	ic->ic_maxrssi  = 100; /* experimental number */
	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;

	/*
	 * Support WPA/WPA2
	 */
	ic->ic_caps |= IEEE80211_C_WPA;

	/*
	 * set supported .11b and .11g rates
	 */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwp_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwp_rateset_11g;

	/*
	 * set supported .11b and .11g channels (1 through 11)
	 */
	for (i = 1; i <= 11; i++) {
		ic->ic_sup_channels[i].ich_freq =
		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
		ic->ic_sup_channels[i].ich_flags =
		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
		    IEEE80211_CHAN_PASSIVE;
	}

	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
	ic->ic_xmit = iwp_send;

	/*
	 * attach to 802.11 module
	 */
	ieee80211_attach(ic);

	/*
	 * different instance has different WPA door
	 */
	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
	    ddi_driver_name(dip),
	    ddi_get_instance(dip));

	/*
	 * Overwrite 80211 default configurations.
	 */
	iwp_overwrite_ic_default(sc);

	/*
	 * initialize 802.11 module
	 */
	ieee80211_media_init(ic);

	/*
	 * initialize default tx key
	 */
	ic->ic_def_txkey = 0;

	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
	    iwp_rx_softintr, (caddr_t)sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "add soft interrupt failed\n");
		goto attach_fail12;
	}

	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwp_intr,
	    (caddr_t)sc, NULL);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_add_handle() failed\n");
		goto attach_fail13;
	}

	err = ddi_intr_enable(sc->sc_intr_htable[0]);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_enable() failed\n");
		goto attach_fail14;
	}

	/*
	 * Initialize pointer to device specific functions
	 */
	wd.wd_secalloc = WIFI_SEC_NONE;
	wd.wd_opmode = ic->ic_opmode;
	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);

	/*
	 * create relation to GLD
	 */
	macp = mac_alloc(MAC_VERSION);
	if (NULL == macp) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do mac_alloc()\n");
		goto attach_fail15;
	}

	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
	macp->m_driver		= sc;
	macp->m_dip		= dip;
	macp->m_src_addr	= ic->ic_macaddr;
	macp->m_callbacks	= &iwp_m_callbacks;
	macp->m_min_sdu		= 0;
	macp->m_max_sdu		= IEEE80211_MTU;
	macp->m_pdata		= &wd;
	macp->m_pdata_size	= sizeof (wd);

	/*
	 * Register the macp to mac
	 */
	err = mac_register(macp, &ic->ic_mach);
	mac_free(macp);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do mac_register()\n");
		goto attach_fail15;
	}

	/*
	 * Create minor node of type DDI_NT_NET_WIFI.
	 * Failure here is only warned about, not fatal to attach.
	 */
	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_SP"%d", instance);
	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
	    instance + 1, DDI_NT_NET_WIFI, 0);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do ddi_create_minor_node()\n");
	}

	/*
	 * Notify link is down now
	 */
	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);

	/*
	 * create the mf thread to handle the link status,
	 * recovery fatal error, etc.
	 */
	sc->sc_mf_thread_switch = 1;
	if (NULL == sc->sc_mf_thread) {
		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
		    iwp_thread, sc, 0, &p0, TS_RUN, minclsyspri);
	}

	atomic_or_32(&sc->sc_flags, IWP_F_ATTACHED);

	return (DDI_SUCCESS);

	/* unwind in strict reverse order of the setup above */
attach_fail15:
	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
attach_fail14:
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
attach_fail13:
	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;
attach_fail12:
	ieee80211_detach(ic);
	iwp_free_fw_dma(sc);
attach_fail10:
	iwp_ring_free(sc);
attach_fail9:
	iwp_free_kw(sc);
attach_fail8:
	iwp_free_shared(sc);
attach_fail7:
	iwp_destroy_locks(sc);
attach_fail6:
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
attach_fail5:
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
attach_fail4:
	ddi_regs_map_free(&sc->sc_handle);
attach_fail3:
	ddi_regs_map_free(&sc->sc_cfg_handle);
attach_fail2:
	ddi_soft_state_free(iwp_soft_state_p, instance);
attach_fail1:
	return (DDI_FAILURE);
}
904 
/*
 * Detach entry point.
 *
 * DDI_SUSPEND: stop rate control / error recovery, mark the instance
 * suspended, and quiesce the hardware if it was running.
 *
 * DDI_DETACH: stop the monitor thread, disable the MAC, stop the chip,
 * then release every resource acquired in iwp_attach() in reverse
 * order (GLD, DMA memory, interrupts, net80211, locks, register maps,
 * minor node, soft state).
 */
int
iwp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	iwp_sc_t *sc;
	ieee80211com_t	*ic;
	int err;

	sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
	ASSERT(sc != NULL);
	ic = &sc->sc_ic;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
		atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);

		atomic_or_32(&sc->sc_flags, IWP_F_SUSPEND);

		if (sc->sc_flags & IWP_F_RUNNING) {
			iwp_stop(sc);
			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
		}

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_detach(): "
		    "suspend\n"));
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (!(sc->sc_flags & IWP_F_ATTACHED)) {
		return (DDI_FAILURE);
	}

	/*
	 * Destroy the mf_thread: clear its run switch, then wait for
	 * it to signal sc_mt_cv and clear sc_mf_thread on exit.
	 */
	sc->sc_mf_thread_switch = 0;

	mutex_enter(&sc->sc_mt_lock);
	while (sc->sc_mf_thread != NULL) {
		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) {
			break;
		}
	}
	mutex_exit(&sc->sc_mt_lock);

	/* mac_disable() can refuse, in which case detach fails */
	err = mac_disable(sc->sc_ic.ic_mach);
	if (err != DDI_SUCCESS) {
		return (err);
	}

	/*
	 * stop chipset
	 */
	iwp_stop(sc);

	DELAY(500000);

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	/*
	 * Unregister from GLD
	 */
	(void) mac_unregister(sc->sc_ic.ic_mach);

	mutex_enter(&sc->sc_glock);
	iwp_free_fw_dma(sc);
	iwp_ring_free(sc);
	iwp_free_kw(sc);
	iwp_free_shared(sc);
	mutex_exit(&sc->sc_glock);

	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));

	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;

	/*
	 * detach from 80211 module
	 */
	ieee80211_detach(&sc->sc_ic);

	iwp_destroy_locks(sc);

	ddi_regs_map_free(&sc->sc_handle);
	ddi_regs_map_free(&sc->sc_cfg_handle);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(iwp_soft_state_p, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}
1006 
1007 /*
1008  * destroy all locks
1009  */
1010 static void
iwp_destroy_locks(iwp_sc_t * sc)1011 iwp_destroy_locks(iwp_sc_t *sc)
1012 {
1013 	cv_destroy(&sc->sc_mt_cv);
1014 	cv_destroy(&sc->sc_cmd_cv);
1015 	cv_destroy(&sc->sc_put_seg_cv);
1016 	cv_destroy(&sc->sc_ucode_cv);
1017 	mutex_destroy(&sc->sc_mt_lock);
1018 	mutex_destroy(&sc->sc_tx_lock);
1019 	mutex_destroy(&sc->sc_glock);
1020 }
1021 
1022 /*
1023  * Allocate an area of memory and a DMA handle for accessing it
1024  */
1025 static int
iwp_alloc_dma_mem(iwp_sc_t * sc,size_t memsize,ddi_dma_attr_t * dma_attr_p,ddi_device_acc_attr_t * acc_attr_p,uint_t dma_flags,iwp_dma_t * dma_p)1026 iwp_alloc_dma_mem(iwp_sc_t *sc, size_t memsize,
1027     ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
1028     uint_t dma_flags, iwp_dma_t *dma_p)
1029 {
1030 	caddr_t vaddr;
1031 	int err = DDI_FAILURE;
1032 
1033 	/*
1034 	 * Allocate handle
1035 	 */
1036 	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
1037 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
1038 	if (err != DDI_SUCCESS) {
1039 		dma_p->dma_hdl = NULL;
1040 		return (DDI_FAILURE);
1041 	}
1042 
1043 	/*
1044 	 * Allocate memory
1045 	 */
1046 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
1047 	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
1048 	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
1049 	if (err != DDI_SUCCESS) {
1050 		ddi_dma_free_handle(&dma_p->dma_hdl);
1051 		dma_p->dma_hdl = NULL;
1052 		dma_p->acc_hdl = NULL;
1053 		return (DDI_FAILURE);
1054 	}
1055 
1056 	/*
1057 	 * Bind the two together
1058 	 */
1059 	dma_p->mem_va = vaddr;
1060 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
1061 	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
1062 	    &dma_p->cookie, &dma_p->ncookies);
1063 	if (err != DDI_DMA_MAPPED) {
1064 		ddi_dma_mem_free(&dma_p->acc_hdl);
1065 		ddi_dma_free_handle(&dma_p->dma_hdl);
1066 		dma_p->acc_hdl = NULL;
1067 		dma_p->dma_hdl = NULL;
1068 		return (DDI_FAILURE);
1069 	}
1070 
1071 	dma_p->nslots = ~0U;
1072 	dma_p->size = ~0U;
1073 	dma_p->token = ~0U;
1074 	dma_p->offset = 0;
1075 	return (DDI_SUCCESS);
1076 }
1077 
1078 /*
1079  * Free one allocated area of DMAable memory
1080  */
1081 static void
iwp_free_dma_mem(iwp_dma_t * dma_p)1082 iwp_free_dma_mem(iwp_dma_t *dma_p)
1083 {
1084 	if (dma_p->dma_hdl != NULL) {
1085 		if (dma_p->ncookies) {
1086 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1087 			dma_p->ncookies = 0;
1088 		}
1089 		ddi_dma_free_handle(&dma_p->dma_hdl);
1090 		dma_p->dma_hdl = NULL;
1091 	}
1092 
1093 	if (dma_p->acc_hdl != NULL) {
1094 		ddi_dma_mem_free(&dma_p->acc_hdl);
1095 		dma_p->acc_hdl = NULL;
1096 	}
1097 }
1098 
1099 /*
1100  * copy ucode into dma buffers
1101  */
static int
iwp_alloc_fw_dma(iwp_sc_t *sc)
{
	int err = DDI_FAILURE;
	iwp_dma_t *dma_p;
	char *t;	/* walks the image buffer section by section */

	/*
	 * firmware image layout:
	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
	 */

	/*
	 * Check firmware image size: each section must fit in its
	 * on-chip region (RTC_INST_SIZE for text sections,
	 * RTC_DATA_SIZE for data sections) or the image is rejected.
	 */
	if (LE_32(sc->sc_hdr->init_textsz) > RTC_INST_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware init text size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->init_textsz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->init_datasz) > RTC_DATA_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware init data size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->init_datasz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->textsz) > RTC_INST_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware text size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->textsz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->datasz) > RTC_DATA_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware data size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->datasz));

		goto fail;
	}

	/*
	 * copy text of runtime ucode
	 * (the image sections start immediately after the header)
	 */
	t = (char *)(sc->sc_hdr + 1);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_text);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate text dma memory.\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_text;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "text[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));

	/*
	 * copy data and bak-data of runtime ucode.
	 * Note t is NOT advanced between the two copies below, so
	 * fw_data and fw_data_bak receive identical contents; the
	 * backup copy is what iwp_load_run_firmware() actually loads.
	 */
	t += LE_32(sc->sc_hdr->textsz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate data dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_data;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "data[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data_bak);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate data bakup dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_data_bak;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "data_bak[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	/*
	 * copy text of init ucode
	 */
	t += LE_32(sc->sc_hdr->datasz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_text);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate init text dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_init_text;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "init_text[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));

	/*
	 * copy data of init ucode
	 */
	t += LE_32(sc->sc_hdr->init_textsz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_data);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate init data dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_init_data;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "init_data[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));

	/*
	 * sc_boot points at the boot section inside the original image
	 * buffer; no separate DMA copy is made for it here.
	 */
	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
fail:
	/*
	 * NOTE(review): buffers allocated before a failure are not
	 * released here; presumably the caller invokes
	 * iwp_free_fw_dma() on error -- confirm.
	 */
	return (err);
}
1267 
1268 static void
iwp_free_fw_dma(iwp_sc_t * sc)1269 iwp_free_fw_dma(iwp_sc_t *sc)
1270 {
1271 	iwp_free_dma_mem(&sc->sc_dma_fw_text);
1272 	iwp_free_dma_mem(&sc->sc_dma_fw_data);
1273 	iwp_free_dma_mem(&sc->sc_dma_fw_data_bak);
1274 	iwp_free_dma_mem(&sc->sc_dma_fw_init_text);
1275 	iwp_free_dma_mem(&sc->sc_dma_fw_init_data);
1276 }
1277 
1278 /*
1279  * Allocate a shared buffer between host and NIC.
1280  */
static int
iwp_alloc_shared(iwp_sc_t *sc)
{
#ifdef	DEBUG
	iwp_dma_t *dma_p;
#endif
	int err = DDI_FAILURE;

	/*
	 * must be aligned on a 4K-page boundary
	 * (alignment is encoded in sh_dma_attr)
	 */
	err = iwp_alloc_dma_mem(sc, sizeof (iwp_shared_t),
	    &sh_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_sh);
	if (err != DDI_SUCCESS) {
		goto fail;
	}

	/* publish the mapped area for the rest of the driver */
	sc->sc_shared = (iwp_shared_t *)sc->sc_dma_sh.mem_va;

#ifdef	DEBUG
	dma_p = &sc->sc_dma_sh;
#endif
	/*
	 * dma_p exists only under DEBUG; this compiles in non-DEBUG
	 * builds because IWP_DBG() presumably expands to nothing there
	 * -- NOTE(review): confirm against the IWP_DBG definition.
	 */
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_shared(): "
	    "sh[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);
fail:
	/* safe: iwp_alloc_dma_mem() NULLed the handles on failure */
	iwp_free_shared(sc);
	return (err);
}
1315 
1316 static void
iwp_free_shared(iwp_sc_t * sc)1317 iwp_free_shared(iwp_sc_t *sc)
1318 {
1319 	iwp_free_dma_mem(&sc->sc_dma_sh);
1320 }
1321 
1322 /*
1323  * Allocate a keep warm page.
1324  */
static int
iwp_alloc_kw(iwp_sc_t *sc)
{
#ifdef	DEBUG
	iwp_dma_t *dma_p;
#endif
	int err = DDI_FAILURE;

	/*
	 * must be aligned on a 4K-page boundary
	 * (alignment is encoded in kw_dma_attr)
	 */
	err = iwp_alloc_dma_mem(sc, IWP_KW_SIZE,
	    &kw_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_kw);
	if (err != DDI_SUCCESS) {
		goto fail;
	}

#ifdef	DEBUG
	dma_p = &sc->sc_dma_kw;
#endif
	/*
	 * dma_p exists only under DEBUG; compiles in non-DEBUG builds
	 * because IWP_DBG() presumably expands to nothing there --
	 * NOTE(review): confirm against the IWP_DBG definition.
	 */
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_kw(): "
	    "kw[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);
fail:
	/* safe: iwp_alloc_dma_mem() NULLed the handles on failure */
	iwp_free_kw(sc);
	return (err);
}
1357 
1358 static void
iwp_free_kw(iwp_sc_t * sc)1359 iwp_free_kw(iwp_sc_t *sc)
1360 {
1361 	iwp_free_dma_mem(&sc->sc_dma_kw);
1362 }
1363 
1364 /*
1365  * initialize RX ring buffers
1366  */
static int
iwp_alloc_rx_ring(iwp_sc_t *sc)
{
	iwp_rx_ring_t *ring;
	iwp_rx_data_t *data;
#ifdef	DEBUG
	iwp_dma_t *dma_p;
#endif
	int i, err = DDI_FAILURE;

	ring = &sc->sc_rxq;
	ring->cur = 0;

	/*
	 * allocate RX description ring buffer
	 * (one 32-bit descriptor word per RX queue entry)
	 */
	err = iwp_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
	    &ring_desc_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
		    "dma alloc rx ring desc "
		    "failed\n"));
		goto fail;
	}

	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
#ifdef	DEBUG
	dma_p = &ring->dma_desc;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/*
	 * Allocate Rx frame buffers.
	 * Each descriptor word is the frame buffer's DMA address.
	 */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		data = &ring->data[i];
		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &rx_buffer_dma_attr, &iwp_dma_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
			    "dma alloc rx ring "
			    "buf[%d] failed\n", i));
			goto fail;
		}
		/*
		 * the physical address bit [8-36] are used,
		 * instead of bit [0-31] in 3945.
		 */
		ring->desc[i] = (uint32_t)
		    (data->dma_data.cookie.dmac_address >> 8);
	}

#ifdef	DEBUG
	dma_p = &ring->data[0].dma_data;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/* push the descriptor table out to the device */
	IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	return (err);

fail:
	/* safe on partial allocation: unallocated entries have NULL hdls */
	iwp_free_rx_ring(sc);
	return (err);
}
1443 
1444 /*
1445  * disable RX ring
1446  */
1447 static void
iwp_reset_rx_ring(iwp_sc_t * sc)1448 iwp_reset_rx_ring(iwp_sc_t *sc)
1449 {
1450 	int n;
1451 
1452 	iwp_mac_access_enter(sc);
1453 	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1454 	for (n = 0; n < 2000; n++) {
1455 		if (IWP_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) {
1456 			break;
1457 		}
1458 		DELAY(1000);
1459 	}
1460 #ifdef DEBUG
1461 	if (2000 == n) {
1462 		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_rx_ring(): "
1463 		    "timeout resetting Rx ring\n"));
1464 	}
1465 #endif
1466 	iwp_mac_access_exit(sc);
1467 
1468 	sc->sc_rxq.cur = 0;
1469 }
1470 
1471 static void
iwp_free_rx_ring(iwp_sc_t * sc)1472 iwp_free_rx_ring(iwp_sc_t *sc)
1473 {
1474 	int i;
1475 
1476 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1477 		if (sc->sc_rxq.data[i].dma_data.dma_hdl) {
1478 			IWP_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1479 			    DDI_DMA_SYNC_FORCPU);
1480 		}
1481 
1482 		iwp_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1483 	}
1484 
1485 	if (sc->sc_rxq.dma_desc.dma_hdl) {
1486 		IWP_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1487 	}
1488 
1489 	iwp_free_dma_mem(&sc->sc_rxq.dma_desc);
1490 }
1491 
1492 /*
1493  * initialize TX ring buffers
1494  */
static int
iwp_alloc_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring,
    int slots, int qid)
{
	iwp_tx_data_t *data;
	iwp_tx_desc_t *desc_h;
	uint32_t paddr_desc_h;
	iwp_cmd_t *cmd_h;
	uint32_t paddr_cmd_h;
#ifdef	DEBUG
	iwp_dma_t *dma_p;
#endif
	int i, err = DDI_FAILURE;
	/* window is the usable slot count; count is the full HW depth */
	ring->qid = qid;
	ring->count = TFD_QUEUE_SIZE_MAX;
	ring->window = slots;
	ring->queued = 0;
	ring->cur = 0;
	ring->desc_cur = 0;

	/*
	 * allocate buffer for TX descriptor ring
	 */
	err = iwp_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_tx_desc_t),
	    &ring_desc_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "dma alloc tx ring desc[%d] "
		    "failed\n", qid));
		goto fail;
	}

#ifdef	DEBUG
	dma_p = &ring->dma_desc;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	desc_h = (iwp_tx_desc_t *)ring->dma_desc.mem_va;
	paddr_desc_h = ring->dma_desc.cookie.dmac_address;

	/*
	 * allocate buffer for ucode command
	 * (one iwp_cmd_t per descriptor slot)
	 */
	err = iwp_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_cmd_t),
	    &cmd_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_cmd);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "dma alloc tx ring cmd[%d]"
		    " failed\n", qid));
		goto fail;
	}

#ifdef	DEBUG
	dma_p = &ring->dma_cmd;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	cmd_h = (iwp_cmd_t *)ring->dma_cmd.mem_va;
	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;

	/*
	 * Allocate Tx frame buffers.
	 * zalloc matters: the fail path relies on NULL dma_hdl in
	 * entries that were never allocated.
	 */
	ring->data = kmem_zalloc(sizeof (iwp_tx_data_t) * TFD_QUEUE_SIZE_MAX,
	    KM_NOSLEEP);
	if (NULL == ring->data) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "could not allocate "
		    "tx data slots\n"));
		goto fail;
	}

	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
		data = &ring->data[i];
		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &tx_buffer_dma_attr, &iwp_dma_accattr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
			    "dma alloc tx "
			    "ring buf[%d] failed\n", i));
			goto fail;
		}

		/*
		 * give each slot its virtual descriptor/command entry
		 * plus the matching device-visible physical address
		 */
		data->desc = desc_h + i;
		data->paddr_desc = paddr_desc_h +
		    _PTRDIFF(data->desc, desc_h);
		data->cmd = cmd_h +  i;
		data->paddr_cmd = paddr_cmd_h +
		    _PTRDIFF(data->cmd, cmd_h);
	}
#ifdef	DEBUG
	dma_p = &ring->data[0].dma_data;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);

fail:
	/* safe on partial allocation; see kmem_zalloc note above */
	iwp_free_tx_ring(ring);

	return (err);
}
1615 
1616 /*
1617  * disable TX ring
1618  */
1619 static void
iwp_reset_tx_ring(iwp_sc_t * sc,iwp_tx_ring_t * ring)1620 iwp_reset_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring)
1621 {
1622 	iwp_tx_data_t *data;
1623 	int i, n;
1624 
1625 	iwp_mac_access_enter(sc);
1626 
1627 	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1628 	for (n = 0; n < 200; n++) {
1629 		if (IWP_READ(sc, IWP_FH_TSSR_TX_STATUS_REG) &
1630 		    IWP_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) {
1631 			break;
1632 		}
1633 		DELAY(10);
1634 	}
1635 
1636 #ifdef	DEBUG
1637 	if (200 == n) {
1638 		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_tx_ring(): "
1639 		    "timeout reset tx ring %d\n",
1640 		    ring->qid));
1641 	}
1642 #endif
1643 
1644 	iwp_mac_access_exit(sc);
1645 
1646 	/* by pass, if it's quiesce */
1647 	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
1648 		for (i = 0; i < ring->count; i++) {
1649 			data = &ring->data[i];
1650 			IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1651 		}
1652 	}
1653 
1654 	ring->queued = 0;
1655 	ring->cur = 0;
1656 	ring->desc_cur = 0;
1657 }
1658 
1659 static void
iwp_free_tx_ring(iwp_tx_ring_t * ring)1660 iwp_free_tx_ring(iwp_tx_ring_t *ring)
1661 {
1662 	int i;
1663 
1664 	if (ring->dma_desc.dma_hdl != NULL) {
1665 		IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1666 	}
1667 	iwp_free_dma_mem(&ring->dma_desc);
1668 
1669 	if (ring->dma_cmd.dma_hdl != NULL) {
1670 		IWP_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1671 	}
1672 	iwp_free_dma_mem(&ring->dma_cmd);
1673 
1674 	if (ring->data != NULL) {
1675 		for (i = 0; i < ring->count; i++) {
1676 			if (ring->data[i].dma_data.dma_hdl) {
1677 				IWP_DMA_SYNC(ring->data[i].dma_data,
1678 				    DDI_DMA_SYNC_FORDEV);
1679 			}
1680 			iwp_free_dma_mem(&ring->data[i].dma_data);
1681 		}
1682 		kmem_free(ring->data, ring->count * sizeof (iwp_tx_data_t));
1683 	}
1684 }
1685 
1686 /*
1687  * initialize TX and RX ring
1688  */
1689 static int
iwp_ring_init(iwp_sc_t * sc)1690 iwp_ring_init(iwp_sc_t *sc)
1691 {
1692 	int i, err = DDI_FAILURE;
1693 
1694 	for (i = 0; i < IWP_NUM_QUEUES; i++) {
1695 		if (IWP_CMD_QUEUE_NUM == i) {
1696 			continue;
1697 		}
1698 
1699 		err = iwp_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1700 		    i);
1701 		if (err != DDI_SUCCESS) {
1702 			goto fail;
1703 		}
1704 	}
1705 
1706 	/*
1707 	 * initialize command queue
1708 	 */
1709 	err = iwp_alloc_tx_ring(sc, &sc->sc_txq[IWP_CMD_QUEUE_NUM],
1710 	    TFD_CMD_SLOTS, IWP_CMD_QUEUE_NUM);
1711 	if (err != DDI_SUCCESS) {
1712 		goto fail;
1713 	}
1714 
1715 	err = iwp_alloc_rx_ring(sc);
1716 	if (err != DDI_SUCCESS) {
1717 		goto fail;
1718 	}
1719 
1720 fail:
1721 	return (err);
1722 }
1723 
1724 static void
iwp_ring_free(iwp_sc_t * sc)1725 iwp_ring_free(iwp_sc_t *sc)
1726 {
1727 	int i = IWP_NUM_QUEUES;
1728 
1729 	iwp_free_rx_ring(sc);
1730 	while (--i >= 0) {
1731 		iwp_free_tx_ring(&sc->sc_txq[i]);
1732 	}
1733 }
1734 
1735 /* ARGSUSED */
1736 static ieee80211_node_t *
iwp_node_alloc(ieee80211com_t * ic)1737 iwp_node_alloc(ieee80211com_t *ic)
1738 {
1739 	iwp_amrr_t *amrr;
1740 
1741 	amrr = kmem_zalloc(sizeof (iwp_amrr_t), KM_SLEEP);
1742 	if (NULL == amrr) {
1743 		cmn_err(CE_WARN, "iwp_node_alloc(): "
1744 		    "failed to allocate memory for amrr structure\n");
1745 		return (NULL);
1746 	}
1747 
1748 	iwp_amrr_init(amrr);
1749 
1750 	return (&amrr->in);
1751 }
1752 
1753 static void
iwp_node_free(ieee80211_node_t * in)1754 iwp_node_free(ieee80211_node_t *in)
1755 {
1756 	ieee80211com_t *ic;
1757 
1758 	if ((NULL == in) ||
1759 	    (NULL == in->in_ic)) {
1760 		cmn_err(CE_WARN, "iwp_node_free() "
1761 		    "Got a NULL point from Net80211 module\n");
1762 		return;
1763 	}
1764 	ic = in->in_ic;
1765 
1766 	if (ic->ic_node_cleanup != NULL) {
1767 		ic->ic_node_cleanup(in);
1768 	}
1769 
1770 	if (in->in_wpa_ie != NULL) {
1771 		ieee80211_free(in->in_wpa_ie);
1772 	}
1773 
1774 	if (in->in_wme_ie != NULL) {
1775 		ieee80211_free(in->in_wme_ie);
1776 	}
1777 
1778 	if (in->in_htcap_ie != NULL) {
1779 		ieee80211_free(in->in_htcap_ie);
1780 	}
1781 
1782 	kmem_free(in, sizeof (iwp_amrr_t));
1783 }
1784 
1785 
1786 /*
1787  * change station's state. this function will be invoked by 80211 module
1788  * when need to change staton's state.
1789  */
static int
iwp_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
{
	iwp_sc_t *sc;
	ieee80211_node_t *in;
	enum ieee80211_state ostate;
	iwp_add_sta_t node;
	int i, err = IWP_FAIL;

	if (NULL == ic) {
		return (err);
	}
	sc = (iwp_sc_t *)ic;
	in = ic->ic_bss;
	ostate = ic->ic_state;

	/*
	 * sc_glock is held across the hardware programming below; every
	 * early-return path must (and does) drop it first.
	 */
	mutex_enter(&sc->sc_glock);

	switch (nstate) {
	case IEEE80211_S_SCAN:
		switch (ostate) {
		case IEEE80211_S_INIT:
			atomic_or_32(&sc->sc_flags, IWP_F_SCANNING);
			iwp_set_led(sc, 2, 10, 2);

			/*
			 * clear association to receive beacons from
			 * all BSS'es
			 */
			sc->sc_config.assoc_id = 0;
			sc->sc_config.filter_flags &=
			    ~LE_32(RXON_FILTER_ASSOC_MSK);

			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "config chan %d "
			    "flags %x filter_flags %x\n",
			    LE_16(sc->sc_config.chan),
			    LE_32(sc->sc_config.flags),
			    LE_32(sc->sc_config.filter_flags)));

			/* push the cleared RXON config to the firmware */
			err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
			    sizeof (iwp_rxon_cmd_t), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not clear association\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}

			/* add broadcast node to send probe request */
			(void) memset(&node, 0, sizeof (node));
			(void) memset(&node.sta.addr, 0xff, IEEE80211_ADDR_LEN);
			node.sta.sta_id = IWP_BROADCAST_ID;
			err = iwp_cmd(sc, REPLY_ADD_STA, &node,
			    sizeof (node), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not add broadcast node\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}
			break;
		case IEEE80211_S_SCAN:
			/*
			 * SCAN -> SCAN: the glock is dropped around the
			 * net80211 callback (which may call back into
			 * the driver), then retaken for iwp_scan().
			 * This path returns directly and never falls
			 * through to the trailing sc_newstate() call.
			 */
			mutex_exit(&sc->sc_glock);
			/* step to next channel before actual FW scan */
			err = sc->sc_newstate(ic, nstate, arg);
			mutex_enter(&sc->sc_glock);
			if ((err != 0) || ((err = iwp_scan(sc)) != 0)) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not initiate scan\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				ieee80211_cancel_scan(ic);
			}
			mutex_exit(&sc->sc_glock);
			return (err);
		default:
			break;
		}
		sc->sc_clk = 0;
		break;

	case IEEE80211_S_AUTH:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		/*
		 * reset state to handle reassociations correctly
		 */
		sc->sc_config.assoc_id = 0;
		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);

		/*
		 * before sending authentication and association request frame,
		 * we need do something in the hardware, such as setting the
		 * channel same to the target AP...
		 */
		if ((err = iwp_hw_set_before_auth(sc)) != 0) {
			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "could not send authentication request\n"));
			mutex_exit(&sc->sc_glock);
			return (err);
		}
		break;

	case IEEE80211_S_RUN:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		if (IEEE80211_M_MONITOR == ic->ic_opmode) {
			/* let LED blink when monitoring */
			iwp_set_led(sc, 2, 10, 10);
			break;
		}

		IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
		    "associated.\n"));

		err = iwp_run_state_config(sc);
		if (err != IWP_SUCCESS) {
			cmn_err(CE_WARN, "iwp_newstate(): "
			    "failed to set up association\n");
			mutex_exit(&sc->sc_glock);
			return (err);
		}

		/*
		 * start automatic rate control
		 */
		if (IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) {
			atomic_or_32(&sc->sc_flags, IWP_F_RATE_AUTO_CTL);

			/*
			 * set rate to some reasonable initial value
			 * (highest basic rate not above 36 Mbps; rates
			 * are in 500 kbps units, so 72 == 36 Mbps)
			 */
			i = in->in_rates.ir_nrates - 1;
			while (i > 0 && IEEE80211_RATE(i) > 72) {
				i--;
			}
			in->in_txrate = i;

		} else {
			atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
		}

		/*
		 * set LED on after associated
		 */
		iwp_set_led(sc, 2, 0, 1);
		break;

	case IEEE80211_S_INIT:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		/*
		 * set LED off after init
		 */
		iwp_set_led(sc, 2, 1, 0);
		break;

	case IEEE80211_S_ASSOC:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		break;
	}

	mutex_exit(&sc->sc_glock);

	/* chain to the net80211 state machine for the common work */
	return (sc->sc_newstate(ic, nstate, arg));
}
1965 
1966 /*
1967  * exclusive access to mac begin.
1968  */
1969 static void
iwp_mac_access_enter(iwp_sc_t * sc)1970 iwp_mac_access_enter(iwp_sc_t *sc)
1971 {
1972 	uint32_t tmp;
1973 	int n;
1974 
1975 	tmp = IWP_READ(sc, CSR_GP_CNTRL);
1976 	IWP_WRITE(sc, CSR_GP_CNTRL,
1977 	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1978 
1979 	/* wait until we succeed */
1980 	for (n = 0; n < 1000; n++) {
1981 		if ((IWP_READ(sc, CSR_GP_CNTRL) &
1982 		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1983 		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1984 		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) {
1985 			break;
1986 		}
1987 		DELAY(10);
1988 	}
1989 
1990 #ifdef	DEBUG
1991 	if (1000 == n) {
1992 		IWP_DBG((IWP_DEBUG_PIO, "iwp_mac_access_enter(): "
1993 		    "could not lock memory\n"));
1994 	}
1995 #endif
1996 }
1997 
1998 /*
1999  * exclusive access to mac end.
2000  */
2001 static void
iwp_mac_access_exit(iwp_sc_t * sc)2002 iwp_mac_access_exit(iwp_sc_t *sc)
2003 {
2004 	uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
2005 	IWP_WRITE(sc, CSR_GP_CNTRL,
2006 	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2007 }
2008 
2009 /*
2010  * this function defined here for future use.
2011  * static uint32_t
2012  * iwp_mem_read(iwp_sc_t *sc, uint32_t addr)
2013  * {
2014  * 	IWP_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
2015  * 	return (IWP_READ(sc, HBUS_TARG_MEM_RDAT));
2016  * }
2017  */
2018 
2019 /*
2020  * write mac memory
2021  */
2022 static void
iwp_mem_write(iwp_sc_t * sc,uint32_t addr,uint32_t data)2023 iwp_mem_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
2024 {
2025 	IWP_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
2026 	IWP_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
2027 }
2028 
2029 /*
2030  * read mac register
2031  */
2032 static uint32_t
iwp_reg_read(iwp_sc_t * sc,uint32_t addr)2033 iwp_reg_read(iwp_sc_t *sc, uint32_t addr)
2034 {
2035 	IWP_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
2036 	return (IWP_READ(sc, HBUS_TARG_PRPH_RDAT));
2037 }
2038 
2039 /*
2040  * write mac register
2041  */
2042 static void
iwp_reg_write(iwp_sc_t * sc,uint32_t addr,uint32_t data)2043 iwp_reg_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
2044 {
2045 	IWP_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
2046 	IWP_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
2047 }
2048 
2049 
2050 /*
2051  * steps of loading ucode:
2052  * load init ucode=>init alive=>calibrate=>
2053  * receive calibration result=>reinitialize NIC=>
2054  * load runtime ucode=>runtime alive=>
2055  * send calibration result=>running.
2056  */
static int
iwp_load_init_firmware(iwp_sc_t *sc)
{
	int	err = IWP_FAIL;
	clock_t	clk;

	/*
	 * Caller must hold sc_glock: cv_timedwait() below drops and
	 * reacquires it.  IWP_F_PUT_SEG is presumably set by the
	 * transfer-completion interrupt path -- confirm against the ISR.
	 */
	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_text section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_text.cookie.dmac_address,
	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_init_text.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "failed to write init uCode.\n");
		return (err);
	}

	/* one-second deadline for the transfer to complete */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/* wait loading init_text until completed or timeout */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "timeout waiting for init uCode load.\n");
		return (IWP_FAIL);
	}

	/* re-arm the completion flag for the next segment */
	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_data section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_data.cookie.dmac_address,
	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_init_data.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "failed to write init_data uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/*
	 * wait loading init_data until completed or timeout
	 */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "timeout waiting for init_data uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	return (err);
}
2125 
2126 static int
iwp_load_run_firmware(iwp_sc_t * sc)2127 iwp_load_run_firmware(iwp_sc_t *sc)
2128 {
2129 	int	err = IWP_FAIL;
2130 	clock_t	clk;
2131 
2132 	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);
2133 
2134 	/*
2135 	 * load init_text section of uCode to hardware
2136 	 */
2137 	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_text.cookie.dmac_address,
2138 	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_text.cookie.dmac_size);
2139 	if (err != IWP_SUCCESS) {
2140 		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
2141 		    "failed to write run uCode.\n");
2142 		return (err);
2143 	}
2144 
2145 	clk = ddi_get_lbolt() + drv_usectohz(1000000);
2146 
2147 	/* wait loading run_text until completed or timeout */
2148 	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2149 		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2150 			break;
2151 		}
2152 	}
2153 
2154 	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2155 		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
2156 		    "timeout waiting for run uCode load.\n");
2157 		return (IWP_FAIL);
2158 	}
2159 
2160 	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);
2161 
2162 	/*
2163 	 * load run_data section of uCode to hardware
2164 	 */
2165 	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_data_bak.cookie.dmac_address,
2166 	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_data.cookie.dmac_size);
2167 	if (err != IWP_SUCCESS) {
2168 		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
2169 		    "failed to write run_data uCode.\n");
2170 		return (err);
2171 	}
2172 
2173 	clk = ddi_get_lbolt() + drv_usectohz(1000000);
2174 
2175 	/*
2176 	 * wait loading run_data until completed or timeout
2177 	 */
2178 	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2179 		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2180 			break;
2181 		}
2182 	}
2183 
2184 	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
2185 		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
2186 		    "timeout waiting for run_data uCode load.\n");
2187 		return (IWP_FAIL);
2188 	}
2189 
2190 	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);
2191 
2192 	return (err);
2193 }
2194 
2195 /*
2196  * this function will be invoked to receive phy information
2197  * when a frame is received.
2198  */
2199 static void
iwp_rx_phy_intr(iwp_sc_t * sc,iwp_rx_desc_t * desc)2200 iwp_rx_phy_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2201 {
2202 
2203 	sc->sc_rx_phy_res.flag = 1;
2204 
2205 	(void) memcpy(sc->sc_rx_phy_res.buf, (uint8_t *)(desc + 1),
2206 	    sizeof (iwp_rx_phy_res_t));
2207 }
2208 
/*
 * this function will be invoked to receive body of frame when
 * a frame is received.
 *
 * Consumes the PHY statistics previously saved by iwp_rx_phy_intr(),
 * derives an RSSI percentage, validates frame length and CRC status,
 * then copies the frame into an mblk and hands it to net80211.
 */
static void
iwp_rx_mpdu_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	ieee80211com_t	*ic = &sc->sc_ic;
#ifdef	DEBUG
	iwp_rx_ring_t	*ring = &sc->sc_rxq;
#endif
	struct ieee80211_frame		*wh;
	struct iwp_rx_non_cfg_phy	*phyinfo;
	struct	iwp_rx_mpdu_body_size	*mpdu_size;

	mblk_t			*mp;
	int16_t			t;
	uint16_t		len, rssi, agc;
	uint32_t		temp, crc, *tail;
	uint32_t		arssi, brssi, crssi, mrssi;
	iwp_rx_phy_res_t	*stat;
	ieee80211_node_t	*in;

	/*
	 * assuming not 11n here. cope with 11n in phase-II
	 */
	mpdu_size = (struct iwp_rx_mpdu_body_size *)(desc + 1);
	stat = (iwp_rx_phy_res_t *)sc->sc_rx_phy_res.buf;
	/* sanity bound on the configured-PHY element count; drop if bogus */
	if (stat->cfg_phy_cnt > 20) {
		return;
	}

	/*
	 * extract AGC and per-chain (A/B/C) RSSI words from the
	 * non-configuration PHY data saved by iwp_rx_phy_intr()
	 */
	phyinfo = (struct iwp_rx_non_cfg_phy *)stat->non_cfg_phy;
	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_AGC_IDX]);
	agc = (temp & IWP_OFDM_AGC_MSK) >> IWP_OFDM_AGC_BIT_POS;

	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_AB_IDX]);
	arssi = (temp & IWP_OFDM_RSSI_A_MSK) >> IWP_OFDM_RSSI_A_BIT_POS;
	brssi = (temp & IWP_OFDM_RSSI_B_MSK) >> IWP_OFDM_RSSI_B_BIT_POS;

	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_C_IDX]);
	crssi = (temp & IWP_OFDM_RSSI_C_MSK) >> IWP_OFDM_RSSI_C_BIT_POS;

	/* use the strongest chain as the reported signal */
	mrssi = MAX(arssi, brssi);
	mrssi = MAX(mrssi, crssi);

	t = mrssi - agc - IWP_RSSI_OFFSET;
	/*
	 * convert dBm to percentage
	 *
	 * NOTE(review): rssi is unsigned; if the polynomial below goes
	 * negative it wraps to a large value and is clamped to 100
	 * rather than 1 — confirm intended behavior for very weak
	 * signals.
	 */
	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
	    / (75 * 75);
	if (rssi > 100) {
		rssi = 100;
	}
	if (rssi < 1) {
		rssi = 1;
	}

	/*
	 * size of frame, not include FCS
	 */
	len = LE_16(mpdu_size->byte_count);
	/* the 32-bit status word the firmware appends after the frame body */
	tail = (uint32_t *)((uint8_t *)(desc + 1) +
	    sizeof (struct iwp_rx_mpdu_body_size) + len);
	bcopy(tail, &crc, 4);

	IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
	    "rx intr: idx=%d phy_len=%x len=%d "
	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
	    len, stat->rate.r.s.rate, stat->channel,
	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
	    stat->cfg_phy_cnt, LE_32(crc)));

	/* reject runt frames and frames larger than our rx DMA buffers */
	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx frame oversize\n"));
		return;
	}

	/*
	 * discard Rx frames with bad CRC
	 */
	if ((LE_32(crc) &
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx crc error tail: %x\n",
		    LE_32(crc)));
		sc->sc_rx_err++;
		return;
	}

	wh = (struct ieee80211_frame *)
	    ((uint8_t *)(desc + 1)+ sizeof (struct iwp_rx_mpdu_body_size));

	/*
	 * snoop the association ID out of association responses.
	 * NOTE(review): this reads the AID field by raw offset from the
	 * frame header — assumes little-endian layout; verify.
	 */
	if (IEEE80211_FC0_SUBTYPE_ASSOC_RESP == *(uint8_t *)wh) {
		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx : association id = %x\n",
		    sc->sc_assoc_id));
	}

#ifdef DEBUG
	if (iwp_dbg_flags & IWP_DEBUG_RX) {
		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
	}
#endif

	in = ieee80211_find_rxnode(ic, wh);
	mp = allocb(len, BPRI_MED);
	if (mp) {
		(void) memcpy(mp->b_wptr, wh, len);
		mp->b_wptr += len;

		/*
		 * send the frame to the 802.11 layer
		 */
		(void) ieee80211_input(ic, mp, in, rssi, 0);
	} else {
		/* allocation failure: count the drop, keep running */
		sc->sc_rx_nobuf++;
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "alloc rx buf failed\n"));
	}

	/*
	 * release node reference
	 */
	ieee80211_free_node(in);
}
2340 
2341 /*
2342  * process correlative affairs after a frame is sent.
2343  */
2344 static void
iwp_tx_intr(iwp_sc_t * sc,iwp_rx_desc_t * desc)2345 iwp_tx_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2346 {
2347 	ieee80211com_t *ic = &sc->sc_ic;
2348 	iwp_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2349 	iwp_tx_stat_t *stat = (iwp_tx_stat_t *)(desc + 1);
2350 	iwp_amrr_t *amrr;
2351 
2352 	if (NULL == ic->ic_bss) {
2353 		return;
2354 	}
2355 
2356 	amrr = (iwp_amrr_t *)ic->ic_bss;
2357 
2358 	amrr->txcnt++;
2359 	IWP_DBG((IWP_DEBUG_RATECTL, "iwp_tx_intr(): "
2360 	    "tx: %d cnt\n", amrr->txcnt));
2361 
2362 	if (stat->ntries > 0) {
2363 		amrr->retrycnt++;
2364 		sc->sc_tx_retries++;
2365 		IWP_DBG((IWP_DEBUG_TX, "iwp_tx_intr(): "
2366 		    "tx: %d retries\n",
2367 		    sc->sc_tx_retries));
2368 	}
2369 
2370 	mutex_enter(&sc->sc_mt_lock);
2371 	sc->sc_tx_timer = 0;
2372 	mutex_exit(&sc->sc_mt_lock);
2373 
2374 	mutex_enter(&sc->sc_tx_lock);
2375 
2376 	ring->queued--;
2377 	if (ring->queued < 0) {
2378 		ring->queued = 0;
2379 	}
2380 
2381 	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count >> 3))) {
2382 		sc->sc_need_reschedule = 0;
2383 		mutex_exit(&sc->sc_tx_lock);
2384 		mac_tx_update(ic->ic_mach);
2385 		mutex_enter(&sc->sc_tx_lock);
2386 	}
2387 
2388 	mutex_exit(&sc->sc_tx_lock);
2389 }
2390 
2391 /*
2392  * inform a given command has been executed
2393  */
2394 static void
iwp_cmd_intr(iwp_sc_t * sc,iwp_rx_desc_t * desc)2395 iwp_cmd_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2396 {
2397 	if ((desc->hdr.qid & 7) != 4) {
2398 		return;
2399 	}
2400 
2401 	if (sc->sc_cmd_accum > 0) {
2402 		sc->sc_cmd_accum--;
2403 		return;
2404 	}
2405 
2406 	mutex_enter(&sc->sc_glock);
2407 
2408 	sc->sc_cmd_flag = SC_CMD_FLG_DONE;
2409 
2410 	cv_signal(&sc->sc_cmd_cv);
2411 
2412 	mutex_exit(&sc->sc_glock);
2413 
2414 	IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd_intr(): "
2415 	    "qid=%x idx=%d flags=%x type=0x%x\n",
2416 	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2417 	    desc->hdr.type));
2418 }
2419 
/*
 * this function will be invoked when alive notification occur.
 *
 * Handles both the "init" alive (from the bootstrap/init uCode image,
 * where we kick off hardware calibration) and the "runtime" alive
 * (where we replay the saved calibration results to the runtime image
 * and then signal iwp_ucode_cv so initialization can proceed).
 */
static void
iwp_ucode_alive(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	uint32_t rv;
	struct iwp_calib_cfg_cmd cmd;
	struct iwp_alive_resp *ar =
	    (struct iwp_alive_resp *)(desc + 1);
	struct iwp_calib_results *res_p = &sc->sc_calib_results;

	/*
	 * the microcontroller is ready
	 *
	 * NOTE(review): ucode_minor is printed twice below; the first
	 * argument presumably should be ucode_major — confirm against
	 * struct iwp_alive_resp.
	 */
	IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
	    "microcode alive notification minor: %x major: %x type: "
	    "%x subtype: %x\n",
	    ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype));

#ifdef	DEBUG
	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
		IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
		    "microcontroller initialization failed\n"));
	}
#endif

	/*
	 * determine if init alive or runtime alive.
	 */
	if (INITIALIZE_SUBTYPE == ar->ver_subtype) {
		IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
		    "initialization alive received.\n"));

		/* keep a copy of the init-image alive response */
		(void) memcpy(&sc->sc_card_alive_init, ar,
		    sizeof (struct iwp_init_alive_resp));

		/*
		 * necessary configuration to NIC
		 */
		mutex_enter(&sc->sc_glock);

		rv = iwp_alive_common(sc);
		if (rv != IWP_SUCCESS) {
			cmn_err(CE_WARN, "iwp_ucode_alive(): "
			    "common alive process failed in init alive.\n");
			mutex_exit(&sc->sc_glock);
			return;
		}

		(void) memset(&cmd, 0, sizeof (cmd));

		/* request every calibration, once, with results sent back */
		cmd.ucd_calib_cfg.once.is_enable = IWP_CALIB_INIT_CFG_ALL;
		cmd.ucd_calib_cfg.once.start = IWP_CALIB_INIT_CFG_ALL;
		cmd.ucd_calib_cfg.once.send_res = IWP_CALIB_INIT_CFG_ALL;
		cmd.ucd_calib_cfg.flags = IWP_CALIB_INIT_CFG_ALL;

		/*
		 * require ucode execute calibration
		 */
		rv = iwp_cmd(sc, CALIBRATION_CFG_CMD, &cmd, sizeof (cmd), 1);
		if (rv != IWP_SUCCESS) {
			cmn_err(CE_WARN, "iwp_ucode_alive(): "
			    "failed to send calibration configure command.\n");
			mutex_exit(&sc->sc_glock);
			return;
		}

		mutex_exit(&sc->sc_glock);

	} else {	/* runtime alive */

		IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
		    "runtime alive received.\n"));

		/* keep a copy of the runtime-image alive response */
		(void) memcpy(&sc->sc_card_alive_run, ar,
		    sizeof (struct iwp_alive_resp));

		mutex_enter(&sc->sc_glock);

		/*
		 * necessary configuration to NIC
		 */
		rv = iwp_alive_common(sc);
		if (rv != IWP_SUCCESS) {
			cmn_err(CE_WARN, "iwp_ucode_alive(): "
			    "common alive process failed in run alive.\n");
			mutex_exit(&sc->sc_glock);
			return;
		}

		/*
		 * Replay each saved calibration result (collected during
		 * the init-image run by iwp_save_calib_result()) to the
		 * runtime uCode.  A NULL result means that calibration
		 * was never received and is simply skipped.
		 */

		/*
		 * send the result of local oscilator calibration to uCode.
		 */
		if (res_p->lo_res != NULL) {
			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
			    res_p->lo_res, res_p->lo_res_len, 1);
			if (rv != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_ucode_alive(): "
				    "failed to send local"
				    "oscilator calibration command.\n");
				mutex_exit(&sc->sc_glock);
				return;
			}

			/* give the firmware time to absorb the result */
			DELAY(1000);
		}

		/*
		 * send the result of TX IQ calibration to uCode.
		 */
		if (res_p->tx_iq_res != NULL) {
			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
			    res_p->tx_iq_res, res_p->tx_iq_res_len, 1);
			if (rv != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_ucode_alive(): "
				    "failed to send TX IQ"
				    "calibration command.\n");
				mutex_exit(&sc->sc_glock);
				return;
			}

			DELAY(1000);
		}

		/*
		 * send the result of TX IQ perd calibration to uCode.
		 */
		if (res_p->tx_iq_perd_res != NULL) {
			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
			    res_p->tx_iq_perd_res,
			    res_p->tx_iq_perd_res_len, 1);
			if (rv != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_ucode_alive(): "
				    "failed to send TX IQ perd"
				    "calibration command.\n");
				mutex_exit(&sc->sc_glock);
				return;
			}

			DELAY(1000);
		}

		/*
		 * send the result of Base Band calibration to uCode.
		 */
		if (res_p->base_band_res != NULL) {
			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
			    res_p->base_band_res,
			    res_p->base_band_res_len, 1);
			if (rv != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_ucode_alive(): "
				    "failed to send Base Band"
				    "calibration command.\n");
				mutex_exit(&sc->sc_glock);
				return;
			}

			DELAY(1000);
		}

		/* firmware is fully up: wake anyone waiting in iwp_init() */
		atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
		cv_signal(&sc->sc_ucode_cv);

		mutex_exit(&sc->sc_glock);
	}

}
2588 
/*
 * deal with receiving frames, command response
 * and all notifications from ucode.
 *
 * Soft interrupt handler: drains the rx ring up to the index the
 * firmware advertises in the shared page, dispatching each descriptor
 * by notification type, then writes the read pointer back and
 * re-enables interrupts (which iwp_intr() disabled).
 */
/* ARGSUSED */
static uint_t
iwp_rx_softintr(caddr_t arg, caddr_t unused)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	iwp_rx_desc_t *desc;
	iwp_rx_data_t *data;
	uint32_t index;

	if (NULL == arg) {
		return (DDI_INTR_UNCLAIMED);
	}
	sc = (iwp_sc_t *)arg;
	ic = &sc->sc_ic;

	/*
	 * firmware has moved the index of the rx queue, driver get it,
	 * and deal with it.
	 */
	index = (sc->sc_shared->val0) & 0xfff;

	while (sc->sc_rxq.cur != index) {
		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
		desc = (iwp_rx_desc_t *)data->dma_data.mem_va;

		IWP_DBG((IWP_DEBUG_INTR, "iwp_rx_softintr(): "
		    "rx notification index = %d"
		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));

		/*
		 * a command other than a tx need to be replied
		 */
		if (!(desc->hdr.qid & 0x80) &&
		    (desc->hdr.type != REPLY_SCAN_CMD) &&
		    (desc->hdr.type != REPLY_TX)) {
			iwp_cmd_intr(sc, desc);
		}

		switch (desc->hdr.type) {
		case REPLY_RX_PHY_CMD:
			/* PHY statistics that precede an MPDU */
			iwp_rx_phy_intr(sc, desc);
			break;

		case REPLY_RX_MPDU_CMD:
			/* the received frame itself */
			iwp_rx_mpdu_intr(sc, desc);
			break;

		case REPLY_TX:
			/* tx completion status */
			iwp_tx_intr(sc, desc);
			break;

		case REPLY_ALIVE:
			/* firmware image finished booting */
			iwp_ucode_alive(sc, desc);
			break;

		case CARD_STATE_NOTIFICATION:
		{
			uint32_t *status = (uint32_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_RADIO, "iwp_rx_softintr(): "
			    "state changed to %x\n",
			    LE_32(*status)));

			if (LE_32(*status) & 1) {
				/*
				 * the radio button has to be pushed(OFF). It
				 * is considered as a hw error, the
				 * iwp_thread() tries to recover it after the
				 * button is pushed again(ON)
				 */
				cmn_err(CE_NOTE, "iwp_rx_softintr(): "
				    "radio transmitter is off\n");
				sc->sc_ostate = sc->sc_ic.ic_state;
				ieee80211_new_state(&sc->sc_ic,
				    IEEE80211_S_INIT, -1);
				atomic_or_32(&sc->sc_flags,
				    IWP_F_HW_ERR_RECOVER | IWP_F_RADIO_OFF);
			}

			break;
		}

		case SCAN_START_NOTIFICATION:
		{
			iwp_start_scan_t *scan =
			    (iwp_start_scan_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
			    "scanning channel %d status %x\n",
			    scan->chan, LE_32(scan->status)));

			/* track the channel the firmware is now scanning */
			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
			break;
		}

		case SCAN_COMPLETE_NOTIFICATION:
		{
#ifdef	DEBUG
			iwp_stop_scan_t *scan =
			    (iwp_stop_scan_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
			    "completed channel %d (burst of %d) status %02x\n",
			    scan->chan, scan->nchan, scan->status));
#endif

			/* picked up by iwp_thread() to advance the scan */
			sc->sc_scan_pending++;
			break;
		}

		case STATISTICS_NOTIFICATION:
		{
			/*
			 * handle statistics notification
			 */
			break;
		}

		case CALIBRATION_RES_NOTIFICATION:
			/* stash calibration results for the runtime image */
			iwp_save_calib_result(sc, desc);
			break;

		case CALIBRATION_COMPLETE_NOTIFICATION:
			/* init image finished calibrating; wake iwp_init() */
			mutex_enter(&sc->sc_glock);
			atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
			cv_signal(&sc->sc_ucode_cv);
			mutex_exit(&sc->sc_glock);
			break;

		case MISSED_BEACONS_NOTIFICATION:
		{
			struct iwp_beacon_missed *miss =
			    (struct iwp_beacon_missed *)(desc + 1);

			/* too many consecutive missed beacons: drop the BSS */
			if ((ic->ic_state == IEEE80211_S_RUN) &&
			    (LE_32(miss->consecutive) > 50)) {
				cmn_err(CE_NOTE, "iwp: iwp_rx_softintr(): "
				    "beacon missed %d/%d\n",
				    LE_32(miss->consecutive),
				    LE_32(miss->total));
				(void) ieee80211_new_state(ic,
				    IEEE80211_S_INIT, -1);
			}
			break;
		}
		}

		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
	}

	/*
	 * driver dealt with what received in rx queue and tell the information
	 * to the firmware.
	 */
	index = (0 == index) ? RX_QUEUE_SIZE - 1 : index - 1;
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));

	/*
	 * re-enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	return (DDI_INTR_CLAIMED);
}
2760 
/*
 * the handle of interrupt
 *
 * Hard interrupt handler: reads and acks the interrupt causes, handles
 * fatal-error / RF-kill / firmware-load causes inline, and defers rx
 * processing to iwp_rx_softintr().  Interrupts are masked for the
 * duration and re-enabled either here or by the soft handler.
 */
/* ARGSUSED */
static uint_t
iwp_intr(caddr_t arg, caddr_t unused)
{
	iwp_sc_t *sc;
	uint32_t r, rfh;

	if (NULL == arg) {
		return (DDI_INTR_UNCLAIMED);
	}
	sc = (iwp_sc_t *)arg;

	/* 0 means not ours; all-ones means the device is gone */
	r = IWP_READ(sc, CSR_INT);
	if (0 == r || 0xffffffff == r) {
		return (DDI_INTR_UNCLAIMED);
	}

	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
	    "interrupt reg %x\n", r));

	rfh = IWP_READ(sc, CSR_FH_INT_STATUS);

	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
	    "FH interrupt reg %x\n", rfh));

	/*
	 * disable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, 0);

	/*
	 * ack interrupts
	 */
	IWP_WRITE(sc, CSR_INT, r);
	IWP_WRITE(sc, CSR_FH_INT_STATUS, rfh);

	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
		IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
		    "fatal firmware error\n"));
		/* stop the NIC; iwp_thread() performs the actual recovery */
		iwp_stop(sc);
		sc->sc_ostate = sc->sc_ic.ic_state;

		/* notify upper layer */
		if (!IWP_CHK_FAST_RECOVER(sc)) {
			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
		}

		atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
		/* interrupts stay masked until recovery re-initializes */
		return (DDI_INTR_CLAIMED);
	}

	if (r & BIT_INT_RF_KILL) {
		uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
		if (tmp & (1 << 27)) {
			cmn_err(CE_NOTE, "RF switch: radio on\n");
		}
	}

	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
	    (rfh & FH_INT_RX_MASK)) {
		/*
		 * hand rx work to the soft interrupt; it re-enables
		 * the interrupt mask when it finishes, so return now
		 */
		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
		return (DDI_INTR_CLAIMED);
	}

	if (r & BIT_INT_FH_TX) {
		/* a firmware segment finished DMA-ing; see iwp_put_seg_fw() */
		mutex_enter(&sc->sc_glock);
		atomic_or_32(&sc->sc_flags, IWP_F_PUT_SEG);
		cv_signal(&sc->sc_put_seg_cv);
		mutex_exit(&sc->sc_glock);
	}

#ifdef	DEBUG
	if (r & BIT_INT_ALIVE)	{
		IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
		    "firmware initialized.\n"));
	}
#endif

	/*
	 * re-enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	return (DDI_INTR_CLAIMED);
}
2849 
/*
 * Map a net80211 rate (in 500kb/s units) to the PLCP signal value the
 * firmware expects in the tx command.  Unknown rates map to 0.
 */
static uint8_t
iwp_rate_to_plcp(int rate)
{
	static const struct {
		int	r_rate;		/* 500kb/s units */
		uint8_t	r_plcp;		/* firmware PLCP code */
	} rate_map[] = {
		/*
		 * CCK rates
		 */
		{ 2,	0xa },
		{ 4,	0x14 },
		{ 11,	0x37 },
		{ 22,	0x6e },
		/*
		 * OFDM rates
		 */
		{ 12,	0xd },
		{ 18,	0xf },
		{ 24,	0x5 },
		{ 36,	0x7 },
		{ 48,	0x9 },
		{ 72,	0xb },
		{ 96,	0x1 },
		{ 108,	0x3 },
	};
	size_t i;

	for (i = 0; i < sizeof (rate_map) / sizeof (rate_map[0]); i++) {
		if (rate_map[i].r_rate == rate) {
			return (rate_map[i].r_plcp);
		}
	}

	return (0);
}
2917 
2918 /*
2919  * invoked by GLD send frames
2920  */
2921 static mblk_t *
iwp_m_tx(void * arg,mblk_t * mp)2922 iwp_m_tx(void *arg, mblk_t *mp)
2923 {
2924 	iwp_sc_t	*sc;
2925 	ieee80211com_t	*ic;
2926 	mblk_t		*next;
2927 
2928 	if (NULL == arg) {
2929 		return (NULL);
2930 	}
2931 	sc = (iwp_sc_t *)arg;
2932 	ic = &sc->sc_ic;
2933 
2934 	if (sc->sc_flags & IWP_F_SUSPEND) {
2935 		freemsgchain(mp);
2936 		return (NULL);
2937 	}
2938 
2939 	if (ic->ic_state != IEEE80211_S_RUN) {
2940 		freemsgchain(mp);
2941 		return (NULL);
2942 	}
2943 
2944 	if ((sc->sc_flags & IWP_F_HW_ERR_RECOVER) &&
2945 	    IWP_CHK_FAST_RECOVER(sc)) {
2946 		IWP_DBG((IWP_DEBUG_FW, "iwp_m_tx(): "
2947 		    "hold queue\n"));
2948 		return (mp);
2949 	}
2950 
2951 
2952 	while (mp != NULL) {
2953 		next = mp->b_next;
2954 		mp->b_next = NULL;
2955 		if (iwp_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2956 			mp->b_next = next;
2957 			break;
2958 		}
2959 		mp = next;
2960 	}
2961 
2962 	return (mp);
2963 }
2964 
/*
 * send frames
 *
 * Builds a tx command (REPLY_TX) plus frame payload, programs one tx
 * descriptor with two DMA segments (command+header, then body), and
 * kicks the ring.  Returns IWP_SUCCESS when the frame was either sent
 * or dropped (caller must not retry), IWP_FAIL when the caller should
 * requeue (suspend, or ring full).
 */
static int
iwp_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
{
	iwp_sc_t *sc;
	iwp_tx_ring_t *ring;
	iwp_tx_desc_t *desc;
	iwp_tx_data_t *data;
	iwp_tx_data_t *desc_data;
	iwp_cmd_t *cmd;
	iwp_tx_cmd_t *tx;
	ieee80211_node_t *in;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	mblk_t *m, *m0;
	int hdrlen, len, len0, mblen, off, err = IWP_SUCCESS;
	uint16_t masks = 0;
	uint32_t rate, s_id = 0;

	if (NULL == ic) {
		return (IWP_FAIL);
	}
	sc = (iwp_sc_t *)ic;

	/* while suspended: free non-data frames, let GLD requeue data */
	if (sc->sc_flags & IWP_F_SUSPEND) {
		if ((type & IEEE80211_FC0_TYPE_MASK) !=
		    IEEE80211_FC0_TYPE_DATA) {
			freemsg(mp);
		}
		err = IWP_FAIL;
		goto exit;
	}

	/* claim a command slot in tx queue 0 */
	mutex_enter(&sc->sc_tx_lock);
	ring = &sc->sc_txq[0];
	data = &ring->data[ring->cur];
	cmd = data->cmd;
	bzero(cmd, sizeof (*cmd));

	ring->cur = (ring->cur + 1) % ring->count;

	/*
	 * Need reschedule TX if TX buffer is full.
	 */
	if (ring->queued > ring->count - IWP_MAX_WIN_SIZE) {
		IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
		"no txbuf\n"));

		sc->sc_need_reschedule = 1;
		mutex_exit(&sc->sc_tx_lock);

		if ((type & IEEE80211_FC0_TYPE_MASK) !=
		    IEEE80211_FC0_TYPE_DATA) {
			freemsg(mp);
		}
		sc->sc_tx_nobuf++;
		err = IWP_FAIL;
		goto exit;
	}

	ring->queued++;

	mutex_exit(&sc->sc_tx_lock);

	hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);

	/* flatten the chain into one contiguous buffer (+32 for encap) */
	m = allocb(msgdsize(mp) + 32, BPRI_MED);
	if (NULL == m) { /* can not alloc buf, drop this package */
		cmn_err(CE_WARN, "iwp_send(): "
		    "failed to allocate msgbuf\n");
		freemsg(mp);

		/* give the claimed slot back and maybe wake the MAC layer */
		mutex_enter(&sc->sc_tx_lock);
		ring->queued--;
		if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
			sc->sc_need_reschedule = 0;
			mutex_exit(&sc->sc_tx_lock);
			mac_tx_update(ic->ic_mach);
			mutex_enter(&sc->sc_tx_lock);
		}
		mutex_exit(&sc->sc_tx_lock);

		err = IWP_SUCCESS;
		goto exit;
	}

	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
		mblen = MBLKL(m0);
		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
		off += mblen;
	}

	m->b_wptr += off;

	wh = (struct ieee80211_frame *)m->b_rptr;

	/*
	 * determine send which AP or station in IBSS
	 */
	in = ieee80211_find_txnode(ic, wh->i_addr1);
	if (NULL == in) {
		cmn_err(CE_WARN, "iwp_send(): "
		    "failed to find tx node\n");
		freemsg(mp);
		freemsg(m);
		sc->sc_tx_err++;

		/* give the claimed slot back and maybe wake the MAC layer */
		mutex_enter(&sc->sc_tx_lock);
		ring->queued--;
		if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
			sc->sc_need_reschedule = 0;
			mutex_exit(&sc->sc_tx_lock);
			mac_tx_update(ic->ic_mach);
			mutex_enter(&sc->sc_tx_lock);
		}
		mutex_exit(&sc->sc_tx_lock);

		err = IWP_SUCCESS;
		goto exit;
	}

	/*
	 * Net80211 module encapsulate outbound data frames.
	 * Add some feilds of 80211 frame.
	 */
	if ((type & IEEE80211_FC0_TYPE_MASK) ==
	    IEEE80211_FC0_TYPE_DATA) {
		(void) ieee80211_encap(ic, m, in);
	}

	/* original chain has been copied; done with it */
	freemsg(mp);

	cmd->hdr.type = REPLY_TX;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;

	tx = (iwp_tx_cmd_t *)cmd->data;
	tx->tx_flags = 0;

	/* request an ACK only for unicast destinations */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
	} else {
		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
	}

	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		/*
		 * NOTE(review): k is only checked for NULL; the key
		 * itself is never passed to the hardware — software
		 * crypto is assumed here.
		 */
		k = ieee80211_crypto_encap(ic, m);
		if (NULL == k) {
			freemsg(m);
			sc->sc_tx_err++;

			mutex_enter(&sc->sc_tx_lock);
			ring->queued--;
			if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
				sc->sc_need_reschedule = 0;
				mutex_exit(&sc->sc_tx_lock);
				mac_tx_update(ic->ic_mach);
				mutex_enter(&sc->sc_tx_lock);
			}
			mutex_exit(&sc->sc_tx_lock);

			err = IWP_SUCCESS;
			goto exit;
		}

		/* packet header may have moved, reset our local pointer */
		wh = (struct ieee80211_frame *)m->b_rptr;
	}

	len = msgdsize(m);

#ifdef DEBUG
	if (iwp_dbg_flags & IWP_DEBUG_TX) {
		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
	}
#endif

	tx->rts_retry_limit = IWP_TX_RTS_RETRY_LIMIT;
	tx->data_retry_limit = IWP_TX_DATA_RETRY_LIMIT;

	/*
	 * specific TX parameters for management frames
	 */
	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	    IEEE80211_FC0_TYPE_MGT) {
		/*
		 * mgmt frames are sent at 1M
		 */
		if ((in->in_rates.ir_rates[0] &
		    IEEE80211_RATE_VAL) != 0) {
			rate = in->in_rates.ir_rates[0] & IEEE80211_RATE_VAL;
		} else {
			rate = 2;
		}

		tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);

		/*
		 * tell h/w to set timestamp in probe responses
		 */
		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);

			tx->data_retry_limit = 3;
			if (tx->data_retry_limit < tx->rts_retry_limit) {
				tx->rts_retry_limit = tx->data_retry_limit;
			}
		}

		/* (re)assoc requests get a slightly longer PM timeout */
		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) {
			tx->timeout.pm_frame_timeout = LE_16(3);
		} else {
			tx->timeout.pm_frame_timeout = LE_16(2);
		}

	} else {
		/*
		 * do it here for the software way rate scaling.
		 * later for rate scaling in hardware.
		 *
		 * now the txrate is determined in tx cmd flags, set to the
		 * max value 54M for 11g and 11M for 11b originally.
		 *
		 * NOTE(review): if ic_fixed_rate is NONE and the current
		 * txrate entry masks to 0, `rate` is left uninitialized
		 * on this path — confirm whether that can occur.
		 */
		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
			rate = ic->ic_fixed_rate;
		} else {
			if ((in->in_rates.ir_rates[in->in_txrate] &
			    IEEE80211_RATE_VAL) != 0) {
				rate = in->in_rates.
				    ir_rates[in->in_txrate] &
				    IEEE80211_RATE_VAL;
			}
		}

		tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);

		tx->timeout.pm_frame_timeout = 0;
	}

	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
	    "tx rate[%d of %d] = %x",
	    in->in_txrate, in->in_rates.ir_nrates, rate));

	/* pad the 802.11 header to a 4-byte boundary if necessary */
	len0 = roundup(4 + sizeof (iwp_tx_cmd_t) + hdrlen, 4);
	if (len0 != (4 + sizeof (iwp_tx_cmd_t) + hdrlen)) {
		tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK);
	}

	/*
	 * retrieve destination node's id
	 */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		tx->sta_id = IWP_BROADCAST_ID;
	} else {
		tx->sta_id = IWP_AP_ID;
	}

	/* 1/2/5.5/11 Mb/s are CCK-modulated */
	if (2 == rate || 4 == rate || 11 == rate || 22 == rate) {
		masks |= RATE_MCS_CCK_MSK;
	}

	masks |= RATE_MCS_ANT_B_MSK;
	tx->rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(rate) | masks);

	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
	    "tx flag = %x",
	    tx->tx_flags));

	tx->stop_time.life_time  = LE_32(0xffffffff);

	tx->len = LE_16(len);

	tx->dram_lsb_ptr =
	    LE_32(data->paddr_cmd + 4 + offsetof(iwp_tx_cmd_t, scratch));
	tx->dram_msb_ptr = 0;
	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	/* 802.11 header travels inline with the tx command */
	(void) memcpy(tx + 1, m->b_rptr, hdrlen);
	m->b_rptr += hdrlen;
	/* frame body goes into the slot's DMA buffer */
	(void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);

	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
	    "sending data: qid=%d idx=%d len=%d",
	    ring->qid, ring->cur, len));

	/*
	 * first segment includes the tx cmd plus the 802.11 header,
	 * the second includes the remaining of the 802.11 frame.
	 */
	mutex_enter(&sc->sc_tx_lock);

	cmd->hdr.idx = ring->desc_cur;

	desc_data = &ring->data[ring->desc_cur];
	desc = desc_data->desc;
	bzero(desc, sizeof (*desc));
	/* two tx buffers in this TFD */
	desc->val0 = 2 << 24;
	desc->pa[0].tb1_addr = data->paddr_cmd;
	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
	desc->pa[0].val2 =
	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
	    ((len - hdrlen) << 20);
	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
	    "phy addr1 = 0x%x phy addr2 = 0x%x "
	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));

	/*
	 * kick ring
	 */
	s_id = tx->sta_id;

	/* byte-count table entry (duplicated for the scheduler window) */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
	    tfd_offset[ring->desc_cur].val =
	    (8 + len) | (s_id << 12);
	if (ring->desc_cur < IWP_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWP_QUEUE_SIZE + ring->desc_cur].val =
		    (8 + len) | (s_id << 12);
	}

	IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
	IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	ring->desc_cur = (ring->desc_cur + 1) % ring->count;
	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->desc_cur);

	mutex_exit(&sc->sc_tx_lock);
	freemsg(m);

	/*
	 * release node reference
	 */
	ieee80211_free_node(in);

	ic->ic_stats.is_tx_bytes += len;
	ic->ic_stats.is_tx_frags++;

	/* arm the tx watchdog; cleared by iwp_tx_intr() on completion */
	mutex_enter(&sc->sc_mt_lock);
	if (0 == sc->sc_tx_timer) {
		sc->sc_tx_timer = 4;
	}
	mutex_exit(&sc->sc_mt_lock);

exit:
	return (err);
}
3321 
3322 /*
3323  * invoked by GLD to deal with IOCTL affaires
3324  */
3325 static void
iwp_m_ioctl(void * arg,queue_t * wq,mblk_t * mp)3326 iwp_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
3327 {
3328 	iwp_sc_t	*sc;
3329 	ieee80211com_t	*ic;
3330 	int		err = EINVAL;
3331 
3332 	if (NULL == arg) {
3333 		return;
3334 	}
3335 	sc = (iwp_sc_t *)arg;
3336 	ic = &sc->sc_ic;
3337 
3338 	err = ieee80211_ioctl(ic, wq, mp);
3339 	if (ENETRESET == err) {
3340 		/*
3341 		 * This is special for the hidden AP connection.
3342 		 * In any case, we should make sure only one 'scan'
3343 		 * in the driver for a 'connect' CLI command. So
3344 		 * when connecting to a hidden AP, the scan is just
3345 		 * sent out to the air when we know the desired
3346 		 * essid of the AP we want to connect.
3347 		 */
3348 		if (ic->ic_des_esslen) {
3349 			if (sc->sc_flags & IWP_F_RUNNING) {
3350 				iwp_m_stop(sc);
3351 				(void) iwp_m_start(sc);
3352 				(void) ieee80211_new_state(ic,
3353 				    IEEE80211_S_SCAN, -1);
3354 			}
3355 		}
3356 	}
3357 }
3358 
3359 /*
3360  * Call back functions for get/set proporty
3361  */
3362 static int
iwp_m_getprop(void * arg,const char * pr_name,mac_prop_id_t wldp_pr_num,uint_t wldp_length,void * wldp_buf)3363 iwp_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3364     uint_t wldp_length, void *wldp_buf)
3365 {
3366 	iwp_sc_t	*sc;
3367 	int		err = EINVAL;
3368 
3369 	if (NULL == arg) {
3370 		return (EINVAL);
3371 	}
3372 	sc = (iwp_sc_t *)arg;
3373 
3374 	err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3375 	    wldp_length, wldp_buf);
3376 
3377 	return (err);
3378 }
3379 
3380 static void
iwp_m_propinfo(void * arg,const char * pr_name,mac_prop_id_t wldp_pr_num,mac_prop_info_handle_t prh)3381 iwp_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3382     mac_prop_info_handle_t prh)
3383 {
3384 	iwp_sc_t	*sc;
3385 
3386 	sc = (iwp_sc_t *)arg;
3387 	ieee80211_propinfo(&sc->sc_ic, pr_name, wldp_pr_num, prh);
3388 }
3389 
3390 static int
iwp_m_setprop(void * arg,const char * pr_name,mac_prop_id_t wldp_pr_num,uint_t wldp_length,const void * wldp_buf)3391 iwp_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3392     uint_t wldp_length, const void *wldp_buf)
3393 {
3394 	iwp_sc_t		*sc;
3395 	ieee80211com_t		*ic;
3396 	int			err = EINVAL;
3397 
3398 	if (NULL == arg) {
3399 		return (EINVAL);
3400 	}
3401 	sc = (iwp_sc_t *)arg;
3402 	ic = &sc->sc_ic;
3403 
3404 	err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3405 	    wldp_buf);
3406 
3407 	if (err == ENETRESET) {
3408 		if (ic->ic_des_esslen) {
3409 			if (sc->sc_flags & IWP_F_RUNNING) {
3410 				iwp_m_stop(sc);
3411 				(void) iwp_m_start(sc);
3412 				(void) ieee80211_new_state(ic,
3413 				    IEEE80211_S_SCAN, -1);
3414 			}
3415 		}
3416 		err = 0;
3417 	}
3418 	return (err);
3419 }
3420 
3421 /*
3422  * invoked by GLD supply statistics NIC and driver
3423  */
static int
iwp_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	iwp_sc_t	*sc;
	ieee80211com_t	*ic;
	ieee80211_node_t *in;

	if (NULL == arg) {
		return (EINVAL);
	}
	sc = (iwp_sc_t *)arg;
	ic = &sc->sc_ic;

	/*
	 * sc_glock serializes the statistic reads against the rest
	 * of the driver; every exit path below must drop it.
	 */
	mutex_enter(&sc->sc_glock);

	switch (stat) {
	case MAC_STAT_IFSPEED:
		/*
		 * Report the current TX rate of the BSS node, or the
		 * configured fixed rate.  Rates are in 500kb/s units,
		 * hence "/ 2 * 1000000" to convert to bits per second.
		 */
		in = ic->ic_bss;
		*val = ((IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) ?
		    IEEE80211_RATE(in->in_txrate) :
		    ic->ic_fixed_rate) / 2 * 1000000;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = sc->sc_tx_nobuf;
		break;
	case MAC_STAT_NORCVBUF:
		*val = sc->sc_rx_nobuf;
		break;
	case MAC_STAT_IERRORS:
		*val = sc->sc_rx_err;
		break;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
		break;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
		break;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
		break;
	case MAC_STAT_OERRORS:
	case WIFI_STAT_TX_FAILED:
		*val = sc->sc_tx_err;
		break;
	case WIFI_STAT_TX_RETRANS:
		*val = sc->sc_tx_retries;
		break;
	/*
	 * The remaining 802.11 statistics are maintained by the
	 * net80211 framework; delegate to ieee80211_stat() after
	 * dropping our lock.
	 */
	case WIFI_STAT_FCS_ERRORS:
	case WIFI_STAT_WEP_ERRORS:
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		mutex_exit(&sc->sc_glock);
		return (ieee80211_stat(ic, stat, val));
	default:
		mutex_exit(&sc->sc_glock);
		return (ENOTSUP);
	}

	mutex_exit(&sc->sc_glock);

	return (IWP_SUCCESS);

}
3496 
3497 /*
3498  * invoked by GLD to start or open NIC
3499  */
3500 static int
iwp_m_start(void * arg)3501 iwp_m_start(void *arg)
3502 {
3503 	iwp_sc_t *sc;
3504 	ieee80211com_t	*ic;
3505 	int err = IWP_FAIL;
3506 
3507 	if (NULL == arg) {
3508 		return (EINVAL);
3509 	}
3510 	sc = (iwp_sc_t *)arg;
3511 	ic = &sc->sc_ic;
3512 
3513 	err = iwp_init(sc);
3514 	if (err != IWP_SUCCESS) {
3515 		/*
3516 		 * The hw init err(eg. RF is OFF). Return Success to make
3517 		 * the 'plumb' succeed. The iwp_thread() tries to re-init
3518 		 * background.
3519 		 */
3520 		atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
3521 		return (IWP_SUCCESS);
3522 	}
3523 
3524 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3525 
3526 	atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
3527 
3528 	return (IWP_SUCCESS);
3529 }
3530 
3531 /*
3532  * invoked by GLD to stop or down NIC
3533  */
3534 static void
iwp_m_stop(void * arg)3535 iwp_m_stop(void *arg)
3536 {
3537 	iwp_sc_t *sc;
3538 	ieee80211com_t	*ic;
3539 
3540 	if (NULL == arg) {
3541 		return;
3542 	}
3543 	sc = (iwp_sc_t *)arg;
3544 	ic = &sc->sc_ic;
3545 
3546 	iwp_stop(sc);
3547 
3548 	/*
3549 	 * release buffer for calibration
3550 	 */
3551 	iwp_release_calib_buffer(sc);
3552 
3553 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3554 
3555 	atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
3556 	atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
3557 
3558 	atomic_and_32(&sc->sc_flags, ~IWP_F_RUNNING);
3559 	atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
3560 }
3561 
3562 /*
3563  * invoked by GLD to configure NIC
3564  */
3565 static int
iwp_m_unicst(void * arg,const uint8_t * macaddr)3566 iwp_m_unicst(void *arg, const uint8_t *macaddr)
3567 {
3568 	iwp_sc_t *sc;
3569 	ieee80211com_t	*ic;
3570 	int err = IWP_SUCCESS;
3571 
3572 	if (NULL == arg) {
3573 		return (EINVAL);
3574 	}
3575 	sc = (iwp_sc_t *)arg;
3576 	ic = &sc->sc_ic;
3577 
3578 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3579 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3580 		mutex_enter(&sc->sc_glock);
3581 		err = iwp_config(sc);
3582 		mutex_exit(&sc->sc_glock);
3583 		if (err != IWP_SUCCESS) {
3584 			cmn_err(CE_WARN, "iwp_m_unicst(): "
3585 			    "failed to configure device\n");
3586 			goto fail;
3587 		}
3588 	}
3589 
3590 	return (err);
3591 
3592 fail:
3593 	return (err);
3594 }
3595 
3596 /* ARGSUSED */
3597 static int
iwp_m_multicst(void * arg,boolean_t add,const uint8_t * m)3598 iwp_m_multicst(void *arg, boolean_t add, const uint8_t *m)
3599 {
3600 	return (IWP_SUCCESS);
3601 }
3602 
3603 /* ARGSUSED */
3604 static int
iwp_m_promisc(void * arg,boolean_t on)3605 iwp_m_promisc(void *arg, boolean_t on)
3606 {
3607 	return (IWP_SUCCESS);
3608 }
3609 
3610 /*
3611  * kernel thread to deal with exceptional situation
3612  */
static void
iwp_thread(iwp_sc_t *sc)
{
	ieee80211com_t	*ic = &sc->sc_ic;
	clock_t clk;
	int err, n = 0, timeout = 0;
	uint32_t tmp;
#ifdef	DEBUG
	int times = 0;
#endif

	/*
	 * loop until the detach path clears sc_mf_thread_switch
	 */
	while (sc->sc_mf_thread_switch) {
		/*
		 * track the state of the hardware RF-kill switch
		 */
		tmp = IWP_READ(sc, CSR_GP_CNTRL);
		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_RADIO_OFF);
		} else {
			atomic_or_32(&sc->sc_flags, IWP_F_RADIO_OFF);
		}

		/*
		 * If  in SUSPEND or the RF is OFF, do nothing.
		 */
		if (sc->sc_flags & IWP_F_RADIO_OFF) {
			delay(drv_usectohz(100000));
			continue;
		}

		/*
		 * recovery fatal error
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWP_F_HW_ERR_RECOVER)) {

			IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
			    "try to recover fatal hw error: %d\n", times++));

			iwp_stop(sc);

			if (IWP_CHK_FAST_RECOVER(sc)) {
				/* save runtime configuration */
				bcopy(&sc->sc_config, &sc->sc_config_save,
				    sizeof (sc->sc_config));
			} else {
				/* slow path: drop to INIT and back off,
				 * waiting longer after each failure */
				ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
				delay(drv_usectohz(2000000 + n*500000));
			}

			/*
			 * retry initialization up to 20 times before
			 * giving up and clearing the recovery flag below
			 */
			err = iwp_init(sc);
			if (err != IWP_SUCCESS) {
				n++;
				if (n < 20) {
					continue;
				}
			}

			n = 0;
			if (!err) {
				atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
			}


			if (!IWP_CHK_FAST_RECOVER(sc) ||
			    iwp_fast_recover(sc) != IWP_SUCCESS) {
				atomic_and_32(&sc->sc_flags,
				    ~IWP_F_HW_ERR_RECOVER);

				delay(drv_usectohz(2000000));
				if (sc->sc_ostate != IEEE80211_S_INIT) {
					ieee80211_new_state(ic,
					    IEEE80211_S_SCAN, 0);
				}
			}
		}

		/*
		 * pace active scanning: give each probe request time
		 * for responses before moving to the next channel
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWP_F_SCANNING) && sc->sc_scan_pending) {
			IWP_DBG((IWP_DEBUG_SCAN, "iwp_thread(): "
			    "wait for probe response\n"));

			sc->sc_scan_pending--;
			delay(drv_usectohz(200000));
			ieee80211_next_scan(ic);
		}

		/*
		 * rate ctl
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWP_F_RATE_AUTO_CTL)) {
			clk = ddi_get_lbolt();
			/* run AMRR roughly once per second */
			if (clk > sc->sc_clk + drv_usectohz(1000000)) {
				iwp_amrr_timeout(sc);
			}
		}

		delay(drv_usectohz(100000));

		/*
		 * sc_tx_timer is armed on transmit; it is decremented
		 * once every 10 passes here, and when it reaches zero
		 * the send is considered failed and a hardware recovery
		 * is scheduled.
		 */
		mutex_enter(&sc->sc_mt_lock);
		if (sc->sc_tx_timer) {
			timeout++;
			if (10 == timeout) {
				sc->sc_tx_timer--;
				if (0 == sc->sc_tx_timer) {
					atomic_or_32(&sc->sc_flags,
					    IWP_F_HW_ERR_RECOVER);
					sc->sc_ostate = IEEE80211_S_RUN;
					IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
					    "try to recover from "
					    "send fail\n"));
				}
				timeout = 0;
			}
		}
		mutex_exit(&sc->sc_mt_lock);
	}

	/*
	 * notify the waiter (detach path) that this thread has exited
	 */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_mf_thread = NULL;
	cv_signal(&sc->sc_mt_cv);
	mutex_exit(&sc->sc_mt_lock);
}
3734 
3735 
3736 /*
3737  * Send a command to the ucode.
3738  */
/*
 * Send a command to the ucode.
 * 'async' non-zero: fire-and-forget, return immediately;
 * zero: wait (up to 2s) for the interrupt handler to signal
 * completion via sc_cmd_cv / sc_cmd_flag.
 * Caller must hold sc_glock.
 */
static int
iwp_cmd(iwp_sc_t *sc, int code, const void *buf, int size, int async)
{
	iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
	iwp_tx_desc_t *desc;
	iwp_cmd_t *cmd;

	ASSERT(size <= sizeof (cmd->data));
	ASSERT(mutex_owned(&sc->sc_glock));

	IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd() "
	    "code[%d]", code));
	desc = ring->data[ring->cur].desc;
	cmd = ring->data[ring->cur].cmd;

	/*
	 * fill in the command header and copy the payload in place
	 */
	cmd->hdr.type = (uint8_t)code;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;
	(void) memcpy(cmd->data, buf, size);
	(void) memset(desc, 0, sizeof (*desc));

	/* a single transfer buffer in this descriptor */
	desc->val0 = 1 << 24;
	desc->pa[0].tb1_addr =
	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
	/* buffer length: 4-byte command header plus payload */
	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;

	/* count outstanding asynchronous commands */
	if (async) {
		sc->sc_cmd_accum++;
	}

	/*
	 * kick cmd ring XXX
	 */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
	    tfd_offset[ring->cur].val = 8;
	if (ring->cur < IWP_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
	}
	ring->cur = (ring->cur + 1) % ring->count;
	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (async) {
		return (IWP_SUCCESS);
	} else {
		clock_t clk;

		/*
		 * synchronous command: wait up to 2 seconds for the
		 * interrupt handler to set SC_CMD_FLG_DONE
		 */
		clk = ddi_get_lbolt() + drv_usectohz(2000000);
		while (sc->sc_cmd_flag != SC_CMD_FLG_DONE) {
			if (cv_timedwait(&sc->sc_cmd_cv,
			    &sc->sc_glock, clk) < 0) {
				break;
			}
		}

		if (SC_CMD_FLG_DONE == sc->sc_cmd_flag) {
			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
			return (IWP_SUCCESS);
		} else {
			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
			return (IWP_FAIL);
		}
	}
}
3804 
3805 /*
3806  * require ucode seting led of NIC
3807  */
3808 static void
iwp_set_led(iwp_sc_t * sc,uint8_t id,uint8_t off,uint8_t on)3809 iwp_set_led(iwp_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3810 {
3811 	iwp_led_cmd_t led;
3812 
3813 	led.interval = LE_32(100000);	/* unit: 100ms */
3814 	led.id = id;
3815 	led.off = off;
3816 	led.on = on;
3817 
3818 	(void) iwp_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3819 }
3820 
3821 /*
3822  * necessary setting to NIC before authentication
3823  */
3824 static int
iwp_hw_set_before_auth(iwp_sc_t * sc)3825 iwp_hw_set_before_auth(iwp_sc_t *sc)
3826 {
3827 	ieee80211com_t *ic = &sc->sc_ic;
3828 	ieee80211_node_t *in = ic->ic_bss;
3829 	int err = IWP_FAIL;
3830 
3831 	/*
3832 	 * update adapter's configuration according
3833 	 * the info of target AP
3834 	 */
3835 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
3836 	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan));
3837 
3838 	sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0;
3839 	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0;
3840 	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0;
3841 
3842 	if (IEEE80211_MODE_11B == ic->ic_curmode) {
3843 		sc->sc_config.cck_basic_rates  = 0x03;
3844 		sc->sc_config.ofdm_basic_rates = 0;
3845 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
3846 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
3847 		sc->sc_config.cck_basic_rates  = 0;
3848 		sc->sc_config.ofdm_basic_rates = 0x15;
3849 	} else { /* assume 802.11b/g */
3850 		sc->sc_config.cck_basic_rates  = 0x0f;
3851 		sc->sc_config.ofdm_basic_rates = 0xff;
3852 	}
3853 
3854 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3855 	    RXON_FLG_SHORT_SLOT_MSK);
3856 
3857 	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
3858 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3859 	} else {
3860 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3861 	}
3862 
3863 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
3864 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3865 	} else {
3866 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3867 	}
3868 
3869 	IWP_DBG((IWP_DEBUG_80211, "iwp_hw_set_before_auth(): "
3870 	    "config chan %d flags %x "
3871 	    "filter_flags %x  cck %x ofdm %x"
3872 	    " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
3873 	    LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags),
3874 	    LE_32(sc->sc_config.filter_flags),
3875 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3876 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3877 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3878 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3879 
3880 	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
3881 	    sizeof (iwp_rxon_cmd_t), 1);
3882 	if (err != IWP_SUCCESS) {
3883 		cmn_err(CE_WARN, "iwp_hw_set_before_auth(): "
3884 		    "failed to config chan%d\n", sc->sc_config.chan);
3885 		return (err);
3886 	}
3887 
3888 	/*
3889 	 * add default AP node
3890 	 */
3891 	err = iwp_add_ap_sta(sc);
3892 	if (err != IWP_SUCCESS) {
3893 		return (err);
3894 	}
3895 
3896 
3897 	return (err);
3898 }
3899 
3900 /*
3901  * Send a scan request(assembly scan cmd) to the firmware.
3902  */
/*
 * Send a scan request(assembly scan cmd) to the firmware.
 * Builds the REPLY_SCAN_CMD in the command ring: scan header,
 * an embedded probe-request frame, then per-channel attributes.
 * Returns IWP_SUCCESS (failures surface via the ucode response).
 */
static int
iwp_scan(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
	iwp_tx_desc_t *desc;
	iwp_tx_data_t *data;
	iwp_cmd_t *cmd;
	iwp_scan_hdr_t *hdr;
	iwp_scan_chan_t chan;
	struct ieee80211_frame *wh;
	ieee80211_node_t *in = ic->ic_bss;
	uint8_t essid[IEEE80211_NWID_LEN+1];
	struct ieee80211_rateset *rs;
	enum ieee80211_phymode mode;
	uint8_t *frm;
	int i, pktlen, nrates;

	data = &ring->data[ring->cur];
	desc = data->desc;
	cmd = (iwp_cmd_t *)data->dma_data.mem_va;

	/*
	 * command header; 0x40 OR'd into the index presumably marks a
	 * large/scan command for the firmware — TODO confirm against
	 * the firmware API definition
	 */
	cmd->hdr.type = REPLY_SCAN_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur | 0x40;

	/*
	 * scan header: one channel per request, with quiet-time
	 * thresholds and 2.4GHz band / RX chain selection
	 */
	hdr = (iwp_scan_hdr_t *)cmd->data;
	(void) memset(hdr, 0, sizeof (iwp_scan_hdr_t));
	hdr->nchan = 1;
	hdr->quiet_time = LE_16(50);
	hdr->quiet_plcp_th = LE_16(1);

	hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK);
	hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    (0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	/*
	 * TX parameters for the probe request: broadcast station,
	 * unlimited lifetime, lowest CCK rate on antenna B
	 */
	hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
	hdr->tx_cmd.sta_id = IWP_BROADCAST_ID;
	hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff);
	hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(2));
	hdr->tx_cmd.rate.r.rate_n_flags |=
	    LE_32(RATE_MCS_ANT_B_MSK |RATE_MCS_CCK_MSK);
	hdr->direct_scan[0].len = ic->ic_des_esslen;
	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;

	hdr->filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
	    RXON_FILTER_BCON_AWARE_MSK);

	/*
	 * directed scan when a desired ESSID is configured,
	 * otherwise leave the direct-scan SSID empty
	 */
	if (ic->ic_des_esslen) {
		bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
		essid[ic->ic_des_esslen] = '\0';
		IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
		    "directed scan %s\n", essid));

		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
		    ic->ic_des_esslen);
	} else {
		bzero(hdr->direct_scan[0].ssid,
		    sizeof (hdr->direct_scan[0].ssid));
	}

	/*
	 * a probe request frame is required after the REPLY_SCAN_CMD
	 */
	wh = (struct ieee80211_frame *)(hdr + 1);
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	(void) memset(wh->i_addr1, 0xff, 6);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
	(void) memset(wh->i_addr3, 0xff, 6);
	*(uint16_t *)&wh->i_dur[0] = 0;
	*(uint16_t *)&wh->i_seq[0] = 0;

	frm = (uint8_t *)(wh + 1);

	/*
	 * essid IE
	 */
	if (in->in_esslen) {
		bcopy(in->in_essid, essid, in->in_esslen);
		essid[in->in_esslen] = '\0';
		IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
		    "probe with ESSID %s\n",
		    essid));
	}
	*frm++ = IEEE80211_ELEMID_SSID;
	*frm++ = in->in_esslen;
	(void) memcpy(frm, in->in_essid, in->in_esslen);
	frm += in->in_esslen;

	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
	rs = &ic->ic_sup_rates[mode];

	/*
	 * supported rates IE
	 */
	*frm++ = IEEE80211_ELEMID_RATES;
	nrates = rs->ir_nrates;
	if (nrates > IEEE80211_RATE_SIZE) {
		nrates = IEEE80211_RATE_SIZE;
	}

	*frm++ = (uint8_t)nrates;
	(void) memcpy(frm, rs->ir_rates, nrates);
	frm += nrates;

	/*
	 * supported xrates IE
	 */
	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
		*frm++ = IEEE80211_ELEMID_XRATES;
		*frm++ = (uint8_t)nrates;
		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
		frm += nrates;
	}

	/*
	 * optionnal IE (usually for wpa)
	 */
	if (ic->ic_opt_ie != NULL) {
		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
		frm += ic->ic_opt_ie_len;
	}

	/* setup length of probe request */
	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
	hdr->len = LE_16(hdr->nchan * sizeof (iwp_scan_chan_t) +
	    LE_16(hdr->tx_cmd.len) + sizeof (iwp_scan_hdr_t));

	/*
	 * the attribute of the scan channels are required after the probe
	 * request frame.
	 */
	for (i = 1; i <= hdr->nchan; i++) {
		/* type 3: active (directed) scan; type 1: passive */
		if (ic->ic_des_esslen) {
			chan.type = LE_32(3);
		} else {
			chan.type = LE_32(1);
		}

		chan.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
		chan.tpc.tx_gain = 0x28;
		chan.tpc.dsp_atten = 110;
		chan.active_dwell = LE_16(50);
		chan.passive_dwell = LE_16(120);

		bcopy(&chan, frm, sizeof (iwp_scan_chan_t));
		frm += sizeof (iwp_scan_chan_t);
	}

	pktlen = _PTRDIFF(frm, cmd);

	/*
	 * single transfer buffer covering the whole assembled command
	 */
	(void) memset(desc, 0, sizeof (*desc));
	desc->val0 = 1 << 24;
	desc->pa[0].tb1_addr =
	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;

	/*
	 * maybe for cmd, filling the byte cnt table is not necessary.
	 * anyway, we fill it here.
	 */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
	    .tfd_offset[ring->cur].val = 8;
	if (ring->cur < IWP_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
	}

	/*
	 * kick cmd ring
	 */
	ring->cur = (ring->cur + 1) % ring->count;
	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	return (IWP_SUCCESS);
}
4085 
4086 /*
4087  * configure NIC by using ucode commands after loading ucode.
4088  */
/*
 * configure NIC by using ucode commands after loading ucode.
 * Sequence: power table, BT coexistence, RXON, remove stale
 * stations, then add the broadcast station.  Caller is expected
 * to serialize with sc_glock (iwp_cmd() asserts it).
 * Returns IWP_SUCCESS or the error of the first failing command.
 */
static int
iwp_config(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwp_powertable_cmd_t powertable;
	iwp_bt_cmd_t bt;
	iwp_add_sta_t node;
	iwp_rem_sta_t	rm_sta;
	const uint8_t bcast[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	int err = IWP_FAIL;

	/*
	 * set power mode. Disable power management at present, do it later
	 */
	(void) memset(&powertable, 0, sizeof (powertable));
	powertable.flags = LE_16(0x8);
	err = iwp_cmd(sc, POWER_TABLE_CMD, &powertable,
	    sizeof (powertable), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to set power mode\n");
		return (err);
	}

	/*
	 * configure bt coexistence
	 */
	(void) memset(&bt, 0, sizeof (bt));
	bt.flags = 3;
	bt.lead_time = 0xaa;
	bt.max_kill = 1;
	err = iwp_cmd(sc, REPLY_BT_CONFIG, &bt,
	    sizeof (bt), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to configurate bt coexistence\n");
		return (err);
	}

	/*
	 * configure rxon
	 */
	(void) memset(&sc->sc_config, 0, sizeof (iwp_rxon_cmd_t));
	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
	sc->sc_config.flags = LE_32(RXON_FLG_BAND_24G_MSK);
	sc->sc_config.flags &= LE_32(~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
	    RXON_FLG_CHANNEL_MODE_PURE_40_MSK));

	/*
	 * device type and filter flags depend on the 802.11
	 * operating mode configured by net80211
	 */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_DIS_DECRYPT_MSK |
		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;

		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_DIS_DECRYPT_MSK |
		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
		break;
	case IEEE80211_M_HOSTAP:
		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
		break;
	}

	/*
	 * Support all CCK rates.
	 */
	sc->sc_config.cck_basic_rates  = 0x0f;

	/*
	 * Support all OFDM rates.
	 */
	sc->sc_config.ofdm_basic_rates = 0xff;

	sc->sc_config.rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    (0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
	    sizeof (iwp_rxon_cmd_t), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to set configure command\n");
		return (err);
	}

	/*
	 * remove all nodes in NIC
	 */
	(void) memset(&rm_sta, 0, sizeof (rm_sta));
	rm_sta.num_sta = 1;
	(void) memcpy(rm_sta.addr, bcast, 6);

	err = iwp_cmd(sc, REPLY_REMOVE_STA, &rm_sta, sizeof (iwp_rem_sta_t), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to remove broadcast node in hardware.\n");
		return (err);
	}

	/*
	 * add broadcast node so that we can send broadcast frame
	 */
	(void) memset(&node, 0, sizeof (node));
	(void) memset(node.sta.addr, 0xff, 6);
	node.mode = 0;
	node.sta.sta_id = IWP_BROADCAST_ID;
	node.station_flags = 0;

	err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to add broadcast node\n");
		return (err);
	}

	return (err);
}
4220 
4221 /*
4222  * quiesce(9E) entry point.
4223  * This function is called when the system is single-threaded at high
4224  * PIL with preemption disabled. Therefore, this function must not be
4225  * blocked.
4226  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
4227  * DDI_FAILURE indicates an error condition and should almost never happen.
4228  */
4229 static int
iwp_quiesce(dev_info_t * dip)4230 iwp_quiesce(dev_info_t *dip)
4231 {
4232 	iwp_sc_t *sc;
4233 
4234 	sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
4235 	if (NULL == sc) {
4236 		return (DDI_FAILURE);
4237 	}
4238 
4239 #ifdef DEBUG
4240 	/* by pass any messages, if it's quiesce */
4241 	iwp_dbg_flags = 0;
4242 #endif
4243 
4244 	/*
4245 	 * No more blocking is allowed while we are in the
4246 	 * quiesce(9E) entry point.
4247 	 */
4248 	atomic_or_32(&sc->sc_flags, IWP_F_QUIESCED);
4249 
4250 	/*
4251 	 * Disable and mask all interrupts.
4252 	 */
4253 	iwp_stop(sc);
4254 
4255 	return (DDI_SUCCESS);
4256 }
4257 
/*
 * Request that the DMA master stop and wait for it to quiesce.
 */
static void
iwp_stop_master(iwp_sc_t *sc)
{
	uint32_t tmp;
	int n;

	/*
	 * ask the hardware to stop the bus master
	 */
	tmp = IWP_READ(sc, CSR_RESET);
	IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);

	/*
	 * if the MAC is in power-save state, there is nothing
	 * further to wait for
	 */
	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) {
		return;
	}

	/*
	 * poll up to 2000 x 1ms for the master-disabled status bit
	 */
	for (n = 0; n < 2000; n++) {
		if (IWP_READ(sc, CSR_RESET) &
		    CSR_RESET_REG_FLAG_MASTER_DISABLED) {
			break;
		}
		DELAY(1000);
	}

#ifdef	DEBUG
	if (2000 == n) {
		IWP_DBG((IWP_DEBUG_HW, "iwp_stop_master(): "
		    "timeout waiting for master stop\n"));
	}
#endif
}
4288 
4289 static int
iwp_power_up(iwp_sc_t * sc)4290 iwp_power_up(iwp_sc_t *sc)
4291 {
4292 	uint32_t tmp;
4293 
4294 	iwp_mac_access_enter(sc);
4295 	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
4296 	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
4297 	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
4298 	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4299 	iwp_mac_access_exit(sc);
4300 
4301 	DELAY(5000);
4302 	return (IWP_SUCCESS);
4303 }
4304 
4305 /*
4306  * hardware initialization
4307  */
/*
 * hardware initialization: clock bring-up, APMG setup, radio
 * configuration from EEPROM, power-up, and PA-type specific
 * register settings.  Returns IWP_SUCCESS, IWP_FAIL on bad
 * EEPROM radio data, or ETIMEDOUT if the clock never stabilizes.
 */
static int
iwp_preinit(iwp_sc_t *sc)
{
	int		n;
	uint8_t		vlink;
	uint16_t	radio_cfg;
	uint32_t	tmp;

	/*
	 * clear any pending interrupts
	 */
	IWP_WRITE(sc, CSR_INT, 0xffffffff);

	/*
	 * disable the L0s exit timer before requesting init
	 */
	tmp = IWP_READ(sc, CSR_GIO_CHICKEN_BITS);
	IWP_WRITE(sc, CSR_GIO_CHICKEN_BITS,
	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	IWP_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * wait for clock ready
	 */
	for (n = 0; n < 1000; n++) {
		if (IWP_READ(sc, CSR_GP_CNTRL) &
		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
			break;
		}
		DELAY(10);
	}

	if (1000 == n) {
		return (ETIMEDOUT);
	}

	iwp_mac_access_enter(sc);

	/*
	 * enable DMA clock and disable L1-active in the APMG block
	 */
	iwp_reg_write(sc, ALM_APMG_CLK_EN, APMG_CLK_REG_VAL_DMA_CLK_RQT);

	DELAY(20);
	tmp = iwp_reg_read(sc, ALM_APMG_PCIDEV_STT);
	iwp_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
	iwp_mac_access_exit(sc);

	/*
	 * program the radio type/step/dash from the EEPROM into
	 * the hardware interface configuration register
	 */
	radio_cfg = IWP_READ_EEP_SHORT(sc, EEP_SP_RADIO_CONFIGURATION);
	if (SP_RADIO_TYPE_MSK(radio_cfg) < SP_RADIO_TYPE_MAX) {
		tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
		IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
		    tmp | SP_RADIO_TYPE_MSK(radio_cfg) |
		    SP_RADIO_STEP_MSK(radio_cfg) |
		    SP_RADIO_DASH_MSK(radio_cfg));
	} else {
		cmn_err(CE_WARN, "iwp_preinit(): "
		    "radio configuration information in eeprom is wrong\n");
		return (IWP_FAIL);
	}


	IWP_WRITE(sc, CSR_INT_COALESCING, 512 / 32);

	(void) iwp_power_up(sc);

	/*
	 * PCI config-space workaround for early hardware revisions —
	 * presumably clears a problematic bit at offset 0xe8; TODO
	 * confirm against the hardware errata
	 */
	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
		tmp = ddi_get32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
		ddi_put32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
		    tmp & ~(1 << 11));
	}

	vlink = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
	    vlink & ~2);

	tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
	IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG, tmp);

	/*
	 * make sure power supply on each part of the hardware
	 */
	iwp_mac_access_enter(sc);
	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	DELAY(5);

	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	iwp_mac_access_exit(sc);

	/*
	 * select the radio SKU according to the power-amplifier type
	 */
	if (PA_TYPE_MIX == sc->sc_chip_param.pa_type) {
		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_MIX);
	}

	if (PA_TYPE_INTER == sc->sc_chip_param.pa_type) {

		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
	}

	return (IWP_SUCCESS);
}
4416 
4417 /*
4418  * set up semphore flag to own EEPROM
4419  */
4420 static int
iwp_eep_sem_down(iwp_sc_t * sc)4421 iwp_eep_sem_down(iwp_sc_t *sc)
4422 {
4423 	int count1, count2;
4424 	uint32_t tmp;
4425 
4426 	for (count1 = 0; count1 < 1000; count1++) {
4427 		tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4428 		IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4429 		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4430 
4431 		for (count2 = 0; count2 < 2; count2++) {
4432 			if (IWP_READ(sc, CSR_HW_IF_CONFIG_REG) &
4433 			    CSR_HW_IF_CONFIG_REG_EEP_SEM) {
4434 				return (IWP_SUCCESS);
4435 			}
4436 			DELAY(10000);
4437 		}
4438 	}
4439 	return (IWP_FAIL);
4440 }
4441 
4442 /*
4443  * reset semphore flag to release EEPROM
4444  */
4445 static void
iwp_eep_sem_up(iwp_sc_t * sc)4446 iwp_eep_sem_up(iwp_sc_t *sc)
4447 {
4448 	uint32_t tmp;
4449 
4450 	tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4451 	IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4452 	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4453 }
4454 
4455 /*
4456  * This function read all infomation from eeprom
4457  */
/*
 * Read the entire EEPROM into sc_eep_map, 16 bits at a time.
 * Returns IWP_SUCCESS, or IWP_FAIL if the EEPROM signature is bad,
 * the semaphore cannot be acquired, or a word read times out.
 */
static int
iwp_eep_load(iwp_sc_t *sc)
{
	int i, rr;
	uint32_t rv, tmp, eep_gp;
	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;

	/*
	 * read eeprom gp register in CSR
	 */
	eep_gp = IWP_READ(sc, CSR_EEPROM_GP);
	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
	    CSR_EEPROM_GP_BAD_SIGNATURE) {
		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
		    "not find eeprom\n"));
		return (IWP_FAIL);
	}

	/*
	 * take the EEPROM semaphore before touching it
	 */
	rr = iwp_eep_sem_down(sc);
	if (rr != 0) {
		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
		    "driver failed to own EEPROM\n"));
		return (IWP_FAIL);
	}

	/*
	 * read one 16-bit word per iteration: write the byte address,
	 * clear the command bit, then poll for the ready bit (bit 0);
	 * the data arrives in the upper 16 bits of the register
	 */
	for (addr = 0; addr < eep_sz; addr += 2) {
		IWP_WRITE(sc, CSR_EEPROM_REG, addr<<1);
		tmp = IWP_READ(sc, CSR_EEPROM_REG);
		IWP_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));

		for (i = 0; i < 10; i++) {
			rv = IWP_READ(sc, CSR_EEPROM_REG);
			if (rv & 1) {
				break;
			}
			DELAY(10);
		}

		if (!(rv & 1)) {
			IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
			    "time out when read eeprome\n"));
			iwp_eep_sem_up(sc);
			return (IWP_FAIL);
		}

		eep_p[addr/2] = LE_16(rv >> 16);
	}

	/*
	 * release the semaphore for other EEPROM users
	 */
	iwp_eep_sem_up(sc);
	return (IWP_SUCCESS);
}
4510 
4511 /*
4512  * initialize mac address in ieee80211com_t struct
4513  */
4514 static void
iwp_get_mac_from_eep(iwp_sc_t * sc)4515 iwp_get_mac_from_eep(iwp_sc_t *sc)
4516 {
4517 	ieee80211com_t *ic = &sc->sc_ic;
4518 
4519 	IEEE80211_ADDR_COPY(ic->ic_macaddr, &sc->sc_eep_map[EEP_MAC_ADDRESS]);
4520 
4521 	IWP_DBG((IWP_DEBUG_EEPROM, "iwp_get_mac_from_eep(): "
4522 	    "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4523 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4524 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4525 }
4526 
/*
 * main initialization function
 *
 * Runs the two-phase firmware boot: the "init" ucode is loaded and
 * executed first, the chip is then stopped and re-initialized, and
 * the "runtime" ucode is loaded and configured via iwp_config().
 *
 * Returns IWP_SUCCESS or IWP_FAIL.
 */
static int
iwp_init(iwp_sc_t *sc)
{
	int err = IWP_FAIL;
	clock_t clk;

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	err = iwp_init_common(sc);
	if (err != IWP_SUCCESS) {
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * backup ucode data part for future use.
	 */
	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
	    sc->sc_dma_fw_data.mem_va,
	    sc->sc_dma_fw_data.alength);

	/* load firmware init segment into NIC */
	err = iwp_load_init_firmware(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to setup init firmware\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * now press "execute" start running
	 */
	IWP_WRITE(sc, CSR_RESET, 0);

	/*
	 * wait up to one second for the init-alive notification.
	 * IWP_F_FW_INIT is presumably set by the interrupt path when
	 * the alive notification arrives (TODO confirm); cv_timedwait()
	 * returns -1 on timeout, terminating the loop.
	 */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);
	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_ucode_cv,
		    &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_FW_INIT)) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to process init alive.\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	mutex_exit(&sc->sc_glock);

	/*
	 * stop chipset for initializing chipset again
	 */
	iwp_stop(sc);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	err = iwp_init_common(sc);
	if (err != IWP_SUCCESS) {
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * load firmware run segment into NIC
	 */
	err = iwp_load_run_firmware(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to setup run firmware\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * now press "execute" start running
	 */
	IWP_WRITE(sc, CSR_RESET, 0);

	/* wait up to one second for the runtime-alive notification */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);
	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_ucode_cv,
		    &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_FW_INIT)) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to process runtime alive.\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	mutex_exit(&sc->sc_glock);

	DELAY(1000);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	/*
	 * at this point, the firmware is loaded OK, then config the hardware
	 * with the ucode API, including rxon, txpower, etc.
	 */
	err = iwp_config(sc);
	if (err) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to configure device\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * at this point, hardware may receive beacons :)
	 */
	mutex_exit(&sc->sc_glock);
	return (IWP_SUCCESS);
}
4658 
/*
 * stop or disable NIC
 *
 * Resets the device, masks interrupts, drains all Tx/Rx rings and
 * finally places the chipset into software reset.  May be called on
 * the quiesce path, where taking locks is not allowed; the
 * IWP_F_QUIESCED flag makes lock acquisition conditional.
 */
static void
iwp_stop(iwp_sc_t *sc)
{
	uint32_t tmp;
	int i;

	/* by pass if it's quiesced */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		mutex_enter(&sc->sc_glock);
	}

	IWP_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
	/*
	 * disable interrupts and acknowledge any that are pending
	 */
	IWP_WRITE(sc, CSR_INT_MASK, 0);
	IWP_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
	IWP_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);

	/*
	 * reset all Tx rings
	 */
	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		iwp_reset_tx_ring(sc, &sc->sc_txq[i]);
	}

	/*
	 * reset Rx ring
	 */
	iwp_reset_rx_ring(sc);

	/* request the DMA clock off under mac access */
	iwp_mac_access_enter(sc);
	iwp_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
	iwp_mac_access_exit(sc);

	DELAY(5);

	iwp_stop_master(sc);

	/* clear the tx timer under the monitor lock */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_tx_timer = 0;
	mutex_exit(&sc->sc_mt_lock);

	/* put the chipset into software reset */
	tmp = IWP_READ(sc, CSR_RESET);
	IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);

	/* by pass if it's quiesced */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		mutex_exit(&sc->sc_glock);
	}
}
4713 
/*
 * Naive implementation of the Adaptive Multi Rate Retry algorithm:
 * "IEEE 802.11 Rate Adaptation: A Practical Approach"
 * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
 * INRIA Sophia - Projet Planete
 * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
 */

/* fewer than 10% of frames needed a retry: the current rate works */
#define	is_success(amrr)	\
	((amrr)->retrycnt < (amrr)->txcnt / 10)
/* more than a third of frames needed a retry: the rate is too high */
#define	is_failure(amrr)	\
	((amrr)->retrycnt > (amrr)->txcnt / 3)
/* enough samples in the window to consider raising the rate */
#define	is_enough(amrr)		\
	((amrr)->txcnt > 200)
/* minimum sample count before reacting to failures */
#define	not_very_few(amrr)	\
	((amrr)->txcnt > 40)
#define	is_min_rate(in)		\
	(0 == (in)->in_txrate)
#define	is_max_rate(in)		\
	((in)->in_rates.ir_nrates - 1 == (in)->in_txrate)
#define	increase_rate(in)	\
	((in)->in_txrate++)
#define	decrease_rate(in)	\
	((in)->in_txrate--)
/* restart the tx/retry counters for the next sampling window */
#define	reset_cnt(amrr)		\
	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }

/* bounds for the adaptive success threshold in iwp_amrr_ratectl() */
#define	IWP_AMRR_MIN_SUCCESS_THRESHOLD	 1
#define	IWP_AMRR_MAX_SUCCESS_THRESHOLD	15
4742 
4743 static void
iwp_amrr_init(iwp_amrr_t * amrr)4744 iwp_amrr_init(iwp_amrr_t *amrr)
4745 {
4746 	amrr->success = 0;
4747 	amrr->recovery = 0;
4748 	amrr->txcnt = amrr->retrycnt = 0;
4749 	amrr->success_threshold = IWP_AMRR_MIN_SUCCESS_THRESHOLD;
4750 }
4751 
4752 static void
iwp_amrr_timeout(iwp_sc_t * sc)4753 iwp_amrr_timeout(iwp_sc_t *sc)
4754 {
4755 	ieee80211com_t *ic = &sc->sc_ic;
4756 
4757 	IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_timeout(): "
4758 	    "enter\n"));
4759 
4760 	if (IEEE80211_M_STA == ic->ic_opmode) {
4761 		iwp_amrr_ratectl(NULL, ic->ic_bss);
4762 	} else {
4763 		ieee80211_iterate_nodes(&ic->ic_sta, iwp_amrr_ratectl, NULL);
4764 	}
4765 
4766 	sc->sc_clk = ddi_get_lbolt();
4767 }
4768 
/*
 * Per-node AMRR rate decision (see the paper cited above the AMRR
 * macros).  Raise the tx rate after a sustained run of low-retry
 * windows; lower it when the retry ratio gets too high.
 */
/* ARGSUSED */
static void
iwp_amrr_ratectl(void *arg, ieee80211_node_t *in)
{
	/*
	 * NOTE(review): this cast assumes the driver's node structure
	 * begins with its iwp_amrr_t state -- confirm against the node
	 * allocation code.
	 */
	iwp_amrr_t *amrr = (iwp_amrr_t *)in;
	int need_change = 0;

	if (is_success(amrr) && is_enough(amrr)) {
		amrr->success++;
		if (amrr->success >= amrr->success_threshold &&
		    !is_max_rate(in)) {
			/* sustained success: probe one rate higher */
			amrr->recovery = 1;
			amrr->success = 0;
			increase_rate(in);
			IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
			    "AMRR increasing rate %d "
			    "(txcnt=%d retrycnt=%d)\n",
			    in->in_txrate, amrr->txcnt,
			    amrr->retrycnt));
			need_change = 1;
		} else {
			amrr->recovery = 0;
		}
	} else if (not_very_few(amrr) && is_failure(amrr)) {
		amrr->success = 0;
		if (!is_min_rate(in)) {
			if (amrr->recovery) {
				/*
				 * the rate we just probed failed:
				 * demand a longer success run before
				 * probing again
				 */
				amrr->success_threshold++;
				if (amrr->success_threshold >
				    IWP_AMRR_MAX_SUCCESS_THRESHOLD) {
					amrr->success_threshold =
					    IWP_AMRR_MAX_SUCCESS_THRESHOLD;
				}
			} else {
				amrr->success_threshold =
				    IWP_AMRR_MIN_SUCCESS_THRESHOLD;
			}
			decrease_rate(in);
			IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
			    "AMRR decreasing rate %d "
			    "(txcnt=%d retrycnt=%d)\n",
			    in->in_txrate, amrr->txcnt,
			    amrr->retrycnt));
			need_change = 1;
		}
		amrr->recovery = 0;	/* paper is incorrect */
	}

	/* start a fresh sampling window after a full one or a change */
	if (is_enough(amrr) || need_change) {
		reset_cnt(amrr);
	}
}
4821 
4822 /*
4823  * translate indirect address in eeprom to direct address
4824  * in eeprom and return address of entry whos indirect address
4825  * is indi_addr
4826  */
4827 static uint8_t *
iwp_eep_addr_trans(iwp_sc_t * sc,uint32_t indi_addr)4828 iwp_eep_addr_trans(iwp_sc_t *sc, uint32_t indi_addr)
4829 {
4830 	uint32_t	di_addr;
4831 	uint16_t	temp;
4832 
4833 	if (!(indi_addr & INDIRECT_ADDRESS)) {
4834 		di_addr = indi_addr;
4835 		return (&sc->sc_eep_map[di_addr]);
4836 	}
4837 
4838 	switch (indi_addr & INDIRECT_TYPE_MSK) {
4839 	case INDIRECT_GENERAL:
4840 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_GENERAL);
4841 		break;
4842 	case	INDIRECT_HOST:
4843 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_HOST);
4844 		break;
4845 	case	INDIRECT_REGULATORY:
4846 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_REGULATORY);
4847 		break;
4848 	case	INDIRECT_CALIBRATION:
4849 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_CALIBRATION);
4850 		break;
4851 	case	INDIRECT_PROCESS_ADJST:
4852 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_PROCESS_ADJST);
4853 		break;
4854 	case	INDIRECT_OTHERS:
4855 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_OTHERS);
4856 		break;
4857 	default:
4858 		temp = 0;
4859 		cmn_err(CE_WARN, "iwp_eep_addr_trans(): "
4860 		    "incorrect indirect eeprom address.\n");
4861 		break;
4862 	}
4863 
4864 	di_addr = (indi_addr & ADDRESS_MSK) + (temp << 1);
4865 
4866 	return (&sc->sc_eep_map[di_addr]);
4867 }
4868 
/*
 * Load a section of ucode into NIC SRAM via the service DMA channel.
 *
 * addr_s: host DRAM address of the segment (source)
 * addr_d: device SRAM address (destination)
 * len:    segment length in bytes
 *
 * Always returns IWP_SUCCESS; only the DMA setup happens here --
 * completion is presumably signalled asynchronously (TODO confirm
 * against the interrupt handler).
 */
static int
iwp_put_seg_fw(iwp_sc_t *sc, uint32_t addr_s, uint32_t addr_d, uint32_t len)
{

	iwp_mac_access_enter(sc);

	/* pause the service channel before reprogramming it */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* destination address in device SRAM */
	IWP_WRITE(sc, IWP_FH_SRVC_CHNL_SRAM_ADDR_REG(IWP_FH_SRVC_CHNL), addr_d);

	/* source address in host DRAM and length of the transfer */
	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL0_REG(IWP_FH_SRVC_CHNL),
	    (addr_s & FH_MEM_TFDIB_DRAM_ADDR_LSB_MASK));

	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL1_REG(IWP_FH_SRVC_CHNL), len);

	/* single transfer buffer, marked valid */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_BUF_STS_REG(IWP_FH_SRVC_CHNL),
	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
	    IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* re-enable the channel, kicking off the transfer */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwp_mac_access_exit(sc);

	return (IWP_SUCCESS);
}
4902 
/*
 * necessary setting during alive notification
 *
 * Programs the tx scheduler (SCD) SRAM and registers, maps tx queues
 * to FIFOs, then sends the wimax-coexistence and crystal-frequency
 * calibration commands.  Returns IWP_SUCCESS, or the failing
 * iwp_cmd() status.
 */
static int
iwp_alive_common(iwp_sc_t *sc)
{
	uint32_t	base;
	uint32_t	i;
	iwp_wimax_coex_cmd_t	w_cmd;
	iwp_calibration_crystal_cmd_t	c_cmd;
	uint32_t	rv = IWP_FAIL;

	/*
	 * initialize SCD related registers to make TX work.
	 */
	iwp_mac_access_enter(sc);

	/*
	 * read sram address of data base.
	 */
	sc->sc_scd_base = iwp_reg_read(sc, IWP_SCD_SRAM_BASE_ADDR);

	/* zero the SCD context data area ... */
	for (base = sc->sc_scd_base + IWP_SCD_CONTEXT_DATA_OFFSET;
	    base < sc->sc_scd_base + IWP_SCD_TX_STTS_BITMAP_OFFSET;
	    base += 4) {
		iwp_mem_write(sc, base, 0);
	}

	/* ... the tx status bitmap ... */
	for (; base < sc->sc_scd_base + IWP_SCD_TRANSLATE_TBL_OFFSET;
	    base += 4) {
		iwp_mem_write(sc, base, 0);
	}

	/* ... and the translate table (one uint16_t per queue) */
	for (i = 0; i < sizeof (uint16_t) * IWP_NUM_QUEUES; i += 4) {
		iwp_mem_write(sc, base + i, 0);
	}

	iwp_reg_write(sc, IWP_SCD_DRAM_BASE_ADDR,
	    sc->sc_dma_sh.cookie.dmac_address >> 10);

	iwp_reg_write(sc, IWP_SCD_QUEUECHAIN_SEL,
	    IWP_SCD_QUEUECHAIN_SEL_ALL(IWP_NUM_QUEUES));

	iwp_reg_write(sc, IWP_SCD_AGGR_SEL, 0);

	/* reset read/write pointers and per-queue window/frame limits */
	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(i), 0);
		IWP_WRITE(sc, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwp_mem_write(sc, sc->sc_scd_base +
		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwp_mem_write(sc, sc->sc_scd_base +
		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i) +
		    sizeof (uint32_t),
		    ((SCD_WIN_SIZE << IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((SCD_FRAME_LIMIT <<
		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwp_reg_write(sc, IWP_SCD_INTERRUPT_MASK, (1 << IWP_NUM_QUEUES) - 1);

	iwp_reg_write(sc, (IWP_SCD_BASE + 0x10),
	    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));

	IWP_WRITE(sc, HBUS_TARG_WRPTR, (IWP_CMD_QUEUE_NUM << 8));
	iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(IWP_CMD_QUEUE_NUM), 0);

	/*
	 * queue 0-7 map to FIFO 0-7 and
	 * all queues work under FIFO mode(none-scheduler_ack)
	 */
	for (i = 0; i < 4; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    ((3-i) << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWP_SCD_QUEUE_STTS_REG_MSK);
	}

	/* the command queue is mapped to its dedicated FIFO */
	iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(IWP_CMD_QUEUE_NUM),
	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (IWP_CMD_FIFO_NUM << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWP_SCD_QUEUE_STTS_REG_MSK);

	for (i = 5; i < 7; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (i << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWP_SCD_QUEUE_STTS_REG_MSK);
	}

	iwp_mac_access_exit(sc);

	/* an all-zero wimax coexistence priority table is sent */
	(void) memset(&w_cmd, 0, sizeof (w_cmd));

	rv = iwp_cmd(sc, COEX_PRIORITY_TABLE_CMD, &w_cmd, sizeof (w_cmd), 1);
	if (rv != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alive_common(): "
		    "failed to send wimax coexist command.\n");
		return (rv);
	}

	/* crystal calibration values come from the EEPROM */
	(void) memset(&c_cmd, 0, sizeof (c_cmd));

	c_cmd.opCode = PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
	c_cmd.data.cap_pin1 = LE_16(sc->sc_eep_calib->xtal_calib[0]);
	c_cmd.data.cap_pin2 = LE_16(sc->sc_eep_calib->xtal_calib[1]);

	rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &c_cmd, sizeof (c_cmd), 1);
	if (rv != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alive_common(): "
		    "failed to send crystal frq calibration command.\n");
		return (rv);
	}

	/*
	 * make sure crystal frequency calibration ready
	 * before next operations.
	 */
	DELAY(1000);

	return (IWP_SUCCESS);
}
5029 
5030 /*
5031  * save results of calibration from ucode
5032  */
5033 static void
iwp_save_calib_result(iwp_sc_t * sc,iwp_rx_desc_t * desc)5034 iwp_save_calib_result(iwp_sc_t *sc, iwp_rx_desc_t *desc)
5035 {
5036 	struct iwp_calib_results *res_p = &sc->sc_calib_results;
5037 	struct iwp_calib_hdr *calib_hdr = (struct iwp_calib_hdr *)(desc + 1);
5038 	int len = LE_32(desc->len);
5039 
5040 	/*
5041 	 * ensure the size of buffer is not too big
5042 	 */
5043 	len = (len & FH_RSCSR_FRAME_SIZE_MASK) - 4;
5044 
5045 	switch (calib_hdr->op_code) {
5046 	case PHY_CALIBRATE_LO_CMD:
5047 		if (NULL == res_p->lo_res) {
5048 			res_p->lo_res = kmem_alloc(len, KM_NOSLEEP);
5049 		}
5050 
5051 		if (NULL == res_p->lo_res) {
5052 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5053 			    "failed to allocate memory.\n");
5054 			return;
5055 		}
5056 
5057 		res_p->lo_res_len = len;
5058 		(void) memcpy(res_p->lo_res, calib_hdr, len);
5059 		break;
5060 	case PHY_CALIBRATE_TX_IQ_CMD:
5061 		if (NULL == res_p->tx_iq_res) {
5062 			res_p->tx_iq_res = kmem_alloc(len, KM_NOSLEEP);
5063 		}
5064 
5065 		if (NULL == res_p->tx_iq_res) {
5066 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5067 			    "failed to allocate memory.\n");
5068 			return;
5069 		}
5070 
5071 		res_p->tx_iq_res_len = len;
5072 		(void) memcpy(res_p->tx_iq_res, calib_hdr, len);
5073 		break;
5074 	case PHY_CALIBRATE_TX_IQ_PERD_CMD:
5075 		if (NULL == res_p->tx_iq_perd_res) {
5076 			res_p->tx_iq_perd_res = kmem_alloc(len, KM_NOSLEEP);
5077 		}
5078 
5079 		if (NULL == res_p->tx_iq_perd_res) {
5080 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5081 			    "failed to allocate memory.\n");
5082 		}
5083 
5084 		res_p->tx_iq_perd_res_len = len;
5085 		(void) memcpy(res_p->tx_iq_perd_res, calib_hdr, len);
5086 		break;
5087 	case PHY_CALIBRATE_BASE_BAND_CMD:
5088 		if (NULL == res_p->base_band_res) {
5089 			res_p->base_band_res = kmem_alloc(len, KM_NOSLEEP);
5090 		}
5091 
5092 		if (NULL == res_p->base_band_res) {
5093 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5094 			    "failed to allocate memory.\n");
5095 		}
5096 
5097 		res_p->base_band_res_len = len;
5098 		(void) memcpy(res_p->base_band_res, calib_hdr, len);
5099 		break;
5100 	default:
5101 		cmn_err(CE_WARN, "iwp_save_calib_result(): "
5102 		    "incorrect calibration type(%d).\n", calib_hdr->op_code);
5103 		break;
5104 	}
5105 
5106 }
5107 
5108 static void
iwp_release_calib_buffer(iwp_sc_t * sc)5109 iwp_release_calib_buffer(iwp_sc_t *sc)
5110 {
5111 	if (sc->sc_calib_results.lo_res != NULL) {
5112 		kmem_free(sc->sc_calib_results.lo_res,
5113 		    sc->sc_calib_results.lo_res_len);
5114 		sc->sc_calib_results.lo_res = NULL;
5115 	}
5116 
5117 	if (sc->sc_calib_results.tx_iq_res != NULL) {
5118 		kmem_free(sc->sc_calib_results.tx_iq_res,
5119 		    sc->sc_calib_results.tx_iq_res_len);
5120 		sc->sc_calib_results.tx_iq_res = NULL;
5121 	}
5122 
5123 	if (sc->sc_calib_results.tx_iq_perd_res != NULL) {
5124 		kmem_free(sc->sc_calib_results.tx_iq_perd_res,
5125 		    sc->sc_calib_results.tx_iq_perd_res_len);
5126 		sc->sc_calib_results.tx_iq_perd_res = NULL;
5127 	}
5128 
5129 	if (sc->sc_calib_results.base_band_res != NULL) {
5130 		kmem_free(sc->sc_calib_results.base_band_res,
5131 		    sc->sc_calib_results.base_band_res_len);
5132 		sc->sc_calib_results.base_band_res = NULL;
5133 	}
5134 
5135 }
5136 
/*
 * common section of initialization
 *
 * Shared by both firmware boot phases: checks the hardware RF-kill
 * switch, programs the Rx/Tx DMA rings and the keep-warm page, clears
 * "radio off"/"command blocked" and enables interrupts.
 */
static int
iwp_init_common(iwp_sc_t *sc)
{
	int32_t	qid;
	uint32_t tmp;

	(void) iwp_preinit(sc);

	/* bail out if the hardware RF-kill switch is engaged */
	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
		cmn_err(CE_NOTE, "iwp_init_common(): "
		    "radio transmitter is off\n");
		return (IWP_FAIL);
	}

	/*
	 * init Rx ring
	 */
	iwp_mac_access_enter(sc);
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);

	/* point the NIC at the shared status area (iwp_shared.val0) */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
	    offsetof(struct iwp_shared, val0)) >> 4));

	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWP_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    (RX_QUEUE_SIZE_LOG <<
	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
	iwp_mac_access_exit(sc);
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
	    (RX_QUEUE_SIZE - 1) & ~0x7);

	/*
	 * init Tx rings
	 */
	iwp_mac_access_enter(sc);
	iwp_reg_write(sc, IWP_SCD_TXFACT, 0);

	/*
	 * keep warm page
	 */
	IWP_WRITE(sc, IWP_FH_KW_MEM_ADDR_REG,
	    sc->sc_dma_kw.cookie.dmac_address >> 4);

	for (qid = 0; qid < IWP_NUM_QUEUES; qid++) {
		IWP_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
		IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	}

	iwp_mac_access_exit(sc);

	/*
	 * clear "radio off" and "disable command" bits
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/*
	 * clear any pending interrupts
	 */
	IWP_WRITE(sc, CSR_INT, 0xffffffff);

	/*
	 * enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	/*
	 * NOTE(review): RFKILL is cleared twice more here, in addition
	 * to the clear above -- looks redundant; confirm whether the
	 * repeat is a hardware requirement before removing it.
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return (IWP_SUCCESS);
}
5223 
5224 static int
iwp_fast_recover(iwp_sc_t * sc)5225 iwp_fast_recover(iwp_sc_t *sc)
5226 {
5227 	ieee80211com_t *ic = &sc->sc_ic;
5228 	int err = IWP_FAIL;
5229 
5230 	mutex_enter(&sc->sc_glock);
5231 
5232 	/* restore runtime configuration */
5233 	bcopy(&sc->sc_config_save, &sc->sc_config,
5234 	    sizeof (sc->sc_config));
5235 
5236 	sc->sc_config.assoc_id = 0;
5237 	sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
5238 
5239 	if ((err = iwp_hw_set_before_auth(sc)) != IWP_SUCCESS) {
5240 		cmn_err(CE_WARN, "iwp_fast_recover(): "
5241 		    "could not setup authentication\n");
5242 		mutex_exit(&sc->sc_glock);
5243 		return (err);
5244 	}
5245 
5246 	bcopy(&sc->sc_config_save, &sc->sc_config,
5247 	    sizeof (sc->sc_config));
5248 
5249 	/* update adapter's configuration */
5250 	err = iwp_run_state_config(sc);
5251 	if (err != IWP_SUCCESS) {
5252 		cmn_err(CE_WARN, "iwp_fast_recover(): "
5253 		    "failed to setup association\n");
5254 		mutex_exit(&sc->sc_glock);
5255 		return (err);
5256 	}
5257 	/* set LED on */
5258 	iwp_set_led(sc, 2, 0, 1);
5259 
5260 	mutex_exit(&sc->sc_glock);
5261 
5262 	atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
5263 
5264 	/* start queue */
5265 	IWP_DBG((IWP_DEBUG_FW, "iwp_fast_recover(): "
5266 	    "resume xmit\n"));
5267 	mac_tx_update(ic->ic_mach);
5268 
5269 	return (IWP_SUCCESS);
5270 }
5271 
5272 static int
iwp_run_state_config(iwp_sc_t * sc)5273 iwp_run_state_config(iwp_sc_t *sc)
5274 {
5275 	struct ieee80211com *ic = &sc->sc_ic;
5276 	ieee80211_node_t *in = ic->ic_bss;
5277 	int err = IWP_FAIL;
5278 
5279 	/*
5280 	 * update adapter's configuration
5281 	 */
5282 	sc->sc_config.assoc_id = in->in_associd & 0x3fff;
5283 
5284 	/*
5285 	 * short preamble/slot time are
5286 	 * negotiated when associating
5287 	 */
5288 	sc->sc_config.flags &=
5289 	    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
5290 	    RXON_FLG_SHORT_SLOT_MSK);
5291 
5292 	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
5293 		sc->sc_config.flags |=
5294 		    LE_32(RXON_FLG_SHORT_SLOT_MSK);
5295 	}
5296 
5297 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
5298 		sc->sc_config.flags |=
5299 		    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
5300 	}
5301 
5302 	sc->sc_config.filter_flags |=
5303 	    LE_32(RXON_FILTER_ASSOC_MSK);
5304 
5305 	if (ic->ic_opmode != IEEE80211_M_STA) {
5306 		sc->sc_config.filter_flags |=
5307 		    LE_32(RXON_FILTER_BCON_AWARE_MSK);
5308 	}
5309 
5310 	IWP_DBG((IWP_DEBUG_80211, "iwp_run_state_config(): "
5311 	    "config chan %d flags %x"
5312 	    " filter_flags %x\n",
5313 	    sc->sc_config.chan, sc->sc_config.flags,
5314 	    sc->sc_config.filter_flags));
5315 
5316 	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
5317 	    sizeof (iwp_rxon_cmd_t), 1);
5318 	if (err != IWP_SUCCESS) {
5319 		cmn_err(CE_WARN, "iwp_run_state_config(): "
5320 		    "could not update configuration\n");
5321 		return (err);
5322 	}
5323 
5324 	return (err);
5325 }
5326 
/*
 * This function overwrites default configurations of
 * ieee80211com structure in Net80211 module: it installs the
 * driver's state-machine and node alloc/free hooks.
 */
static void
iwp_overwrite_ic_default(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;

	/* save net80211's handler; iwp_newstate presumably chains to it */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwp_newstate;
	ic->ic_node_alloc = iwp_node_alloc;
	ic->ic_node_free = iwp_node_free;
}
5342 
5343 /*
5344  * This function adds AP station into hardware.
5345  */
5346 static int
iwp_add_ap_sta(iwp_sc_t * sc)5347 iwp_add_ap_sta(iwp_sc_t *sc)
5348 {
5349 	ieee80211com_t *ic = &sc->sc_ic;
5350 	ieee80211_node_t *in = ic->ic_bss;
5351 	iwp_add_sta_t node;
5352 	int err = IWP_FAIL;
5353 
5354 	/*
5355 	 * Add AP node into hardware.
5356 	 */
5357 	(void) memset(&node, 0, sizeof (node));
5358 	IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
5359 	node.mode = STA_MODE_ADD_MSK;
5360 	node.sta.sta_id = IWP_AP_ID;
5361 
5362 	err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
5363 	if (err != IWP_SUCCESS) {
5364 		cmn_err(CE_WARN, "iwp_add_ap_sta(): "
5365 		    "failed to add AP node\n");
5366 		return (err);
5367 	}
5368 
5369 	return (err);
5370 }
5371 
5372 /*
5373  * Check EEPROM version and Calibration version.
5374  */
5375 static int
iwp_eep_ver_chk(iwp_sc_t * sc)5376 iwp_eep_ver_chk(iwp_sc_t *sc)
5377 {
5378 	if ((IWP_READ_EEP_SHORT(sc, EEP_VERSION) < 0x011a) ||
5379 	    (sc->sc_eep_calib->tx_pow_calib_hdr.calib_version < 4)) {
5380 		cmn_err(CE_WARN, "iwp_eep_ver_chk(): "
5381 		    "unsupported eeprom detected\n");
5382 		return (IWP_FAIL);
5383 	}
5384 
5385 	return (IWP_SUCCESS);
5386 }
5387 
5388 /*
5389  * Determine parameters for all supported chips.
5390  */
5391 static void
iwp_set_chip_param(iwp_sc_t * sc)5392 iwp_set_chip_param(iwp_sc_t *sc)
5393 {
5394 	if ((0x008d == sc->sc_dev_id) ||
5395 	    (0x008e == sc->sc_dev_id)) {
5396 		sc->sc_chip_param.phy_mode = PHY_MODE_G |
5397 		    PHY_MODE_A | PHY_MODE_N;
5398 
5399 		sc->sc_chip_param.tx_ant = ANT_A | ANT_B;
5400 		sc->sc_chip_param.rx_ant = ANT_A | ANT_B;
5401 
5402 		sc->sc_chip_param.pa_type = PA_TYPE_MIX;
5403 	}
5404 
5405 	if ((0x422c == sc->sc_dev_id) ||
5406 	    (0x4239 == sc->sc_dev_id)) {
5407 		sc->sc_chip_param.phy_mode = PHY_MODE_G |
5408 		    PHY_MODE_A | PHY_MODE_N;
5409 
5410 		sc->sc_chip_param.tx_ant = ANT_B | ANT_C;
5411 		sc->sc_chip_param.rx_ant = ANT_B | ANT_C;
5412 
5413 		sc->sc_chip_param.pa_type = PA_TYPE_INTER;
5414 	}
5415 
5416 	if ((0x422b == sc->sc_dev_id) ||
5417 	    (0x4238 == sc->sc_dev_id)) {
5418 		sc->sc_chip_param.phy_mode = PHY_MODE_G |
5419 		    PHY_MODE_A | PHY_MODE_N;
5420 
5421 		sc->sc_chip_param.tx_ant = ANT_A | ANT_B | ANT_C;
5422 		sc->sc_chip_param.rx_ant = ANT_A | ANT_B | ANT_C;
5423 
5424 		sc->sc_chip_param.pa_type = PA_TYPE_SYSTEM;
5425 	}
5426 }
5427