xref: /freebsd/sys/dev/iwn/if_iwn.c (revision 5f0216bd883edee71bf81051e3c20505e4820903)
1 /*-
2  * Copyright (c) 2007-2009 Damien Bergamini <damien.bergamini@free.fr>
3  * Copyright (c) 2008 Benjamin Close <benjsc@FreeBSD.org>
4  * Copyright (c) 2008 Sam Leffler, Errno Consulting
5  * Copyright (c) 2011 Intel Corporation
6  * Copyright (c) 2013 Cedric GROSS <c.gross@kreiz-it.fr>
7  * Copyright (c) 2013 Adrian Chadd <adrian@FreeBSD.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
24  * adapters.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_wlan.h"
31 #include "opt_iwn.h"
32 
33 #include <sys/param.h>
34 #include <sys/sockio.h>
35 #include <sys/sysctl.h>
36 #include <sys/mbuf.h>
37 #include <sys/kernel.h>
38 #include <sys/socket.h>
39 #include <sys/systm.h>
40 #include <sys/malloc.h>
41 #include <sys/bus.h>
42 #include <sys/rman.h>
43 #include <sys/endian.h>
44 #include <sys/firmware.h>
45 #include <sys/limits.h>
46 #include <sys/module.h>
47 #include <sys/queue.h>
48 #include <sys/taskqueue.h>
49 
50 #include <machine/bus.h>
51 #include <machine/resource.h>
52 #include <machine/clock.h>
53 
54 #include <dev/pci/pcireg.h>
55 #include <dev/pci/pcivar.h>
56 
57 #include <net/bpf.h>
58 #include <net/if.h>
59 #include <net/if_var.h>
60 #include <net/if_arp.h>
61 #include <net/ethernet.h>
62 #include <net/if_dl.h>
63 #include <net/if_media.h>
64 #include <net/if_types.h>
65 
66 #include <netinet/in.h>
67 #include <netinet/in_systm.h>
68 #include <netinet/in_var.h>
69 #include <netinet/if_ether.h>
70 #include <netinet/ip.h>
71 
72 #include <net80211/ieee80211_var.h>
73 #include <net80211/ieee80211_radiotap.h>
74 #include <net80211/ieee80211_regdomain.h>
75 #include <net80211/ieee80211_ratectl.h>
76 
77 #include <dev/iwn/if_iwnreg.h>
78 #include <dev/iwn/if_iwnvar.h>
79 #include <dev/iwn/if_iwn_devid.h>
80 #include <dev/iwn/if_iwn_chip_cfg.h>
81 #include <dev/iwn/if_iwn_debug.h>
82 #include <dev/iwn/if_iwn_ioctl.h>
83 
84 struct iwn_ident {
85 	uint16_t	vendor;
86 	uint16_t	device;
87 	const char	*name;
88 };
89 
90 static const struct iwn_ident iwn_ident_table[] = {
91 	{ 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205"		},
92 	{ 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000"		},
93 	{ 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000"		},
94 	{ 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205"		},
95 	{ 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250"	},
96 	{ 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250"	},
97 	{ 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030"		},
98 	{ 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030"		},
99 	{ 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230"		},
100 	{ 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230"		},
101 	{ 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150"	},
102 	{ 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150"	},
103 	{ 0x8086, IWN_DID_2x00_1, "Intel(R) Centrino(R) Wireless-N 2200 BGN"	},
104 	{ 0x8086, IWN_DID_2x00_2, "Intel(R) Centrino(R) Wireless-N 2200 BGN"	},
105 	/* XXX 2200D is IWN_SDID_2x00_4; there's no way to express this here! */
106 	{ 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230"		},
107 	{ 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230"		},
108 	{ 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130"		},
109 	{ 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130"		},
110 	{ 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100"		},
111 	{ 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100"		},
112 	{ 0x8086, IWN_DID_105_1, "Intel Centrino Wireless-N 105"		},
113 	{ 0x8086, IWN_DID_105_2, "Intel Centrino Wireless-N 105"		},
114 	{ 0x8086, IWN_DID_135_1, "Intel Centrino Wireless-N 135"		},
115 	{ 0x8086, IWN_DID_135_2, "Intel Centrino Wireless-N 135"		},
116 	{ 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965"		},
117 	{ 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300"		},
118 	{ 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200"		},
119 	{ 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965"		},
120 	{ 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965"		},
121 	{ 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100"			},
122 	{ 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965"		},
123 	{ 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300"		},
124 	{ 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300"		},
125 	{ 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100"			},
126 	{ 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300"		},
127 	{ 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200"		},
128 	{ 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350"			},
129 	{ 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350"			},
130 	{ 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150"			},
131 	{ 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150"			},
132 	{ 0x8086, IWN_DID_6035_1, "Intel Centrino Advanced 6235"		},
133 	{ 0x8086, IWN_DID_6035_2, "Intel Centrino Advanced 6235"		},
134 	{ 0, 0, NULL }
135 };
136 
137 static int	iwn_probe(device_t);
138 static int	iwn_attach(device_t);
139 static int	iwn4965_attach(struct iwn_softc *, uint16_t);
140 static int	iwn5000_attach(struct iwn_softc *, uint16_t);
141 static int	iwn_config_specific(struct iwn_softc *, uint16_t);
142 static void	iwn_radiotap_attach(struct iwn_softc *);
143 static void	iwn_sysctlattach(struct iwn_softc *);
144 static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
145 		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
146 		    const uint8_t [IEEE80211_ADDR_LEN],
147 		    const uint8_t [IEEE80211_ADDR_LEN]);
148 static void	iwn_vap_delete(struct ieee80211vap *);
149 static int	iwn_detach(device_t);
150 static int	iwn_shutdown(device_t);
151 static int	iwn_suspend(device_t);
152 static int	iwn_resume(device_t);
153 static int	iwn_nic_lock(struct iwn_softc *);
154 static int	iwn_eeprom_lock(struct iwn_softc *);
155 static int	iwn_init_otprom(struct iwn_softc *);
156 static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
157 static void	iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
158 static int	iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
159 		    void **, bus_size_t, bus_size_t);
160 static void	iwn_dma_contig_free(struct iwn_dma_info *);
161 static int	iwn_alloc_sched(struct iwn_softc *);
162 static void	iwn_free_sched(struct iwn_softc *);
163 static int	iwn_alloc_kw(struct iwn_softc *);
164 static void	iwn_free_kw(struct iwn_softc *);
165 static int	iwn_alloc_ict(struct iwn_softc *);
166 static void	iwn_free_ict(struct iwn_softc *);
167 static int	iwn_alloc_fwmem(struct iwn_softc *);
168 static void	iwn_free_fwmem(struct iwn_softc *);
169 static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
170 static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
171 static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
172 static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
173 		    int);
174 static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
175 static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
176 static void	iwn5000_ict_reset(struct iwn_softc *);
177 static int	iwn_read_eeprom(struct iwn_softc *,
178 		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
179 static void	iwn4965_read_eeprom(struct iwn_softc *);
180 #ifdef	IWN_DEBUG
181 static void	iwn4965_print_power_group(struct iwn_softc *, int);
182 #endif
183 static void	iwn5000_read_eeprom(struct iwn_softc *);
184 static uint32_t	iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
185 static void	iwn_read_eeprom_band(struct iwn_softc *, int);
186 static void	iwn_read_eeprom_ht40(struct iwn_softc *, int);
187 static void	iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
188 static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
189 		    struct ieee80211_channel *);
190 static int	iwn_setregdomain(struct ieee80211com *,
191 		    struct ieee80211_regdomain *, int,
192 		    struct ieee80211_channel[]);
193 static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
194 static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
195 		    const uint8_t mac[IEEE80211_ADDR_LEN]);
196 static void	iwn_newassoc(struct ieee80211_node *, int);
197 static int	iwn_media_change(struct ifnet *);
198 static int	iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
199 static void	iwn_calib_timeout(void *);
200 static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
201 		    struct iwn_rx_data *);
202 static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
203 		    struct iwn_rx_data *);
204 static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
205 		    struct iwn_rx_data *);
206 static void	iwn5000_rx_calib_results(struct iwn_softc *,
207 		    struct iwn_rx_desc *, struct iwn_rx_data *);
208 static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
209 		    struct iwn_rx_data *);
210 static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
211 		    struct iwn_rx_data *);
212 static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
213 		    struct iwn_rx_data *);
214 static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
215 		    uint8_t);
216 static void	iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, int, void *);
217 static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
218 static void	iwn_notif_intr(struct iwn_softc *);
219 static void	iwn_wakeup_intr(struct iwn_softc *);
220 static void	iwn_rftoggle_intr(struct iwn_softc *);
221 static void	iwn_fatal_intr(struct iwn_softc *);
222 static void	iwn_intr(void *);
223 static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
224 		    uint16_t);
225 static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
226 		    uint16_t);
227 #ifdef notyet
228 static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
229 #endif
230 static int	iwn_tx_data(struct iwn_softc *, struct mbuf *,
231 		    struct ieee80211_node *);
232 static int	iwn_tx_data_raw(struct iwn_softc *, struct mbuf *,
233 		    struct ieee80211_node *,
234 		    const struct ieee80211_bpf_params *params);
235 static int	iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
236 		    const struct ieee80211_bpf_params *);
237 static void	iwn_start(struct ifnet *);
238 static void	iwn_start_locked(struct ifnet *);
239 static void	iwn_watchdog(void *);
240 static int	iwn_ioctl(struct ifnet *, u_long, caddr_t);
241 static int	iwn_cmd(struct iwn_softc *, int, const void *, int, int);
242 static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
243 		    int);
244 static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
245 		    int);
246 static int	iwn_set_link_quality(struct iwn_softc *,
247 		    struct ieee80211_node *);
248 static int	iwn_add_broadcast_node(struct iwn_softc *, int);
249 static int	iwn_updateedca(struct ieee80211com *);
250 static void	iwn_update_mcast(struct ieee80211com *);
251 static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
252 static int	iwn_set_critical_temp(struct iwn_softc *);
253 static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
254 static void	iwn4965_power_calibration(struct iwn_softc *, int);
255 static int	iwn4965_set_txpower(struct iwn_softc *,
256 		    struct ieee80211_channel *, int);
257 static int	iwn5000_set_txpower(struct iwn_softc *,
258 		    struct ieee80211_channel *, int);
259 static int	iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
260 static int	iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
261 static int	iwn_get_noise(const struct iwn_rx_general_stats *);
262 static int	iwn4965_get_temperature(struct iwn_softc *);
263 static int	iwn5000_get_temperature(struct iwn_softc *);
264 static int	iwn_init_sensitivity(struct iwn_softc *);
265 static void	iwn_collect_noise(struct iwn_softc *,
266 		    const struct iwn_rx_general_stats *);
267 static int	iwn4965_init_gains(struct iwn_softc *);
268 static int	iwn5000_init_gains(struct iwn_softc *);
269 static int	iwn4965_set_gains(struct iwn_softc *);
270 static int	iwn5000_set_gains(struct iwn_softc *);
271 static void	iwn_tune_sensitivity(struct iwn_softc *,
272 		    const struct iwn_rx_stats *);
273 static void	iwn_save_stats_counters(struct iwn_softc *,
274 		    const struct iwn_stats *);
275 static int	iwn_send_sensitivity(struct iwn_softc *);
276 static void	iwn_check_rx_recovery(struct iwn_softc *, struct iwn_stats *);
277 static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
278 static int	iwn_send_btcoex(struct iwn_softc *);
279 static int	iwn_send_advanced_btcoex(struct iwn_softc *);
280 static int	iwn5000_runtime_calib(struct iwn_softc *);
281 static int	iwn_config(struct iwn_softc *);
282 static int	iwn_scan(struct iwn_softc *, struct ieee80211vap *,
283 		    struct ieee80211_scan_state *, struct ieee80211_channel *);
284 static int	iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
285 static int	iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
286 static int	iwn_ampdu_rx_start(struct ieee80211_node *,
287 		    struct ieee80211_rx_ampdu *, int, int, int);
288 static void	iwn_ampdu_rx_stop(struct ieee80211_node *,
289 		    struct ieee80211_rx_ampdu *);
290 static int	iwn_addba_request(struct ieee80211_node *,
291 		    struct ieee80211_tx_ampdu *, int, int, int);
292 static int	iwn_addba_response(struct ieee80211_node *,
293 		    struct ieee80211_tx_ampdu *, int, int, int);
294 static int	iwn_ampdu_tx_start(struct ieee80211com *,
295 		    struct ieee80211_node *, uint8_t);
296 static void	iwn_ampdu_tx_stop(struct ieee80211_node *,
297 		    struct ieee80211_tx_ampdu *);
298 static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
299 		    struct ieee80211_node *, int, uint8_t, uint16_t);
300 static void	iwn4965_ampdu_tx_stop(struct iwn_softc *, int,
301 		    uint8_t, uint16_t);
302 static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
303 		    struct ieee80211_node *, int, uint8_t, uint16_t);
304 static void	iwn5000_ampdu_tx_stop(struct iwn_softc *, int,
305 		    uint8_t, uint16_t);
306 static int	iwn5000_query_calibration(struct iwn_softc *);
307 static int	iwn5000_send_calibration(struct iwn_softc *);
308 static int	iwn5000_send_wimax_coex(struct iwn_softc *);
309 static int	iwn5000_crystal_calib(struct iwn_softc *);
310 static int	iwn5000_temp_offset_calib(struct iwn_softc *);
311 static int	iwn5000_temp_offset_calibv2(struct iwn_softc *);
312 static int	iwn4965_post_alive(struct iwn_softc *);
313 static int	iwn5000_post_alive(struct iwn_softc *);
314 static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
315 		    int);
316 static int	iwn4965_load_firmware(struct iwn_softc *);
317 static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
318 		    const uint8_t *, int);
319 static int	iwn5000_load_firmware(struct iwn_softc *);
320 static int	iwn_read_firmware_leg(struct iwn_softc *,
321 		    struct iwn_fw_info *);
322 static int	iwn_read_firmware_tlv(struct iwn_softc *,
323 		    struct iwn_fw_info *, uint16_t);
324 static int	iwn_read_firmware(struct iwn_softc *);
325 static int	iwn_clock_wait(struct iwn_softc *);
326 static int	iwn_apm_init(struct iwn_softc *);
327 static void	iwn_apm_stop_master(struct iwn_softc *);
328 static void	iwn_apm_stop(struct iwn_softc *);
329 static int	iwn4965_nic_config(struct iwn_softc *);
330 static int	iwn5000_nic_config(struct iwn_softc *);
331 static int	iwn_hw_prepare(struct iwn_softc *);
332 static int	iwn_hw_init(struct iwn_softc *);
333 static void	iwn_hw_stop(struct iwn_softc *);
334 static void	iwn_radio_on(void *, int);
335 static void	iwn_radio_off(void *, int);
336 static void	iwn_panicked(void *, int);
337 static void	iwn_init_locked(struct iwn_softc *);
338 static void	iwn_init(void *);
339 static void	iwn_stop_locked(struct iwn_softc *);
340 static void	iwn_stop(struct iwn_softc *);
341 static void	iwn_scan_start(struct ieee80211com *);
342 static void	iwn_scan_end(struct ieee80211com *);
343 static void	iwn_set_channel(struct ieee80211com *);
344 static void	iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
345 static void	iwn_scan_mindwell(struct ieee80211_scan_state *);
346 static void	iwn_hw_reset(void *, int);
347 #ifdef	IWN_DEBUG
348 static char	*iwn_get_csr_string(int);
349 static void	iwn_debug_register(struct iwn_softc *);
350 #endif
351 
352 static device_method_t iwn_methods[] = {
353 	/* Device interface */
354 	DEVMETHOD(device_probe,		iwn_probe),
355 	DEVMETHOD(device_attach,	iwn_attach),
356 	DEVMETHOD(device_detach,	iwn_detach),
357 	DEVMETHOD(device_shutdown,	iwn_shutdown),
358 	DEVMETHOD(device_suspend,	iwn_suspend),
359 	DEVMETHOD(device_resume,	iwn_resume),
360 
361 	DEVMETHOD_END
362 };
363 
364 static driver_t iwn_driver = {
365 	"iwn",
366 	iwn_methods,
367 	sizeof(struct iwn_softc)
368 };
369 static devclass_t iwn_devclass;
370 
371 DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL);
372 
373 MODULE_VERSION(iwn, 1);
374 
375 MODULE_DEPEND(iwn, firmware, 1, 1, 1);
376 MODULE_DEPEND(iwn, pci, 1, 1, 1);
377 MODULE_DEPEND(iwn, wlan, 1, 1, 1);
378 
379 static int
380 iwn_probe(device_t dev)
381 {
382 	const struct iwn_ident *ident;
383 
384 	for (ident = iwn_ident_table; ident->name != NULL; ident++) {
385 		if (pci_get_vendor(dev) == ident->vendor &&
386 		    pci_get_device(dev) == ident->device) {
387 			device_set_desc(dev, ident->name);
388 			return (BUS_PROBE_DEFAULT);
389 		}
390 	}
391 	return ENXIO;
392 }
393 
394 static int
395 iwn_is_3stream_device(struct iwn_softc *sc)
396 {
397 	/* XXX for now only 5300, until the 5350 can be tested */
398 	if (sc->hw_type == IWN_HW_REV_TYPE_5300)
399 		return (1);
400 	return (0);
401 }
402 
403 static int
404 iwn_attach(device_t dev)
405 {
406 	struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
407 	struct ieee80211com *ic;
408 	struct ifnet *ifp;
409 	int i, error, rid;
410 	uint8_t macaddr[IEEE80211_ADDR_LEN];
411 
412 	sc->sc_dev = dev;
413 
414 #ifdef	IWN_DEBUG
415 	error = resource_int_value(device_get_name(sc->sc_dev),
416 	    device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
417 	if (error != 0)
418 		sc->sc_debug = 0;
419 #else
420 	sc->sc_debug = 0;
421 #endif
422 
423 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__);
424 
425 	/*
426 	 * Get the offset of the PCI Express Capability Structure in PCI
427 	 * Configuration Space.
428 	 */
429 	error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
430 	if (error != 0) {
431 		device_printf(dev, "PCIe capability structure not found!\n");
432 		return error;
433 	}
434 
435 	/* Clear device-specific "PCI retry timeout" register (41h). */
436 	pci_write_config(dev, 0x41, 0, 1);
437 
438 	/* Enable bus-mastering. */
439 	pci_enable_busmaster(dev);
440 
441 	rid = PCIR_BAR(0);
442 	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
443 	    RF_ACTIVE);
444 	if (sc->mem == NULL) {
445 		device_printf(dev, "can't map mem space\n");
446 		error = ENOMEM;
447 		return error;
448 	}
449 	sc->sc_st = rman_get_bustag(sc->mem);
450 	sc->sc_sh = rman_get_bushandle(sc->mem);
451 
452 	i = 1;
453 	rid = 0;
454 	if (pci_alloc_msi(dev, &i) == 0)
455 		rid = 1;
456 	/* Install interrupt handler. */
457 	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
458 	    (rid != 0 ? 0 : RF_SHAREABLE));
459 	if (sc->irq == NULL) {
460 		device_printf(dev, "can't map interrupt\n");
461 		error = ENOMEM;
462 		goto fail;
463 	}
464 
465 	IWN_LOCK_INIT(sc);
466 
467 	/* Read hardware revision and attach. */
468 	sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT)
469 	    & IWN_HW_REV_TYPE_MASK;
470 	sc->subdevice_id = pci_get_subdevice(dev);
471 
472 	/*
473 	 * 4965 versus 5000 and later have different methods.
474 	 * Let's set those up first.
475 	 */
476 	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
477 		error = iwn4965_attach(sc, pci_get_device(dev));
478 	else
479 		error = iwn5000_attach(sc, pci_get_device(dev));
480 	if (error != 0) {
481 		device_printf(dev, "could not attach device, error %d\n",
482 		    error);
483 		goto fail;
484 	}
485 
486 	/*
487 	 * Next, let's setup the various parameters of each NIC.
488 	 */
489 	error = iwn_config_specific(sc, pci_get_device(dev));
490 	if (error != 0) {
491 		device_printf(dev, "could not attach device, error %d\n",
492 		    error);
493 		goto fail;
494 	}
495 
496 	if ((error = iwn_hw_prepare(sc)) != 0) {
497 		device_printf(dev, "hardware not ready, error %d\n", error);
498 		goto fail;
499 	}
500 
501 	/* Allocate DMA memory for firmware transfers. */
502 	if ((error = iwn_alloc_fwmem(sc)) != 0) {
503 		device_printf(dev,
504 		    "could not allocate memory for firmware, error %d\n",
505 		    error);
506 		goto fail;
507 	}
508 
509 	/* Allocate "Keep Warm" page. */
510 	if ((error = iwn_alloc_kw(sc)) != 0) {
511 		device_printf(dev,
512 		    "could not allocate keep warm page, error %d\n", error);
513 		goto fail;
514 	}
515 
516 	/* Allocate ICT table for 5000 Series. */
517 	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
518 	    (error = iwn_alloc_ict(sc)) != 0) {
519 		device_printf(dev, "could not allocate ICT table, error %d\n",
520 		    error);
521 		goto fail;
522 	}
523 
524 	/* Allocate TX scheduler "rings". */
525 	if ((error = iwn_alloc_sched(sc)) != 0) {
526 		device_printf(dev,
527 		    "could not allocate TX scheduler rings, error %d\n", error);
528 		goto fail;
529 	}
530 
531 	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
532 	for (i = 0; i < sc->ntxqs; i++) {
533 		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
534 			device_printf(dev,
535 			    "could not allocate TX ring %d, error %d\n", i,
536 			    error);
537 			goto fail;
538 		}
539 	}
540 
541 	/* Allocate RX ring. */
542 	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
543 		device_printf(dev, "could not allocate RX ring, error %d\n",
544 		    error);
545 		goto fail;
546 	}
547 
548 	/* Clear pending interrupts. */
549 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
550 
551 	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
552 	if (ifp == NULL) {
553 		device_printf(dev, "can not allocate ifnet structure\n");
554 		goto fail;
555 	}
556 
557 	ic = ifp->if_l2com;
558 	ic->ic_ifp = ifp;
559 	ic->ic_softc = sc;
560 	ic->ic_name = device_get_nameunit(dev);
561 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
562 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
563 
564 	/* Set device capabilities. */
565 	ic->ic_caps =
566 		  IEEE80211_C_STA		/* station mode supported */
567 		| IEEE80211_C_MONITOR		/* monitor mode supported */
568 #if 0
569 		| IEEE80211_C_BGSCAN		/* background scanning */
570 #endif
571 		| IEEE80211_C_TXPMGT		/* tx power management */
572 		| IEEE80211_C_SHSLOT		/* short slot time supported */
573 		| IEEE80211_C_WPA
574 		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
575 #if 0
576 		| IEEE80211_C_IBSS		/* ibss/adhoc mode */
577 #endif
578 		| IEEE80211_C_WME		/* WME */
579 		| IEEE80211_C_PMGT		/* Station-side power mgmt */
580 		;
581 
582 	/* Read MAC address, channels, etc from EEPROM. */
583 	if ((error = iwn_read_eeprom(sc, macaddr)) != 0) {
584 		device_printf(dev, "could not read EEPROM, error %d\n",
585 		    error);
586 		goto fail;
587 	}
588 
589 	/* Count the number of available chains. */
590 	sc->ntxchains =
591 	    ((sc->txchainmask >> 2) & 1) +
592 	    ((sc->txchainmask >> 1) & 1) +
593 	    ((sc->txchainmask >> 0) & 1);
594 	sc->nrxchains =
595 	    ((sc->rxchainmask >> 2) & 1) +
596 	    ((sc->rxchainmask >> 1) & 1) +
597 	    ((sc->rxchainmask >> 0) & 1);
598 	if (bootverbose) {
599 		device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n",
600 		    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
601 		    macaddr, ":");
602 	}
603 
604 	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
605 		ic->ic_rxstream = sc->nrxchains;
606 		ic->ic_txstream = sc->ntxchains;
607 
608 		/*
609 		 * Some of the 3 antenna devices (ie, the 4965) only supports
610 		 * 2x2 operation.  So correct the number of streams if
611 		 * it's not a 3-stream device.
612 		 */
613 		if (! iwn_is_3stream_device(sc)) {
614 			if (ic->ic_rxstream > 2)
615 				ic->ic_rxstream = 2;
616 			if (ic->ic_txstream > 2)
617 				ic->ic_txstream = 2;
618 		}
619 
620 		ic->ic_htcaps =
621 			  IEEE80211_HTCAP_SMPS_OFF	/* SMPS mode disabled */
622 			| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
623 			| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width*/
624 			| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
625 #ifdef notyet
626 			| IEEE80211_HTCAP_GREENFIELD
627 #if IWN_RBUF_SIZE == 8192
628 			| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
629 #else
630 			| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
631 #endif
632 #endif
633 			/* s/w capabilities */
634 			| IEEE80211_HTC_HT		/* HT operation */
635 			| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
636 #ifdef notyet
637 			| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
638 #endif
639 			;
640 	}
641 
642 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
643 	ifp->if_softc = sc;
644 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
645 	ifp->if_init = iwn_init;
646 	ifp->if_ioctl = iwn_ioctl;
647 	ifp->if_start = iwn_start;
648 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
649 	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
650 	IFQ_SET_READY(&ifp->if_snd);
651 
652 	ieee80211_ifattach(ic, macaddr);
653 	ic->ic_vap_create = iwn_vap_create;
654 	ic->ic_vap_delete = iwn_vap_delete;
655 	ic->ic_raw_xmit = iwn_raw_xmit;
656 	ic->ic_node_alloc = iwn_node_alloc;
657 	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
658 	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
659 	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
660 	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
661 	sc->sc_addba_request = ic->ic_addba_request;
662 	ic->ic_addba_request = iwn_addba_request;
663 	sc->sc_addba_response = ic->ic_addba_response;
664 	ic->ic_addba_response = iwn_addba_response;
665 	sc->sc_addba_stop = ic->ic_addba_stop;
666 	ic->ic_addba_stop = iwn_ampdu_tx_stop;
667 	ic->ic_newassoc = iwn_newassoc;
668 	ic->ic_wme.wme_update = iwn_updateedca;
669 	ic->ic_update_mcast = iwn_update_mcast;
670 	ic->ic_scan_start = iwn_scan_start;
671 	ic->ic_scan_end = iwn_scan_end;
672 	ic->ic_set_channel = iwn_set_channel;
673 	ic->ic_scan_curchan = iwn_scan_curchan;
674 	ic->ic_scan_mindwell = iwn_scan_mindwell;
675 	ic->ic_setregdomain = iwn_setregdomain;
676 
677 	iwn_radiotap_attach(sc);
678 
679 	callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0);
680 	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
681 	TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc);
682 	TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc);
683 	TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc);
684 	TASK_INIT(&sc->sc_panic_task, 0, iwn_panicked, sc);
685 
686 	sc->sc_tq = taskqueue_create("iwn_taskq", M_WAITOK,
687 	    taskqueue_thread_enqueue, &sc->sc_tq);
688 	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwn_taskq");
689 	if (error != 0) {
690 		device_printf(dev, "can't start threads, error %d\n", error);
691 		goto fail;
692 	}
693 
694 	iwn_sysctlattach(sc);
695 
696 	/*
697 	 * Hook our interrupt after all initialization is complete.
698 	 */
699 	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
700 	    NULL, iwn_intr, sc, &sc->sc_ih);
701 	if (error != 0) {
702 		device_printf(dev, "can't establish interrupt, error %d\n",
703 		    error);
704 		goto fail;
705 	}
706 
707 #if 0
708 	device_printf(sc->sc_dev, "%s: rx_stats=%d, rx_stats_bt=%d\n",
709 	    __func__,
710 	    sizeof(struct iwn_stats),
711 	    sizeof(struct iwn_stats_bt));
712 #endif
713 
714 	if (bootverbose)
715 		ieee80211_announce(ic);
716 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
717 	return 0;
718 fail:
719 	iwn_detach(dev);
720 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
721 	return error;
722 }
723 
724 /*
725  * Define specific configuration based on device id and subdevice id
726  * pid : PCI device id
727  */
728 static int
729 iwn_config_specific(struct iwn_softc *sc, uint16_t pid)
730 {
731 
732 	switch (pid) {
733 /* 4965 series */
734 	case IWN_DID_4965_1:
735 	case IWN_DID_4965_2:
736 	case IWN_DID_4965_3:
737 	case IWN_DID_4965_4:
738 		sc->base_params = &iwn4965_base_params;
739 		sc->limits = &iwn4965_sensitivity_limits;
740 		sc->fwname = "iwn4965fw";
741 		/* Override chains masks, ROM is known to be broken. */
742 		sc->txchainmask = IWN_ANT_AB;
743 		sc->rxchainmask = IWN_ANT_ABC;
744 		/* Enable normal btcoex */
745 		sc->sc_flags |= IWN_FLAG_BTCOEX;
746 		break;
747 /* 1000 Series */
748 	case IWN_DID_1000_1:
749 	case IWN_DID_1000_2:
750 		switch(sc->subdevice_id) {
751 			case	IWN_SDID_1000_1:
752 			case	IWN_SDID_1000_2:
753 			case	IWN_SDID_1000_3:
754 			case	IWN_SDID_1000_4:
755 			case	IWN_SDID_1000_5:
756 			case	IWN_SDID_1000_6:
757 			case	IWN_SDID_1000_7:
758 			case	IWN_SDID_1000_8:
759 			case	IWN_SDID_1000_9:
760 			case	IWN_SDID_1000_10:
761 			case	IWN_SDID_1000_11:
762 			case	IWN_SDID_1000_12:
763 				sc->limits = &iwn1000_sensitivity_limits;
764 				sc->base_params = &iwn1000_base_params;
765 				sc->fwname = "iwn1000fw";
766 				break;
767 			default:
768 				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
769 				    "0x%04x rev %d not supported (subdevice)\n", pid,
770 				    sc->subdevice_id,sc->hw_type);
771 				return ENOTSUP;
772 		}
773 		break;
774 /* 6x00 Series */
775 	case IWN_DID_6x00_2:
776 	case IWN_DID_6x00_4:
777 	case IWN_DID_6x00_1:
778 	case IWN_DID_6x00_3:
779 		sc->fwname = "iwn6000fw";
780 		sc->limits = &iwn6000_sensitivity_limits;
781 		switch(sc->subdevice_id) {
782 			case IWN_SDID_6x00_1:
783 			case IWN_SDID_6x00_2:
784 			case IWN_SDID_6x00_8:
785 				//iwl6000_3agn_cfg
786 				sc->base_params = &iwn_6000_base_params;
787 				break;
788 			case IWN_SDID_6x00_3:
789 			case IWN_SDID_6x00_6:
790 			case IWN_SDID_6x00_9:
791 				////iwl6000i_2agn
792 			case IWN_SDID_6x00_4:
793 			case IWN_SDID_6x00_7:
794 			case IWN_SDID_6x00_10:
795 				//iwl6000i_2abg_cfg
796 			case IWN_SDID_6x00_5:
797 				//iwl6000i_2bg_cfg
798 				sc->base_params = &iwn_6000i_base_params;
799 				sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
800 				sc->txchainmask = IWN_ANT_BC;
801 				sc->rxchainmask = IWN_ANT_BC;
802 				break;
803 			default:
804 				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
805 				    "0x%04x rev %d not supported (subdevice)\n", pid,
806 				    sc->subdevice_id,sc->hw_type);
807 				return ENOTSUP;
808 		}
809 		break;
810 /* 6x05 Series */
811 	case IWN_DID_6x05_1:
812 	case IWN_DID_6x05_2:
813 		switch(sc->subdevice_id) {
814 			case IWN_SDID_6x05_1:
815 			case IWN_SDID_6x05_4:
816 			case IWN_SDID_6x05_6:
817 				//iwl6005_2agn_cfg
818 			case IWN_SDID_6x05_2:
819 			case IWN_SDID_6x05_5:
820 			case IWN_SDID_6x05_7:
821 				//iwl6005_2abg_cfg
822 			case IWN_SDID_6x05_3:
823 				//iwl6005_2bg_cfg
824 			case IWN_SDID_6x05_8:
825 			case IWN_SDID_6x05_9:
826 				//iwl6005_2agn_sff_cfg
827 			case IWN_SDID_6x05_10:
828 				//iwl6005_2agn_d_cfg
829 			case IWN_SDID_6x05_11:
830 				//iwl6005_2agn_mow1_cfg
831 			case IWN_SDID_6x05_12:
832 				//iwl6005_2agn_mow2_cfg
833 				sc->fwname = "iwn6000g2afw";
834 				sc->limits = &iwn6000_sensitivity_limits;
835 				sc->base_params = &iwn_6000g2_base_params;
836 				break;
837 			default:
838 				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
839 				    "0x%04x rev %d not supported (subdevice)\n", pid,
840 				    sc->subdevice_id,sc->hw_type);
841 				return ENOTSUP;
842 		}
843 		break;
844 /* 6x35 Series */
845 	case IWN_DID_6035_1:
846 	case IWN_DID_6035_2:
847 		switch(sc->subdevice_id) {
848 			case IWN_SDID_6035_1:
849 			case IWN_SDID_6035_2:
850 			case IWN_SDID_6035_3:
851 			case IWN_SDID_6035_4:
852 				sc->fwname = "iwn6000g2bfw";
853 				sc->limits = &iwn6235_sensitivity_limits;
854 				sc->base_params = &iwn_6235_base_params;
855 				break;
856 			default:
857 				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
858 				    "0x%04x rev %d not supported (subdevice)\n", pid,
859 				    sc->subdevice_id,sc->hw_type);
860 				return ENOTSUP;
861 		}
862 		break;
863 /* 6x50 WiFi/WiMax Series */
864 	case IWN_DID_6050_1:
865 	case IWN_DID_6050_2:
866 		switch(sc->subdevice_id) {
867 			case IWN_SDID_6050_1:
868 			case IWN_SDID_6050_3:
869 			case IWN_SDID_6050_5:
870 				//iwl6050_2agn_cfg
871 			case IWN_SDID_6050_2:
872 			case IWN_SDID_6050_4:
873 			case IWN_SDID_6050_6:
874 				//iwl6050_2abg_cfg
875 				sc->fwname = "iwn6050fw";
876 				sc->txchainmask = IWN_ANT_AB;
877 				sc->rxchainmask = IWN_ANT_AB;
878 				sc->limits = &iwn6000_sensitivity_limits;
879 				sc->base_params = &iwn_6050_base_params;
880 				break;
881 			default:
882 				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
883 				    "0x%04x rev %d not supported (subdevice)\n", pid,
884 				    sc->subdevice_id,sc->hw_type);
885 				return ENOTSUP;
886 		}
887 		break;
888 /* 6150 WiFi/WiMax Series */
889 	case IWN_DID_6150_1:
890 	case IWN_DID_6150_2:
891 		switch(sc->subdevice_id) {
892 			case IWN_SDID_6150_1:
893 			case IWN_SDID_6150_3:
894 			case IWN_SDID_6150_5:
895 				// iwl6150_bgn_cfg
896 			case IWN_SDID_6150_2:
897 			case IWN_SDID_6150_4:
898 			case IWN_SDID_6150_6:
899 				//iwl6150_bg_cfg
900 				sc->fwname = "iwn6050fw";
901 				sc->limits = &iwn6000_sensitivity_limits;
902 				sc->base_params = &iwn_6150_base_params;
903 				break;
904 			default:
905 				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
906 				    "0x%04x rev %d not supported (subdevice)\n", pid,
907 				    sc->subdevice_id,sc->hw_type);
908 				return ENOTSUP;
909 		}
910 		break;
911 /* 6030 Series and 1030 Series */
912 	case IWN_DID_x030_1:
913 	case IWN_DID_x030_2:
914 	case IWN_DID_x030_3:
915 	case IWN_DID_x030_4:
916 		switch(sc->subdevice_id) {
917 			case IWN_SDID_x030_1:
918 			case IWN_SDID_x030_3:
919 			case IWN_SDID_x030_5:
920 			// iwl1030_bgn_cfg
921 			case IWN_SDID_x030_2:
922 			case IWN_SDID_x030_4:
923 			case IWN_SDID_x030_6:
924 			//iwl1030_bg_cfg
925 			case IWN_SDID_x030_7:
926 			case IWN_SDID_x030_10:
927 			case IWN_SDID_x030_14:
928 			//iwl6030_2agn_cfg
929 			case IWN_SDID_x030_8:
930 			case IWN_SDID_x030_11:
931 			case IWN_SDID_x030_15:
932 			// iwl6030_2bgn_cfg
933 			case IWN_SDID_x030_9:
934 			case IWN_SDID_x030_12:
935 			case IWN_SDID_x030_16:
936 			// iwl6030_2abg_cfg
937 			case IWN_SDID_x030_13:
938 			//iwl6030_2bg_cfg
939 				sc->fwname = "iwn6000g2bfw";
940 				sc->limits = &iwn6000_sensitivity_limits;
941 				sc->base_params = &iwn_6000g2b_base_params;
942 				break;
943 			default:
944 				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
945 				    "0x%04x rev %d not supported (subdevice)\n", pid,
946 				    sc->subdevice_id,sc->hw_type);
947 				return ENOTSUP;
948 		}
949 		break;
950 /* 130 Series WiFi */
951 /* XXX: This series will need adjustment for rate.
952  * see rx_with_siso_diversity in linux kernel
953  */
954 	case IWN_DID_130_1:
955 	case IWN_DID_130_2:
956 		switch(sc->subdevice_id) {
957 			case IWN_SDID_130_1:
958 			case IWN_SDID_130_3:
959 			case IWN_SDID_130_5:
960 			//iwl130_bgn_cfg
961 			case IWN_SDID_130_2:
962 			case IWN_SDID_130_4:
963 			case IWN_SDID_130_6:
964 			//iwl130_bg_cfg
965 				sc->fwname = "iwn6000g2bfw";
966 				sc->limits = &iwn6000_sensitivity_limits;
967 				sc->base_params = &iwn_6000g2b_base_params;
968 				break;
969 			default:
970 				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
971 				    "0x%04x rev %d not supported (subdevice)\n", pid,
972 				    sc->subdevice_id,sc->hw_type);
973 				return ENOTSUP;
974 		}
975 		break;
976 /* 100 Series WiFi */
977 	case IWN_DID_100_1:
978 	case IWN_DID_100_2:
979 		switch(sc->subdevice_id) {
980 			case IWN_SDID_100_1:
981 			case IWN_SDID_100_2:
982 			case IWN_SDID_100_3:
983 			case IWN_SDID_100_4:
984 			case IWN_SDID_100_5:
985 			case IWN_SDID_100_6:
986 				sc->limits = &iwn1000_sensitivity_limits;
987 				sc->base_params = &iwn1000_base_params;
988 				sc->fwname = "iwn100fw";
989 				break;
990 			default:
991 				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
992 				    "0x%04x rev %d not supported (subdevice)\n", pid,
993 				    sc->subdevice_id,sc->hw_type);
994 				return ENOTSUP;
995 		}
996 		break;
997 
998 /* 105 Series */
999 /* XXX: This series will need adjustment for rate.
1000  * see rx_with_siso_diversity in linux kernel
1001  */
1002 	case IWN_DID_105_1:
1003 	case IWN_DID_105_2:
1004 		switch(sc->subdevice_id) {
1005 			case IWN_SDID_105_1:
1006 			case IWN_SDID_105_2:
1007 			case IWN_SDID_105_3:
1008 			//iwl105_bgn_cfg
1009 			case IWN_SDID_105_4:
1010 			//iwl105_bgn_d_cfg
1011 				sc->limits = &iwn2030_sensitivity_limits;
1012 				sc->base_params = &iwn2000_base_params;
1013 				sc->fwname = "iwn105fw";
1014 				break;
1015 			default:
1016 				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1017 				    "0x%04x rev %d not supported (subdevice)\n", pid,
1018 				    sc->subdevice_id,sc->hw_type);
1019 				return ENOTSUP;
1020 		}
1021 		break;
1022 
1023 /* 135 Series */
1024 /* XXX: This series will need adjustment for rate.
1025  * see rx_with_siso_diversity in linux kernel
1026  */
1027 	case IWN_DID_135_1:
1028 	case IWN_DID_135_2:
1029 		switch(sc->subdevice_id) {
1030 			case IWN_SDID_135_1:
1031 			case IWN_SDID_135_2:
1032 			case IWN_SDID_135_3:
1033 				sc->limits = &iwn2030_sensitivity_limits;
1034 				sc->base_params = &iwn2030_base_params;
1035 				sc->fwname = "iwn135fw";
1036 				break;
1037 			default:
1038 				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1039 				    "0x%04x rev %d not supported (subdevice)\n", pid,
1040 				    sc->subdevice_id,sc->hw_type);
1041 				return ENOTSUP;
1042 		}
1043 		break;
1044 
1045 /* 2x00 Series */
1046 	case IWN_DID_2x00_1:
1047 	case IWN_DID_2x00_2:
1048 		switch(sc->subdevice_id) {
1049 			case IWN_SDID_2x00_1:
1050 			case IWN_SDID_2x00_2:
1051 			case IWN_SDID_2x00_3:
1052 			//iwl2000_2bgn_cfg
1053 			case IWN_SDID_2x00_4:
1054 			//iwl2000_2bgn_d_cfg
1055 				sc->limits = &iwn2030_sensitivity_limits;
1056 				sc->base_params = &iwn2000_base_params;
1057 				sc->fwname = "iwn2000fw";
1058 				break;
1059 			default:
1060 				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1061 				    "0x%04x rev %d not supported (subdevice) \n",
1062 				    pid, sc->subdevice_id, sc->hw_type);
1063 				return ENOTSUP;
1064 		}
1065 		break;
1066 /* 2x30 Series */
1067 	case IWN_DID_2x30_1:
1068 	case IWN_DID_2x30_2:
1069 		switch(sc->subdevice_id) {
1070 			case IWN_SDID_2x30_1:
1071 			case IWN_SDID_2x30_3:
1072 			case IWN_SDID_2x30_5:
1073 			//iwl100_bgn_cfg
1074 			case IWN_SDID_2x30_2:
1075 			case IWN_SDID_2x30_4:
1076 			case IWN_SDID_2x30_6:
1077 			//iwl100_bg_cfg
1078 				sc->limits = &iwn2030_sensitivity_limits;
1079 				sc->base_params = &iwn2030_base_params;
1080 				sc->fwname = "iwn2030fw";
1081 				break;
1082 			default:
1083 				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1084 				    "0x%04x rev %d not supported (subdevice)\n", pid,
1085 				    sc->subdevice_id,sc->hw_type);
1086 				return ENOTSUP;
1087 		}
1088 		break;
1089 /* 5x00 Series */
1090 	case IWN_DID_5x00_1:
1091 	case IWN_DID_5x00_2:
1092 	case IWN_DID_5x00_3:
1093 	case IWN_DID_5x00_4:
1094 		sc->limits = &iwn5000_sensitivity_limits;
1095 		sc->base_params = &iwn5000_base_params;
1096 		sc->fwname = "iwn5000fw";
1097 		switch(sc->subdevice_id) {
1098 			case IWN_SDID_5x00_1:
1099 			case IWN_SDID_5x00_2:
1100 			case IWN_SDID_5x00_3:
1101 			case IWN_SDID_5x00_4:
1102 			case IWN_SDID_5x00_9:
1103 			case IWN_SDID_5x00_10:
1104 			case IWN_SDID_5x00_11:
1105 			case IWN_SDID_5x00_12:
1106 			case IWN_SDID_5x00_17:
1107 			case IWN_SDID_5x00_18:
1108 			case IWN_SDID_5x00_19:
1109 			case IWN_SDID_5x00_20:
1110 			//iwl5100_agn_cfg
1111 				sc->txchainmask = IWN_ANT_B;
1112 				sc->rxchainmask = IWN_ANT_AB;
1113 				break;
1114 			case IWN_SDID_5x00_5:
1115 			case IWN_SDID_5x00_6:
1116 			case IWN_SDID_5x00_13:
1117 			case IWN_SDID_5x00_14:
1118 			case IWN_SDID_5x00_21:
1119 			case IWN_SDID_5x00_22:
1120 			//iwl5100_bgn_cfg
1121 				sc->txchainmask = IWN_ANT_B;
1122 				sc->rxchainmask = IWN_ANT_AB;
1123 				break;
1124 			case IWN_SDID_5x00_7:
1125 			case IWN_SDID_5x00_8:
1126 			case IWN_SDID_5x00_15:
1127 			case IWN_SDID_5x00_16:
1128 			case IWN_SDID_5x00_23:
1129 			case IWN_SDID_5x00_24:
1130 			//iwl5100_abg_cfg
1131 				sc->txchainmask = IWN_ANT_B;
1132 				sc->rxchainmask = IWN_ANT_AB;
1133 				break;
1134 			case IWN_SDID_5x00_25:
1135 			case IWN_SDID_5x00_26:
1136 			case IWN_SDID_5x00_27:
1137 			case IWN_SDID_5x00_28:
1138 			case IWN_SDID_5x00_29:
1139 			case IWN_SDID_5x00_30:
1140 			case IWN_SDID_5x00_31:
1141 			case IWN_SDID_5x00_32:
1142 			case IWN_SDID_5x00_33:
1143 			case IWN_SDID_5x00_34:
1144 			case IWN_SDID_5x00_35:
1145 			case IWN_SDID_5x00_36:
1146 			//iwl5300_agn_cfg
1147 				sc->txchainmask = IWN_ANT_ABC;
1148 				sc->rxchainmask = IWN_ANT_ABC;
1149 				break;
1150 			default:
1151 				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1152 				    "0x%04x rev %d not supported (subdevice)\n", pid,
1153 				    sc->subdevice_id,sc->hw_type);
1154 				return ENOTSUP;
1155 		}
1156 		break;
1157 /* 5x50 Series */
1158 	case IWN_DID_5x50_1:
1159 	case IWN_DID_5x50_2:
1160 	case IWN_DID_5x50_3:
1161 	case IWN_DID_5x50_4:
1162 		sc->limits = &iwn5000_sensitivity_limits;
1163 		sc->base_params = &iwn5000_base_params;
1164 		sc->fwname = "iwn5000fw";
1165 		switch(sc->subdevice_id) {
1166 			case IWN_SDID_5x50_1:
1167 			case IWN_SDID_5x50_2:
1168 			case IWN_SDID_5x50_3:
1169 			//iwl5350_agn_cfg
1170 				sc->limits = &iwn5000_sensitivity_limits;
1171 				sc->base_params = &iwn5000_base_params;
1172 				sc->fwname = "iwn5000fw";
1173 				break;
1174 			case IWN_SDID_5x50_4:
1175 			case IWN_SDID_5x50_5:
1176 			case IWN_SDID_5x50_8:
1177 			case IWN_SDID_5x50_9:
1178 			case IWN_SDID_5x50_10:
1179 			case IWN_SDID_5x50_11:
1180 			//iwl5150_agn_cfg
1181 			case IWN_SDID_5x50_6:
1182 			case IWN_SDID_5x50_7:
1183 			case IWN_SDID_5x50_12:
1184 			case IWN_SDID_5x50_13:
1185 			//iwl5150_abg_cfg
1186 				sc->limits = &iwn5000_sensitivity_limits;
1187 				sc->fwname = "iwn5150fw";
1188 				sc->base_params = &iwn_5x50_base_params;
1189 				break;
1190 			default:
1191 				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1192 				    "0x%04x rev %d not supported (subdevice)\n", pid,
1193 				    sc->subdevice_id,sc->hw_type);
1194 				return ENOTSUP;
1195 		}
1196 		break;
1197 	default:
1198 		device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id : 0x%04x"
1199 		    "rev 0x%08x not supported (device)\n", pid, sc->subdevice_id,
1200 		     sc->hw_type);
1201 		return ENOTSUP;
1202 	}
1203 	return 0;
1204 }
1205 
1206 static int
1207 iwn4965_attach(struct iwn_softc *sc, uint16_t pid)
1208 {
1209 	struct iwn_ops *ops = &sc->ops;
1210 
1211 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1212 	ops->load_firmware = iwn4965_load_firmware;
1213 	ops->read_eeprom = iwn4965_read_eeprom;
1214 	ops->post_alive = iwn4965_post_alive;
1215 	ops->nic_config = iwn4965_nic_config;
1216 	ops->update_sched = iwn4965_update_sched;
1217 	ops->get_temperature = iwn4965_get_temperature;
1218 	ops->get_rssi = iwn4965_get_rssi;
1219 	ops->set_txpower = iwn4965_set_txpower;
1220 	ops->init_gains = iwn4965_init_gains;
1221 	ops->set_gains = iwn4965_set_gains;
1222 	ops->add_node = iwn4965_add_node;
1223 	ops->tx_done = iwn4965_tx_done;
1224 	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
1225 	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
1226 	sc->ntxqs = IWN4965_NTXQUEUES;
1227 	sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE;
1228 	sc->ndmachnls = IWN4965_NDMACHNLS;
1229 	sc->broadcast_id = IWN4965_ID_BROADCAST;
1230 	sc->rxonsz = IWN4965_RXONSZ;
1231 	sc->schedsz = IWN4965_SCHEDSZ;
1232 	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
1233 	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
1234 	sc->fwsz = IWN4965_FWSZ;
1235 	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
1236 	sc->limits = &iwn4965_sensitivity_limits;
1237 	sc->fwname = "iwn4965fw";
1238 	/* Override chains masks, ROM is known to be broken. */
1239 	sc->txchainmask = IWN_ANT_AB;
1240 	sc->rxchainmask = IWN_ANT_ABC;
1241 	/* Enable normal btcoex */
1242 	sc->sc_flags |= IWN_FLAG_BTCOEX;
1243 
1244 	DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__);
1245 
1246 	return 0;
1247 }
1248 
1249 static int
1250 iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
1251 {
1252 	struct iwn_ops *ops = &sc->ops;
1253 
1254 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1255 
1256 	ops->load_firmware = iwn5000_load_firmware;
1257 	ops->read_eeprom = iwn5000_read_eeprom;
1258 	ops->post_alive = iwn5000_post_alive;
1259 	ops->nic_config = iwn5000_nic_config;
1260 	ops->update_sched = iwn5000_update_sched;
1261 	ops->get_temperature = iwn5000_get_temperature;
1262 	ops->get_rssi = iwn5000_get_rssi;
1263 	ops->set_txpower = iwn5000_set_txpower;
1264 	ops->init_gains = iwn5000_init_gains;
1265 	ops->set_gains = iwn5000_set_gains;
1266 	ops->add_node = iwn5000_add_node;
1267 	ops->tx_done = iwn5000_tx_done;
1268 	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
1269 	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
1270 	sc->ntxqs = IWN5000_NTXQUEUES;
1271 	sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE;
1272 	sc->ndmachnls = IWN5000_NDMACHNLS;
1273 	sc->broadcast_id = IWN5000_ID_BROADCAST;
1274 	sc->rxonsz = IWN5000_RXONSZ;
1275 	sc->schedsz = IWN5000_SCHEDSZ;
1276 	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
1277 	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
1278 	sc->fwsz = IWN5000_FWSZ;
1279 	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
1280 	sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
1281 	sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
1282 
1283 	return 0;
1284 }
1285 
1286 /*
1287  * Attach the interface to 802.11 radiotap.
1288  */
1289 static void
1290 iwn_radiotap_attach(struct iwn_softc *sc)
1291 {
1292 	struct ifnet *ifp = sc->sc_ifp;
1293 	struct ieee80211com *ic = ifp->if_l2com;
1294 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1295 	ieee80211_radiotap_attach(ic,
1296 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
1297 		IWN_TX_RADIOTAP_PRESENT,
1298 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
1299 		IWN_RX_RADIOTAP_PRESENT);
1300 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1301 }
1302 
1303 static void
1304 iwn_sysctlattach(struct iwn_softc *sc)
1305 {
1306 #ifdef	IWN_DEBUG
1307 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
1308 	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
1309 
1310 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1311 	    "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
1312 		"control debugging printfs");
1313 #endif
1314 }
1315 
1316 static struct ieee80211vap *
1317 iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
1318     enum ieee80211_opmode opmode, int flags,
1319     const uint8_t bssid[IEEE80211_ADDR_LEN],
1320     const uint8_t mac[IEEE80211_ADDR_LEN])
1321 {
1322 	struct iwn_softc *sc = ic->ic_softc;
1323 	struct iwn_vap *ivp;
1324 	struct ieee80211vap *vap;
1325 	uint8_t mac1[IEEE80211_ADDR_LEN];
1326 
1327 	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
1328 		return NULL;
1329 
1330 	IEEE80211_ADDR_COPY(mac1, mac);
1331 
1332 	ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
1333 	    M_80211_VAP, M_NOWAIT | M_ZERO);
1334 	if (ivp == NULL)
1335 		return NULL;
1336 	vap = &ivp->iv_vap;
1337 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1);
1338 	ivp->ctx = IWN_RXON_BSS_CTX;
1339 	IEEE80211_ADDR_COPY(ivp->macaddr, mac1);
1340 	vap->iv_bmissthreshold = 10;		/* override default */
1341 	/* Override with driver methods. */
1342 	ivp->iv_newstate = vap->iv_newstate;
1343 	vap->iv_newstate = iwn_newstate;
1344 	sc->ivap[IWN_RXON_BSS_CTX] = vap;
1345 
1346 	ieee80211_ratectl_init(vap);
1347 	/* Complete setup. */
1348 	ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
1349 	ic->ic_opmode = opmode;
1350 	return vap;
1351 }
1352 
1353 static void
1354 iwn_vap_delete(struct ieee80211vap *vap)
1355 {
1356 	struct iwn_vap *ivp = IWN_VAP(vap);
1357 
1358 	ieee80211_ratectl_deinit(vap);
1359 	ieee80211_vap_detach(vap);
1360 	free(ivp, M_80211_VAP);
1361 }
1362 
1363 static int
1364 iwn_detach(device_t dev)
1365 {
1366 	struct iwn_softc *sc = device_get_softc(dev);
1367 	struct ifnet *ifp = sc->sc_ifp;
1368 	struct ieee80211com *ic;
1369 	int qid;
1370 
1371 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1372 
1373 	if (ifp != NULL) {
1374 		ic = ifp->if_l2com;
1375 
1376 		ieee80211_draintask(ic, &sc->sc_reinit_task);
1377 		ieee80211_draintask(ic, &sc->sc_radioon_task);
1378 		ieee80211_draintask(ic, &sc->sc_radiooff_task);
1379 
1380 		iwn_stop(sc);
1381 
1382 		taskqueue_drain_all(sc->sc_tq);
1383 		taskqueue_free(sc->sc_tq);
1384 
1385 		callout_drain(&sc->watchdog_to);
1386 		callout_drain(&sc->calib_to);
1387 		ieee80211_ifdetach(ic);
1388 	}
1389 
1390 	/* Uninstall interrupt handler. */
1391 	if (sc->irq != NULL) {
1392 		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
1393 		bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
1394 		    sc->irq);
1395 		pci_release_msi(dev);
1396 	}
1397 
1398 	/* Free DMA resources. */
1399 	iwn_free_rx_ring(sc, &sc->rxq);
1400 	for (qid = 0; qid < sc->ntxqs; qid++)
1401 		iwn_free_tx_ring(sc, &sc->txq[qid]);
1402 	iwn_free_sched(sc);
1403 	iwn_free_kw(sc);
1404 	if (sc->ict != NULL)
1405 		iwn_free_ict(sc);
1406 	iwn_free_fwmem(sc);
1407 
1408 	if (sc->mem != NULL)
1409 		bus_release_resource(dev, SYS_RES_MEMORY,
1410 		    rman_get_rid(sc->mem), sc->mem);
1411 
1412 	if (ifp != NULL)
1413 		if_free(ifp);
1414 
1415 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);
1416 	IWN_LOCK_DESTROY(sc);
1417 	return 0;
1418 }
1419 
1420 static int
1421 iwn_shutdown(device_t dev)
1422 {
1423 	struct iwn_softc *sc = device_get_softc(dev);
1424 
1425 	iwn_stop(sc);
1426 	return 0;
1427 }
1428 
1429 static int
1430 iwn_suspend(device_t dev)
1431 {
1432 	struct iwn_softc *sc = device_get_softc(dev);
1433 	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
1434 
1435 	ieee80211_suspend_all(ic);
1436 	return 0;
1437 }
1438 
1439 static int
1440 iwn_resume(device_t dev)
1441 {
1442 	struct iwn_softc *sc = device_get_softc(dev);
1443 	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
1444 
1445 	/* Clear device-specific "PCI retry timeout" register (41h). */
1446 	pci_write_config(dev, 0x41, 0, 1);
1447 
1448 	ieee80211_resume_all(ic);
1449 	return 0;
1450 }
1451 
1452 static int
1453 iwn_nic_lock(struct iwn_softc *sc)
1454 {
1455 	int ntries;
1456 
1457 	/* Request exclusive access to NIC. */
1458 	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1459 
1460 	/* Spin until we actually get the lock. */
1461 	for (ntries = 0; ntries < 1000; ntries++) {
1462 		if ((IWN_READ(sc, IWN_GP_CNTRL) &
1463 		     (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
1464 		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
1465 			return 0;
1466 		DELAY(10);
1467 	}
1468 	return ETIMEDOUT;
1469 }
1470 
1471 static __inline void
1472 iwn_nic_unlock(struct iwn_softc *sc)
1473 {
1474 	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1475 }
1476 
1477 static __inline uint32_t
1478 iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
1479 {
1480 	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
1481 	IWN_BARRIER_READ_WRITE(sc);
1482 	return IWN_READ(sc, IWN_PRPH_RDATA);
1483 }
1484 
1485 static __inline void
1486 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1487 {
1488 	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
1489 	IWN_BARRIER_WRITE(sc);
1490 	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
1491 }
1492 
1493 static __inline void
1494 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1495 {
1496 	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
1497 }
1498 
1499 static __inline void
1500 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1501 {
1502 	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
1503 }
1504 
1505 static __inline void
1506 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
1507     const uint32_t *data, int count)
1508 {
1509 	for (; count > 0; count--, data++, addr += 4)
1510 		iwn_prph_write(sc, addr, *data);
1511 }
1512 
1513 static __inline uint32_t
1514 iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
1515 {
1516 	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
1517 	IWN_BARRIER_READ_WRITE(sc);
1518 	return IWN_READ(sc, IWN_MEM_RDATA);
1519 }
1520 
1521 static __inline void
1522 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1523 {
1524 	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
1525 	IWN_BARRIER_WRITE(sc);
1526 	IWN_WRITE(sc, IWN_MEM_WDATA, data);
1527 }
1528 
1529 static __inline void
1530 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
1531 {
1532 	uint32_t tmp;
1533 
1534 	tmp = iwn_mem_read(sc, addr & ~3);
1535 	if (addr & 3)
1536 		tmp = (tmp & 0x0000ffff) | data << 16;
1537 	else
1538 		tmp = (tmp & 0xffff0000) | data;
1539 	iwn_mem_write(sc, addr & ~3, tmp);
1540 }
1541 
1542 static __inline void
1543 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
1544     int count)
1545 {
1546 	for (; count > 0; count--, addr += 4)
1547 		*data++ = iwn_mem_read(sc, addr);
1548 }
1549 
1550 static __inline void
1551 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
1552     int count)
1553 {
1554 	for (; count > 0; count--, addr += 4)
1555 		iwn_mem_write(sc, addr, val);
1556 }
1557 
1558 static int
1559 iwn_eeprom_lock(struct iwn_softc *sc)
1560 {
1561 	int i, ntries;
1562 
1563 	for (i = 0; i < 100; i++) {
1564 		/* Request exclusive access to EEPROM. */
1565 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1566 		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1567 
1568 		/* Spin until we actually get the lock. */
1569 		for (ntries = 0; ntries < 100; ntries++) {
1570 			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1571 			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1572 				return 0;
1573 			DELAY(10);
1574 		}
1575 	}
1576 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__);
1577 	return ETIMEDOUT;
1578 }
1579 
1580 static __inline void
1581 iwn_eeprom_unlock(struct iwn_softc *sc)
1582 {
1583 	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1584 }
1585 
1586 /*
1587  * Initialize access by host to One Time Programmable ROM.
1588  * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1589  */
1590 static int
1591 iwn_init_otprom(struct iwn_softc *sc)
1592 {
1593 	uint16_t prev, base, next;
1594 	int count, error;
1595 
1596 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1597 
1598 	/* Wait for clock stabilization before accessing prph. */
1599 	if ((error = iwn_clock_wait(sc)) != 0)
1600 		return error;
1601 
1602 	if ((error = iwn_nic_lock(sc)) != 0)
1603 		return error;
1604 	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1605 	DELAY(5);
1606 	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1607 	iwn_nic_unlock(sc);
1608 
1609 	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1610 	if (sc->base_params->shadow_ram_support) {
1611 		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1612 		    IWN_RESET_LINK_PWR_MGMT_DIS);
1613 	}
1614 	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1615 	/* Clear ECC status. */
1616 	IWN_SETBITS(sc, IWN_OTP_GP,
1617 	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1618 
1619 	/*
1620 	 * Find the block before last block (contains the EEPROM image)
1621 	 * for HW without OTP shadow RAM.
1622 	 */
1623 	if (! sc->base_params->shadow_ram_support) {
1624 		/* Switch to absolute addressing mode. */
1625 		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1626 		base = prev = 0;
1627 		for (count = 0; count < sc->base_params->max_ll_items;
1628 		    count++) {
1629 			error = iwn_read_prom_data(sc, base, &next, 2);
1630 			if (error != 0)
1631 				return error;
1632 			if (next == 0)	/* End of linked-list. */
1633 				break;
1634 			prev = base;
1635 			base = le16toh(next);
1636 		}
1637 		if (count == 0 || count == sc->base_params->max_ll_items)
1638 			return EIO;
1639 		/* Skip "next" word. */
1640 		sc->prom_base = prev + 1;
1641 	}
1642 
1643 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1644 
1645 	return 0;
1646 }
1647 
1648 static int
1649 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1650 {
1651 	uint8_t *out = data;
1652 	uint32_t val, tmp;
1653 	int ntries;
1654 
1655 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1656 
1657 	addr += sc->prom_base;
1658 	for (; count > 0; count -= 2, addr++) {
1659 		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1660 		for (ntries = 0; ntries < 10; ntries++) {
1661 			val = IWN_READ(sc, IWN_EEPROM);
1662 			if (val & IWN_EEPROM_READ_VALID)
1663 				break;
1664 			DELAY(5);
1665 		}
1666 		if (ntries == 10) {
1667 			device_printf(sc->sc_dev,
1668 			    "timeout reading ROM at 0x%x\n", addr);
1669 			return ETIMEDOUT;
1670 		}
1671 		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1672 			/* OTPROM, check for ECC errors. */
1673 			tmp = IWN_READ(sc, IWN_OTP_GP);
1674 			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1675 				device_printf(sc->sc_dev,
1676 				    "OTPROM ECC error at 0x%x\n", addr);
1677 				return EIO;
1678 			}
1679 			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1680 				/* Correctable ECC error, clear bit. */
1681 				IWN_SETBITS(sc, IWN_OTP_GP,
1682 				    IWN_OTP_GP_ECC_CORR_STTS);
1683 			}
1684 		}
1685 		*out++ = val >> 16;
1686 		if (count > 1)
1687 			*out++ = val >> 24;
1688 	}
1689 
1690 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1691 
1692 	return 0;
1693 }
1694 
1695 static void
1696 iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1697 {
1698 	if (error != 0)
1699 		return;
1700 	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1701 	*(bus_addr_t *)arg = segs[0].ds_addr;
1702 }
1703 
1704 static int
1705 iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1706     void **kvap, bus_size_t size, bus_size_t alignment)
1707 {
1708 	int error;
1709 
1710 	dma->tag = NULL;
1711 	dma->size = size;
1712 
1713 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
1714 	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1715 	    1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
1716 	if (error != 0)
1717 		goto fail;
1718 
1719 	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1720 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
1721 	if (error != 0)
1722 		goto fail;
1723 
1724 	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
1725 	    iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
1726 	if (error != 0)
1727 		goto fail;
1728 
1729 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1730 
1731 	if (kvap != NULL)
1732 		*kvap = dma->vaddr;
1733 
1734 	return 0;
1735 
1736 fail:	iwn_dma_contig_free(dma);
1737 	return error;
1738 }
1739 
1740 static void
1741 iwn_dma_contig_free(struct iwn_dma_info *dma)
1742 {
1743 	if (dma->vaddr != NULL) {
1744 		bus_dmamap_sync(dma->tag, dma->map,
1745 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1746 		bus_dmamap_unload(dma->tag, dma->map);
1747 		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
1748 		dma->vaddr = NULL;
1749 	}
1750 	if (dma->tag != NULL) {
1751 		bus_dma_tag_destroy(dma->tag);
1752 		dma->tag = NULL;
1753 	}
1754 }
1755 
1756 static int
1757 iwn_alloc_sched(struct iwn_softc *sc)
1758 {
1759 	/* TX scheduler rings must be aligned on a 1KB boundary. */
1760 	return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched,
1761 	    sc->schedsz, 1024);
1762 }
1763 
1764 static void
1765 iwn_free_sched(struct iwn_softc *sc)
1766 {
1767 	iwn_dma_contig_free(&sc->sched_dma);
1768 }
1769 
1770 static int
1771 iwn_alloc_kw(struct iwn_softc *sc)
1772 {
1773 	/* "Keep Warm" page must be aligned on a 4KB boundary. */
1774 	return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096);
1775 }
1776 
1777 static void
1778 iwn_free_kw(struct iwn_softc *sc)
1779 {
1780 	iwn_dma_contig_free(&sc->kw_dma);
1781 }
1782 
1783 static int
1784 iwn_alloc_ict(struct iwn_softc *sc)
1785 {
1786 	/* ICT table must be aligned on a 4KB boundary. */
1787 	return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict,
1788 	    IWN_ICT_SIZE, 4096);
1789 }
1790 
1791 static void
1792 iwn_free_ict(struct iwn_softc *sc)
1793 {
1794 	iwn_dma_contig_free(&sc->ict_dma);
1795 }
1796 
1797 static int
1798 iwn_alloc_fwmem(struct iwn_softc *sc)
1799 {
1800 	/* Must be aligned on a 16-byte boundary. */
1801 	return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16);
1802 }
1803 
1804 static void
1805 iwn_free_fwmem(struct iwn_softc *sc)
1806 {
1807 	iwn_dma_contig_free(&sc->fw_dma);
1808 }
1809 
1810 static int
1811 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1812 {
1813 	bus_size_t size;
1814 	int i, error;
1815 
1816 	ring->cur = 0;
1817 
1818 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1819 
1820 	/* Allocate RX descriptors (256-byte aligned). */
1821 	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1822 	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1823 	    size, 256);
1824 	if (error != 0) {
1825 		device_printf(sc->sc_dev,
1826 		    "%s: could not allocate RX ring DMA memory, error %d\n",
1827 		    __func__, error);
1828 		goto fail;
1829 	}
1830 
1831 	/* Allocate RX status area (16-byte aligned). */
1832 	error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat,
1833 	    sizeof (struct iwn_rx_status), 16);
1834 	if (error != 0) {
1835 		device_printf(sc->sc_dev,
1836 		    "%s: could not allocate RX status DMA memory, error %d\n",
1837 		    __func__, error);
1838 		goto fail;
1839 	}
1840 
1841 	/* Create RX buffer DMA tag. */
1842 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1843 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1844 	    IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
1845 	    &ring->data_dmat);
1846 	if (error != 0) {
1847 		device_printf(sc->sc_dev,
1848 		    "%s: could not create RX buf DMA tag, error %d\n",
1849 		    __func__, error);
1850 		goto fail;
1851 	}
1852 
1853 	/*
1854 	 * Allocate and map RX buffers.
1855 	 */
1856 	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1857 		struct iwn_rx_data *data = &ring->data[i];
1858 		bus_addr_t paddr;
1859 
1860 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1861 		if (error != 0) {
1862 			device_printf(sc->sc_dev,
1863 			    "%s: could not create RX buf DMA map, error %d\n",
1864 			    __func__, error);
1865 			goto fail;
1866 		}
1867 
1868 		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1869 		    IWN_RBUF_SIZE);
1870 		if (data->m == NULL) {
1871 			device_printf(sc->sc_dev,
1872 			    "%s: could not allocate RX mbuf\n", __func__);
1873 			error = ENOBUFS;
1874 			goto fail;
1875 		}
1876 
1877 		error = bus_dmamap_load(ring->data_dmat, data->map,
1878 		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
1879 		    &paddr, BUS_DMA_NOWAIT);
1880 		if (error != 0 && error != EFBIG) {
1881 			device_printf(sc->sc_dev,
1882 			    "%s: can't map mbuf, error %d\n", __func__,
1883 			    error);
1884 			goto fail;
1885 		}
1886 
1887 		/* Set physical address of RX buffer (256-byte aligned). */
1888 		ring->desc[i] = htole32(paddr >> 8);
1889 	}
1890 
1891 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1892 	    BUS_DMASYNC_PREWRITE);
1893 
1894 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
1895 
1896 	return 0;
1897 
1898 fail:	iwn_free_rx_ring(sc, ring);
1899 
1900 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
1901 
1902 	return error;
1903 }
1904 
1905 static void
1906 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1907 {
1908 	int ntries;
1909 
1910 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
1911 
1912 	if (iwn_nic_lock(sc) == 0) {
1913 		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1914 		for (ntries = 0; ntries < 1000; ntries++) {
1915 			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1916 			    IWN_FH_RX_STATUS_IDLE)
1917 				break;
1918 			DELAY(10);
1919 		}
1920 		iwn_nic_unlock(sc);
1921 	}
1922 	ring->cur = 0;
1923 	sc->last_rx_valid = 0;
1924 }
1925 
1926 static void
1927 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1928 {
1929 	int i;
1930 
1931 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);
1932 
1933 	iwn_dma_contig_free(&ring->desc_dma);
1934 	iwn_dma_contig_free(&ring->stat_dma);
1935 
1936 	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1937 		struct iwn_rx_data *data = &ring->data[i];
1938 
1939 		if (data->m != NULL) {
1940 			bus_dmamap_sync(ring->data_dmat, data->map,
1941 			    BUS_DMASYNC_POSTREAD);
1942 			bus_dmamap_unload(ring->data_dmat, data->map);
1943 			m_freem(data->m);
1944 			data->m = NULL;
1945 		}
1946 		if (data->map != NULL)
1947 			bus_dmamap_destroy(ring->data_dmat, data->map);
1948 	}
1949 	if (ring->data_dmat != NULL) {
1950 		bus_dma_tag_destroy(ring->data_dmat);
1951 		ring->data_dmat = NULL;
1952 	}
1953 }
1954 
1955 static int
1956 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1957 {
1958 	bus_addr_t paddr;
1959 	bus_size_t size;
1960 	int i, error;
1961 
1962 	ring->qid = qid;
1963 	ring->queued = 0;
1964 	ring->cur = 0;
1965 
1966 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1967 
1968 	/* Allocate TX descriptors (256-byte aligned). */
1969 	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1970 	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1971 	    size, 256);
1972 	if (error != 0) {
1973 		device_printf(sc->sc_dev,
1974 		    "%s: could not allocate TX ring DMA memory, error %d\n",
1975 		    __func__, error);
1976 		goto fail;
1977 	}
1978 
1979 	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
1980 	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
1981 	    size, 4);
1982 	if (error != 0) {
1983 		device_printf(sc->sc_dev,
1984 		    "%s: could not allocate TX cmd DMA memory, error %d\n",
1985 		    __func__, error);
1986 		goto fail;
1987 	}
1988 
1989 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1990 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1991 	    IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
1992 	    &ring->data_dmat);
1993 	if (error != 0) {
1994 		device_printf(sc->sc_dev,
1995 		    "%s: could not create TX buf DMA tag, error %d\n",
1996 		    __func__, error);
1997 		goto fail;
1998 	}
1999 
2000 	paddr = ring->cmd_dma.paddr;
2001 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2002 		struct iwn_tx_data *data = &ring->data[i];
2003 
2004 		data->cmd_paddr = paddr;
2005 		data->scratch_paddr = paddr + 12;
2006 		paddr += sizeof (struct iwn_tx_cmd);
2007 
2008 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2009 		if (error != 0) {
2010 			device_printf(sc->sc_dev,
2011 			    "%s: could not create TX buf DMA map, error %d\n",
2012 			    __func__, error);
2013 			goto fail;
2014 		}
2015 	}
2016 
2017 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2018 
2019 	return 0;
2020 
2021 fail:	iwn_free_tx_ring(sc, ring);
2022 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
2023 	return error;
2024 }
2025 
2026 static void
2027 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
2028 {
2029 	int i;
2030 
2031 	DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__);
2032 
2033 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2034 		struct iwn_tx_data *data = &ring->data[i];
2035 
2036 		if (data->m != NULL) {
2037 			bus_dmamap_sync(ring->data_dmat, data->map,
2038 			    BUS_DMASYNC_POSTWRITE);
2039 			bus_dmamap_unload(ring->data_dmat, data->map);
2040 			m_freem(data->m);
2041 			data->m = NULL;
2042 		}
2043 		if (data->ni != NULL) {
2044 			ieee80211_free_node(data->ni);
2045 			data->ni = NULL;
2046 		}
2047 	}
2048 	/* Clear TX descriptors. */
2049 	memset(ring->desc, 0, ring->desc_dma.size);
2050 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2051 	    BUS_DMASYNC_PREWRITE);
2052 	sc->qfullmsk &= ~(1 << ring->qid);
2053 	ring->queued = 0;
2054 	ring->cur = 0;
2055 }
2056 
2057 static void
2058 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
2059 {
2060 	int i;
2061 
2062 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);
2063 
2064 	iwn_dma_contig_free(&ring->desc_dma);
2065 	iwn_dma_contig_free(&ring->cmd_dma);
2066 
2067 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2068 		struct iwn_tx_data *data = &ring->data[i];
2069 
2070 		if (data->m != NULL) {
2071 			bus_dmamap_sync(ring->data_dmat, data->map,
2072 			    BUS_DMASYNC_POSTWRITE);
2073 			bus_dmamap_unload(ring->data_dmat, data->map);
2074 			m_freem(data->m);
2075 		}
2076 		if (data->map != NULL)
2077 			bus_dmamap_destroy(ring->data_dmat, data->map);
2078 	}
2079 	if (ring->data_dmat != NULL) {
2080 		bus_dma_tag_destroy(ring->data_dmat);
2081 		ring->data_dmat = NULL;
2082 	}
2083 }
2084 
2085 static void
2086 iwn5000_ict_reset(struct iwn_softc *sc)
2087 {
2088 	/* Disable interrupts. */
2089 	IWN_WRITE(sc, IWN_INT_MASK, 0);
2090 
2091 	/* Reset ICT table. */
2092 	memset(sc->ict, 0, IWN_ICT_SIZE);
2093 	sc->ict_cur = 0;
2094 
2095 	/* Set physical address of ICT table (4KB aligned). */
2096 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
2097 	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
2098 	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
2099 
2100 	/* Enable periodic RX interrupt. */
2101 	sc->int_mask |= IWN_INT_RX_PERIODIC;
2102 	/* Switch to ICT interrupt mode in driver. */
2103 	sc->sc_flags |= IWN_FLAG_USE_ICT;
2104 
2105 	/* Re-enable interrupts. */
2106 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
2107 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2108 }
2109 
2110 static int
2111 iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
2112 {
2113 	struct iwn_ops *ops = &sc->ops;
2114 	uint16_t val;
2115 	int error;
2116 
2117 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2118 
2119 	/* Check whether adapter has an EEPROM or an OTPROM. */
2120 	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
2121 	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
2122 		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
2123 	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
2124 	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
2125 
2126 	/* Adapter has to be powered on for EEPROM access to work. */
2127 	if ((error = iwn_apm_init(sc)) != 0) {
2128 		device_printf(sc->sc_dev,
2129 		    "%s: could not power ON adapter, error %d\n", __func__,
2130 		    error);
2131 		return error;
2132 	}
2133 
2134 	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
2135 		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
2136 		return EIO;
2137 	}
2138 	if ((error = iwn_eeprom_lock(sc)) != 0) {
2139 		device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
2140 		    __func__, error);
2141 		return error;
2142 	}
2143 	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
2144 		if ((error = iwn_init_otprom(sc)) != 0) {
2145 			device_printf(sc->sc_dev,
2146 			    "%s: could not initialize OTPROM, error %d\n",
2147 			    __func__, error);
2148 			return error;
2149 		}
2150 	}
2151 
2152 	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
2153 	DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
2154 	/* Check if HT support is bonded out. */
2155 	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
2156 		sc->sc_flags |= IWN_FLAG_HAS_11N;
2157 
2158 	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
2159 	sc->rfcfg = le16toh(val);
2160 	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
2161 	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
2162 	if (sc->txchainmask == 0)
2163 		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
2164 	if (sc->rxchainmask == 0)
2165 		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
2166 
2167 	/* Read MAC address. */
2168 	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
2169 
2170 	/* Read adapter-specific information from EEPROM. */
2171 	ops->read_eeprom(sc);
2172 
2173 	iwn_apm_stop(sc);	/* Power OFF adapter. */
2174 
2175 	iwn_eeprom_unlock(sc);
2176 
2177 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2178 
2179 	return 0;
2180 }
2181 
2182 static void
2183 iwn4965_read_eeprom(struct iwn_softc *sc)
2184 {
2185 	uint32_t addr;
2186 	uint16_t val;
2187 	int i;
2188 
2189 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2190 
2191 	/* Read regulatory domain (4 ASCII characters). */
2192 	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
2193 
2194 	/* Read the list of authorized channels (20MHz ones only). */
2195 	for (i = 0; i < IWN_NBANDS - 1; i++) {
2196 		addr = iwn4965_regulatory_bands[i];
2197 		iwn_read_eeprom_channels(sc, i, addr);
2198 	}
2199 
2200 	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
2201 	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
2202 	sc->maxpwr2GHz = val & 0xff;
2203 	sc->maxpwr5GHz = val >> 8;
2204 	/* Check that EEPROM values are within valid range. */
2205 	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
2206 		sc->maxpwr5GHz = 38;
2207 	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
2208 		sc->maxpwr2GHz = 38;
2209 	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
2210 	    sc->maxpwr2GHz, sc->maxpwr5GHz);
2211 
2212 	/* Read samples for each TX power group. */
2213 	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
2214 	    sizeof sc->bands);
2215 
2216 	/* Read voltage at which samples were taken. */
2217 	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
2218 	sc->eeprom_voltage = (int16_t)le16toh(val);
2219 	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
2220 	    sc->eeprom_voltage);
2221 
2222 #ifdef IWN_DEBUG
2223 	/* Print samples. */
2224 	if (sc->sc_debug & IWN_DEBUG_ANY) {
2225 		for (i = 0; i < IWN_NBANDS - 1; i++)
2226 			iwn4965_print_power_group(sc, i);
2227 	}
2228 #endif
2229 
2230 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2231 }
2232 
2233 #ifdef IWN_DEBUG
2234 static void
2235 iwn4965_print_power_group(struct iwn_softc *sc, int i)
2236 {
2237 	struct iwn4965_eeprom_band *band = &sc->bands[i];
2238 	struct iwn4965_eeprom_chan_samples *chans = band->chans;
2239 	int j, c;
2240 
2241 	printf("===band %d===\n", i);
2242 	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
2243 	printf("chan1 num=%d\n", chans[0].num);
2244 	for (c = 0; c < 2; c++) {
2245 		for (j = 0; j < IWN_NSAMPLES; j++) {
2246 			printf("chain %d, sample %d: temp=%d gain=%d "
2247 			    "power=%d pa_det=%d\n", c, j,
2248 			    chans[0].samples[c][j].temp,
2249 			    chans[0].samples[c][j].gain,
2250 			    chans[0].samples[c][j].power,
2251 			    chans[0].samples[c][j].pa_det);
2252 		}
2253 	}
2254 	printf("chan2 num=%d\n", chans[1].num);
2255 	for (c = 0; c < 2; c++) {
2256 		for (j = 0; j < IWN_NSAMPLES; j++) {
2257 			printf("chain %d, sample %d: temp=%d gain=%d "
2258 			    "power=%d pa_det=%d\n", c, j,
2259 			    chans[1].samples[c][j].temp,
2260 			    chans[1].samples[c][j].gain,
2261 			    chans[1].samples[c][j].power,
2262 			    chans[1].samples[c][j].pa_det);
2263 		}
2264 	}
2265 }
2266 #endif
2267 
2268 static void
2269 iwn5000_read_eeprom(struct iwn_softc *sc)
2270 {
2271 	struct iwn5000_eeprom_calib_hdr hdr;
2272 	int32_t volt;
2273 	uint32_t base, addr;
2274 	uint16_t val;
2275 	int i;
2276 
2277 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2278 
2279 	/* Read regulatory domain (4 ASCII characters). */
2280 	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2281 	base = le16toh(val);
2282 	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
2283 	    sc->eeprom_domain, 4);
2284 
2285 	/* Read the list of authorized channels (20MHz ones only). */
2286 	for (i = 0; i < IWN_NBANDS - 1; i++) {
2287 		addr =  base + sc->base_params->regulatory_bands[i];
2288 		iwn_read_eeprom_channels(sc, i, addr);
2289 	}
2290 
2291 	/* Read enhanced TX power information for 6000 Series. */
2292 	if (sc->base_params->enhanced_TX_power)
2293 		iwn_read_eeprom_enhinfo(sc);
2294 
2295 	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
2296 	base = le16toh(val);
2297 	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
2298 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2299 	    "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
2300 	    hdr.version, hdr.pa_type, le16toh(hdr.volt));
2301 	sc->calib_ver = hdr.version;
2302 
2303 	if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
2304 		sc->eeprom_voltage = le16toh(hdr.volt);
2305 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
2306 		sc->eeprom_temp_high=le16toh(val);
2307 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
2308 		sc->eeprom_temp = le16toh(val);
2309 	}
2310 
2311 	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
2312 		/* Compute temperature offset. */
2313 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
2314 		sc->eeprom_temp = le16toh(val);
2315 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
2316 		volt = le16toh(val);
2317 		sc->temp_off = sc->eeprom_temp - (volt / -5);
2318 		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
2319 		    sc->eeprom_temp, volt, sc->temp_off);
2320 	} else {
2321 		/* Read crystal calibration. */
2322 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
2323 		    &sc->eeprom_crystal, sizeof (uint32_t));
2324 		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
2325 		    le32toh(sc->eeprom_crystal));
2326 	}
2327 
2328 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2329 
2330 }
2331 
2332 /*
2333  * Translate EEPROM flags to net80211.
2334  */
2335 static uint32_t
2336 iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
2337 {
2338 	uint32_t nflags;
2339 
2340 	nflags = 0;
2341 	if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
2342 		nflags |= IEEE80211_CHAN_PASSIVE;
2343 	if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
2344 		nflags |= IEEE80211_CHAN_NOADHOC;
2345 	if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
2346 		nflags |= IEEE80211_CHAN_DFS;
2347 		/* XXX apparently IBSS may still be marked */
2348 		nflags |= IEEE80211_CHAN_NOADHOC;
2349 	}
2350 
2351 	return nflags;
2352 }
2353 
2354 static void
2355 iwn_read_eeprom_band(struct iwn_softc *sc, int n)
2356 {
2357 	struct ifnet *ifp = sc->sc_ifp;
2358 	struct ieee80211com *ic = ifp->if_l2com;
2359 	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
2360 	const struct iwn_chan_band *band = &iwn_bands[n];
2361 	struct ieee80211_channel *c;
2362 	uint8_t chan;
2363 	int i, nflags;
2364 
2365 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2366 
2367 	for (i = 0; i < band->nchan; i++) {
2368 		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
2369 			DPRINTF(sc, IWN_DEBUG_RESET,
2370 			    "skip chan %d flags 0x%x maxpwr %d\n",
2371 			    band->chan[i], channels[i].flags,
2372 			    channels[i].maxpwr);
2373 			continue;
2374 		}
2375 		chan = band->chan[i];
2376 		nflags = iwn_eeprom_channel_flags(&channels[i]);
2377 
2378 		c = &ic->ic_channels[ic->ic_nchans++];
2379 		c->ic_ieee = chan;
2380 		c->ic_maxregpower = channels[i].maxpwr;
2381 		c->ic_maxpower = 2*c->ic_maxregpower;
2382 
2383 		if (n == 0) {	/* 2GHz band */
2384 			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
2385 			/* G =>'s B is supported */
2386 			c->ic_flags = IEEE80211_CHAN_B | nflags;
2387 			c = &ic->ic_channels[ic->ic_nchans++];
2388 			c[0] = c[-1];
2389 			c->ic_flags = IEEE80211_CHAN_G | nflags;
2390 		} else {	/* 5GHz band */
2391 			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
2392 			c->ic_flags = IEEE80211_CHAN_A | nflags;
2393 		}
2394 
2395 		/* Save maximum allowed TX power for this channel. */
2396 		sc->maxpwr[chan] = channels[i].maxpwr;
2397 
2398 		DPRINTF(sc, IWN_DEBUG_RESET,
2399 		    "add chan %d flags 0x%x maxpwr %d\n", chan,
2400 		    channels[i].flags, channels[i].maxpwr);
2401 
2402 		if (sc->sc_flags & IWN_FLAG_HAS_11N) {
2403 			/* add HT20, HT40 added separately */
2404 			c = &ic->ic_channels[ic->ic_nchans++];
2405 			c[0] = c[-1];
2406 			c->ic_flags |= IEEE80211_CHAN_HT20;
2407 		}
2408 	}
2409 
2410 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2411 
2412 }
2413 
2414 static void
2415 iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
2416 {
2417 	struct ifnet *ifp = sc->sc_ifp;
2418 	struct ieee80211com *ic = ifp->if_l2com;
2419 	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
2420 	const struct iwn_chan_band *band = &iwn_bands[n];
2421 	struct ieee80211_channel *c, *cent, *extc;
2422 	uint8_t chan;
2423 	int i, nflags;
2424 
2425 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__);
2426 
2427 	if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) {
2428 		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__);
2429 		return;
2430 	}
2431 
2432 	for (i = 0; i < band->nchan; i++) {
2433 		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
2434 			DPRINTF(sc, IWN_DEBUG_RESET,
2435 			    "skip chan %d flags 0x%x maxpwr %d\n",
2436 			    band->chan[i], channels[i].flags,
2437 			    channels[i].maxpwr);
2438 			continue;
2439 		}
2440 		chan = band->chan[i];
2441 		nflags = iwn_eeprom_channel_flags(&channels[i]);
2442 
2443 		/*
2444 		 * Each entry defines an HT40 channel pair; find the
2445 		 * center channel, then the extension channel above.
2446 		 */
2447 		cent = ieee80211_find_channel_byieee(ic, chan,
2448 		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
2449 		if (cent == NULL) {	/* XXX shouldn't happen */
2450 			device_printf(sc->sc_dev,
2451 			    "%s: no entry for channel %d\n", __func__, chan);
2452 			continue;
2453 		}
2454 		extc = ieee80211_find_channel(ic, cent->ic_freq+20,
2455 		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
2456 		if (extc == NULL) {
2457 			DPRINTF(sc, IWN_DEBUG_RESET,
2458 			    "%s: skip chan %d, extension channel not found\n",
2459 			    __func__, chan);
2460 			continue;
2461 		}
2462 
2463 		DPRINTF(sc, IWN_DEBUG_RESET,
2464 		    "add ht40 chan %d flags 0x%x maxpwr %d\n",
2465 		    chan, channels[i].flags, channels[i].maxpwr);
2466 
2467 		c = &ic->ic_channels[ic->ic_nchans++];
2468 		c[0] = cent[0];
2469 		c->ic_extieee = extc->ic_ieee;
2470 		c->ic_flags &= ~IEEE80211_CHAN_HT;
2471 		c->ic_flags |= IEEE80211_CHAN_HT40U | nflags;
2472 		c = &ic->ic_channels[ic->ic_nchans++];
2473 		c[0] = extc[0];
2474 		c->ic_extieee = cent->ic_ieee;
2475 		c->ic_flags &= ~IEEE80211_CHAN_HT;
2476 		c->ic_flags |= IEEE80211_CHAN_HT40D | nflags;
2477 	}
2478 
2479 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2480 
2481 }
2482 
2483 static void
2484 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
2485 {
2486 	struct ifnet *ifp = sc->sc_ifp;
2487 	struct ieee80211com *ic = ifp->if_l2com;
2488 
2489 	iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
2490 	    iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
2491 
2492 	if (n < 5)
2493 		iwn_read_eeprom_band(sc, n);
2494 	else
2495 		iwn_read_eeprom_ht40(sc, n);
2496 	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
2497 }
2498 
2499 static struct iwn_eeprom_chan *
2500 iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
2501 {
2502 	int band, chan, i, j;
2503 
2504 	if (IEEE80211_IS_CHAN_HT40(c)) {
2505 		band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5;
2506 		if (IEEE80211_IS_CHAN_HT40D(c))
2507 			chan = c->ic_extieee;
2508 		else
2509 			chan = c->ic_ieee;
2510 		for (i = 0; i < iwn_bands[band].nchan; i++) {
2511 			if (iwn_bands[band].chan[i] == chan)
2512 				return &sc->eeprom_channels[band][i];
2513 		}
2514 	} else {
2515 		for (j = 0; j < 5; j++) {
2516 			for (i = 0; i < iwn_bands[j].nchan; i++) {
2517 				if (iwn_bands[j].chan[i] == c->ic_ieee)
2518 					return &sc->eeprom_channels[j][i];
2519 			}
2520 		}
2521 	}
2522 	return NULL;
2523 }
2524 
2525 /*
2526  * Enforce flags read from EEPROM.
2527  */
2528 static int
2529 iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
2530     int nchan, struct ieee80211_channel chans[])
2531 {
2532 	struct iwn_softc *sc = ic->ic_softc;
2533 	int i;
2534 
2535 	for (i = 0; i < nchan; i++) {
2536 		struct ieee80211_channel *c = &chans[i];
2537 		struct iwn_eeprom_chan *channel;
2538 
2539 		channel = iwn_find_eeprom_channel(sc, c);
2540 		if (channel == NULL) {
2541 			ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n",
2542 			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
2543 			return EINVAL;
2544 		}
2545 		c->ic_flags |= iwn_eeprom_channel_flags(channel);
2546 	}
2547 
2548 	return 0;
2549 }
2550 
2551 static void
2552 iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
2553 {
2554 	struct iwn_eeprom_enhinfo enhinfo[35];
2555 	struct ifnet *ifp = sc->sc_ifp;
2556 	struct ieee80211com *ic = ifp->if_l2com;
2557 	struct ieee80211_channel *c;
2558 	uint16_t val, base;
2559 	int8_t maxpwr;
2560 	uint8_t flags;
2561 	int i, j;
2562 
2563 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2564 
2565 	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2566 	base = le16toh(val);
2567 	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
2568 	    enhinfo, sizeof enhinfo);
2569 
2570 	for (i = 0; i < nitems(enhinfo); i++) {
2571 		flags = enhinfo[i].flags;
2572 		if (!(flags & IWN_ENHINFO_VALID))
2573 			continue;	/* Skip invalid entries. */
2574 
2575 		maxpwr = 0;
2576 		if (sc->txchainmask & IWN_ANT_A)
2577 			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
2578 		if (sc->txchainmask & IWN_ANT_B)
2579 			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
2580 		if (sc->txchainmask & IWN_ANT_C)
2581 			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
2582 		if (sc->ntxchains == 2)
2583 			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
2584 		else if (sc->ntxchains == 3)
2585 			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
2586 
2587 		for (j = 0; j < ic->ic_nchans; j++) {
2588 			c = &ic->ic_channels[j];
2589 			if ((flags & IWN_ENHINFO_5GHZ)) {
2590 				if (!IEEE80211_IS_CHAN_A(c))
2591 					continue;
2592 			} else if ((flags & IWN_ENHINFO_OFDM)) {
2593 				if (!IEEE80211_IS_CHAN_G(c))
2594 					continue;
2595 			} else if (!IEEE80211_IS_CHAN_B(c))
2596 				continue;
2597 			if ((flags & IWN_ENHINFO_HT40)) {
2598 				if (!IEEE80211_IS_CHAN_HT40(c))
2599 					continue;
2600 			} else {
2601 				if (IEEE80211_IS_CHAN_HT40(c))
2602 					continue;
2603 			}
2604 			if (enhinfo[i].chan != 0 &&
2605 			    enhinfo[i].chan != c->ic_ieee)
2606 				continue;
2607 
2608 			DPRINTF(sc, IWN_DEBUG_RESET,
2609 			    "channel %d(%x), maxpwr %d\n", c->ic_ieee,
2610 			    c->ic_flags, maxpwr / 2);
2611 			c->ic_maxregpower = maxpwr / 2;
2612 			c->ic_maxpower = maxpwr;
2613 		}
2614 	}
2615 
2616 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2617 
2618 }
2619 
2620 static struct ieee80211_node *
2621 iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2622 {
2623 	return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO);
2624 }
2625 
2626 static __inline int
2627 rate2plcp(int rate)
2628 {
2629 	switch (rate & 0xff) {
2630 	case 12:	return 0xd;
2631 	case 18:	return 0xf;
2632 	case 24:	return 0x5;
2633 	case 36:	return 0x7;
2634 	case 48:	return 0x9;
2635 	case 72:	return 0xb;
2636 	case 96:	return 0x1;
2637 	case 108:	return 0x3;
2638 	case 2:		return 10;
2639 	case 4:		return 20;
2640 	case 11:	return 55;
2641 	case 22:	return 110;
2642 	}
2643 	return 0;
2644 }
2645 
2646 static int
2647 iwn_get_1stream_tx_antmask(struct iwn_softc *sc)
2648 {
2649 
2650 	return IWN_LSB(sc->txchainmask);
2651 }
2652 
2653 static int
2654 iwn_get_2stream_tx_antmask(struct iwn_softc *sc)
2655 {
2656 	int tx;
2657 
2658 	/*
2659 	 * The '2 stream' setup is a bit .. odd.
2660 	 *
2661 	 * For NICs that support only 1 antenna, default to IWN_ANT_AB or
2662 	 * the firmware panics (eg Intel 5100.)
2663 	 *
2664 	 * For NICs that support two antennas, we use ANT_AB.
2665 	 *
2666 	 * For NICs that support three antennas, we use the two that
2667 	 * wasn't the default one.
2668 	 *
2669 	 * XXX TODO: if bluetooth (full concurrent) is enabled, restrict
2670 	 * this to only one antenna.
2671 	 */
2672 
2673 	/* Default - transmit on the other antennas */
2674 	tx = (sc->txchainmask & ~IWN_LSB(sc->txchainmask));
2675 
2676 	/* Now, if it's zero, set it to IWN_ANT_AB, so to not panic firmware */
2677 	if (tx == 0)
2678 		tx = IWN_ANT_AB;
2679 
2680 	/*
2681 	 * If the NIC is a two-stream TX NIC, configure the TX mask to
2682 	 * the default chainmask
2683 	 */
2684 	else if (sc->ntxchains == 2)
2685 		tx = sc->txchainmask;
2686 
2687 	return (tx);
2688 }
2689 
2690 
2691 
2692 /*
2693  * Calculate the required PLCP value from the given rate,
2694  * to the given node.
2695  *
2696  * This will take the node configuration (eg 11n, rate table
2697  * setup, etc) into consideration.
2698  */
2699 static uint32_t
2700 iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni,
2701     uint8_t rate)
2702 {
2703 #define	RV(v)	((v) & IEEE80211_RATE_VAL)
2704 	struct ieee80211com *ic = ni->ni_ic;
2705 	uint32_t plcp = 0;
2706 	int ridx;
2707 
2708 	/*
2709 	 * If it's an MCS rate, let's set the plcp correctly
2710 	 * and set the relevant flags based on the node config.
2711 	 */
2712 	if (rate & IEEE80211_RATE_MCS) {
2713 		/*
2714 		 * Set the initial PLCP value to be between 0->31 for
2715 		 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!"
2716 		 * flag.
2717 		 */
2718 		plcp = RV(rate) | IWN_RFLAG_MCS;
2719 
2720 		/*
2721 		 * XXX the following should only occur if both
2722 		 * the local configuration _and_ the remote node
2723 		 * advertise these capabilities.  Thus this code
2724 		 * may need fixing!
2725 		 */
2726 
2727 		/*
2728 		 * Set the channel width and guard interval.
2729 		 */
2730 		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
2731 			plcp |= IWN_RFLAG_HT40;
2732 			if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
2733 				plcp |= IWN_RFLAG_SGI;
2734 		} else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
2735 			plcp |= IWN_RFLAG_SGI;
2736 		}
2737 
2738 		/*
2739 		 * Ensure the selected rate matches the link quality
2740 		 * table entries being used.
2741 		 */
2742 		if (rate > 0x8f)
2743 			plcp |= IWN_RFLAG_ANT(sc->txchainmask);
2744 		else if (rate > 0x87)
2745 			plcp |= IWN_RFLAG_ANT(iwn_get_2stream_tx_antmask(sc));
2746 		else
2747 			plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc));
2748 	} else {
2749 		/*
2750 		 * Set the initial PLCP - fine for both
2751 		 * OFDM and CCK rates.
2752 		 */
2753 		plcp = rate2plcp(rate);
2754 
2755 		/* Set CCK flag if it's CCK */
2756 
2757 		/* XXX It would be nice to have a method
2758 		 * to map the ridx -> phy table entry
2759 		 * so we could just query that, rather than
2760 		 * this hack to check against IWN_RIDX_OFDM6.
2761 		 */
2762 		ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
2763 		    rate & IEEE80211_RATE_VAL);
2764 		if (ridx < IWN_RIDX_OFDM6 &&
2765 		    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
2766 			plcp |= IWN_RFLAG_CCK;
2767 
2768 		/* Set antenna configuration */
2769 		/* XXX TODO: is this the right antenna to use for legacy? */
2770 		plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc));
2771 	}
2772 
2773 	DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n",
2774 	    __func__,
2775 	    rate,
2776 	    plcp);
2777 
2778 	return (htole32(plcp));
2779 #undef	RV
2780 }
2781 
2782 static void
2783 iwn_newassoc(struct ieee80211_node *ni, int isnew)
2784 {
2785 	/* Doesn't do anything at the moment */
2786 }
2787 
2788 static int
2789 iwn_media_change(struct ifnet *ifp)
2790 {
2791 	int error;
2792 
2793 	error = ieee80211_media_change(ifp);
2794 	/* NB: only the fixed rate can change and that doesn't need a reset */
2795 	return (error == ENETRESET ? 0 : error);
2796 }
2797 
2798 static int
2799 iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
2800 {
2801 	struct iwn_vap *ivp = IWN_VAP(vap);
2802 	struct ieee80211com *ic = vap->iv_ic;
2803 	struct iwn_softc *sc = ic->ic_softc;
2804 	int error = 0;
2805 
2806 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2807 
2808 	DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
2809 	    ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);
2810 
2811 	IEEE80211_UNLOCK(ic);
2812 	IWN_LOCK(sc);
2813 	callout_stop(&sc->calib_to);
2814 
2815 	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
2816 
2817 	switch (nstate) {
2818 	case IEEE80211_S_ASSOC:
2819 		if (vap->iv_state != IEEE80211_S_RUN)
2820 			break;
2821 		/* FALLTHROUGH */
2822 	case IEEE80211_S_AUTH:
2823 		if (vap->iv_state == IEEE80211_S_AUTH)
2824 			break;
2825 
2826 		/*
2827 		 * !AUTH -> AUTH transition requires state reset to handle
2828 		 * reassociations correctly.
2829 		 */
2830 		sc->rxon->associd = 0;
2831 		sc->rxon->filter &= ~htole32(IWN_FILTER_BSS);
2832 		sc->calib.state = IWN_CALIB_STATE_INIT;
2833 
2834 		if ((error = iwn_auth(sc, vap)) != 0) {
2835 			device_printf(sc->sc_dev,
2836 			    "%s: could not move to auth state\n", __func__);
2837 		}
2838 		break;
2839 
2840 	case IEEE80211_S_RUN:
2841 		/*
2842 		 * RUN -> RUN transition; Just restart the timers.
2843 		 */
2844 		if (vap->iv_state == IEEE80211_S_RUN) {
2845 			sc->calib_cnt = 0;
2846 			break;
2847 		}
2848 
2849 		/*
2850 		 * !RUN -> RUN requires setting the association id
2851 		 * which is done with a firmware cmd.  We also defer
2852 		 * starting the timers until that work is done.
2853 		 */
2854 		if ((error = iwn_run(sc, vap)) != 0) {
2855 			device_printf(sc->sc_dev,
2856 			    "%s: could not move to run state\n", __func__);
2857 		}
2858 		break;
2859 
2860 	case IEEE80211_S_INIT:
2861 		sc->calib.state = IWN_CALIB_STATE_INIT;
2862 		break;
2863 
2864 	default:
2865 		break;
2866 	}
2867 	IWN_UNLOCK(sc);
2868 	IEEE80211_LOCK(ic);
2869 	if (error != 0){
2870 		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
2871 		return error;
2872 	}
2873 
2874 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
2875 
2876 	return ivp->iv_newstate(vap, nstate, arg);
2877 }
2878 
2879 static void
2880 iwn_calib_timeout(void *arg)
2881 {
2882 	struct iwn_softc *sc = arg;
2883 
2884 	IWN_LOCK_ASSERT(sc);
2885 
2886 	/* Force automatic TX power calibration every 60 secs. */
2887 	if (++sc->calib_cnt >= 120) {
2888 		uint32_t flags = 0;
2889 
2890 		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
2891 		    "sending request for statistics");
2892 		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2893 		    sizeof flags, 1);
2894 		sc->calib_cnt = 0;
2895 	}
2896 	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
2897 	    sc);
2898 }
2899 
2900 /*
2901  * Process an RX_PHY firmware notification.  This is usually immediately
2902  * followed by an MPDU_RX_DONE notification.
2903  */
2904 static void
2905 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2906     struct iwn_rx_data *data)
2907 {
2908 	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
2909 
2910 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
2911 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2912 
2913 	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
2914 	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
2915 	sc->last_rx_valid = 1;
2916 }
2917 
2918 /*
2919  * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2920  * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2921  */
2922 static void
2923 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2924     struct iwn_rx_data *data)
2925 {
2926 	struct iwn_ops *ops = &sc->ops;
2927 	struct ifnet *ifp = sc->sc_ifp;
2928 	struct ieee80211com *ic = ifp->if_l2com;
2929 	struct iwn_rx_ring *ring = &sc->rxq;
2930 	struct ieee80211_frame *wh;
2931 	struct ieee80211_node *ni;
2932 	struct mbuf *m, *m1;
2933 	struct iwn_rx_stat *stat;
2934 	caddr_t head;
2935 	bus_addr_t paddr;
2936 	uint32_t flags;
2937 	int error, len, rssi, nf;
2938 
2939 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2940 
2941 	if (desc->type == IWN_MPDU_RX_DONE) {
2942 		/* Check for prior RX_PHY notification. */
2943 		if (!sc->last_rx_valid) {
2944 			DPRINTF(sc, IWN_DEBUG_ANY,
2945 			    "%s: missing RX_PHY\n", __func__);
2946 			return;
2947 		}
2948 		stat = &sc->last_rx_stat;
2949 	} else
2950 		stat = (struct iwn_rx_stat *)(desc + 1);
2951 
2952 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2953 
2954 	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2955 		device_printf(sc->sc_dev,
2956 		    "%s: invalid RX statistic header, len %d\n", __func__,
2957 		    stat->cfg_phy_len);
2958 		return;
2959 	}
2960 	if (desc->type == IWN_MPDU_RX_DONE) {
2961 		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2962 		head = (caddr_t)(mpdu + 1);
2963 		len = le16toh(mpdu->len);
2964 	} else {
2965 		head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
2966 		len = le16toh(stat->len);
2967 	}
2968 
2969 	flags = le32toh(*(uint32_t *)(head + len));
2970 
2971 	/* Discard frames with a bad FCS early. */
2972 	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2973 		DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n",
2974 		    __func__, flags);
2975 		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2976 		return;
2977 	}
2978 	/* Discard frames that are too short. */
2979 	if (len < sizeof (struct ieee80211_frame_ack)) {
2980 		DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
2981 		    __func__, len);
2982 		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2983 		return;
2984 	}
2985 
2986 	m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
2987 	if (m1 == NULL) {
2988 		DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
2989 		    __func__);
2990 		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2991 		return;
2992 	}
2993 	bus_dmamap_unload(ring->data_dmat, data->map);
2994 
2995 	error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
2996 	    IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
2997 	if (error != 0 && error != EFBIG) {
2998 		device_printf(sc->sc_dev,
2999 		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
3000 		m_freem(m1);
3001 
3002 		/* Try to reload the old mbuf. */
3003 		error = bus_dmamap_load(ring->data_dmat, data->map,
3004 		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
3005 		    &paddr, BUS_DMA_NOWAIT);
3006 		if (error != 0 && error != EFBIG) {
3007 			panic("%s: could not load old RX mbuf", __func__);
3008 		}
3009 		/* Physical address may have changed. */
3010 		ring->desc[ring->cur] = htole32(paddr >> 8);
3011 		bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map,
3012 		    BUS_DMASYNC_PREWRITE);
3013 		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
3014 		return;
3015 	}
3016 
3017 	m = data->m;
3018 	data->m = m1;
3019 	/* Update RX descriptor. */
3020 	ring->desc[ring->cur] = htole32(paddr >> 8);
3021 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3022 	    BUS_DMASYNC_PREWRITE);
3023 
3024 	/* Finalize mbuf. */
3025 	m->m_pkthdr.rcvif = ifp;
3026 	m->m_data = head;
3027 	m->m_pkthdr.len = m->m_len = len;
3028 
3029 	/* Grab a reference to the source node. */
3030 	wh = mtod(m, struct ieee80211_frame *);
3031 	if (len >= sizeof(struct ieee80211_frame_min))
3032 		ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3033 	else
3034 		ni = NULL;
3035 	nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
3036 	    (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;
3037 
3038 	rssi = ops->get_rssi(sc, stat);
3039 
3040 	if (ieee80211_radiotap_active(ic)) {
3041 		struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
3042 
3043 		tap->wr_flags = 0;
3044 		if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
3045 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3046 		tap->wr_dbm_antsignal = (int8_t)rssi;
3047 		tap->wr_dbm_antnoise = (int8_t)nf;
3048 		tap->wr_tsft = stat->tstamp;
3049 		switch (stat->rate) {
3050 		/* CCK rates. */
3051 		case  10: tap->wr_rate =   2; break;
3052 		case  20: tap->wr_rate =   4; break;
3053 		case  55: tap->wr_rate =  11; break;
3054 		case 110: tap->wr_rate =  22; break;
3055 		/* OFDM rates. */
3056 		case 0xd: tap->wr_rate =  12; break;
3057 		case 0xf: tap->wr_rate =  18; break;
3058 		case 0x5: tap->wr_rate =  24; break;
3059 		case 0x7: tap->wr_rate =  36; break;
3060 		case 0x9: tap->wr_rate =  48; break;
3061 		case 0xb: tap->wr_rate =  72; break;
3062 		case 0x1: tap->wr_rate =  96; break;
3063 		case 0x3: tap->wr_rate = 108; break;
3064 		/* Unknown rate: should not happen. */
3065 		default:  tap->wr_rate =   0;
3066 		}
3067 	}
3068 
3069 	IWN_UNLOCK(sc);
3070 
3071 	/* Send the frame to the 802.11 layer. */
3072 	if (ni != NULL) {
3073 		if (ni->ni_flags & IEEE80211_NODE_HT)
3074 			m->m_flags |= M_AMPDU;
3075 		(void)ieee80211_input(ni, m, rssi - nf, nf);
3076 		/* Node is no longer needed. */
3077 		ieee80211_free_node(ni);
3078 	} else
3079 		(void)ieee80211_input_all(ic, m, rssi - nf, nf);
3080 
3081 	IWN_LOCK(sc);
3082 
3083 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3084 
3085 }
3086 
3087 /* Process an incoming Compressed BlockAck. */
3088 static void
3089 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3090     struct iwn_rx_data *data)
3091 {
3092 	struct iwn_ops *ops = &sc->ops;
3093 	struct ifnet *ifp = sc->sc_ifp;
3094 	struct iwn_node *wn;
3095 	struct ieee80211_node *ni;
3096 	struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
3097 	struct iwn_tx_ring *txq;
3098 	struct iwn_tx_data *txdata;
3099 	struct ieee80211_tx_ampdu *tap;
3100 	struct mbuf *m;
3101 	uint64_t bitmap;
3102 	uint16_t ssn;
3103 	uint8_t tid;
3104 	int ackfailcnt = 0, i, lastidx, qid, *res, shift;
3105 	int tx_ok = 0, tx_err = 0;
3106 
3107 	DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s begin\n", __func__);
3108 
3109 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3110 
3111 	qid = le16toh(ba->qid);
3112 	txq = &sc->txq[ba->qid];
3113 	tap = sc->qid2tap[ba->qid];
3114 	tid = tap->txa_tid;
3115 	wn = (void *)tap->txa_ni;
3116 
3117 	res = NULL;
3118 	ssn = 0;
3119 	if (!IEEE80211_AMPDU_RUNNING(tap)) {
3120 		res = tap->txa_private;
3121 		ssn = tap->txa_start & 0xfff;
3122 	}
3123 
3124 	for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) {
3125 		txdata = &txq->data[txq->read];
3126 
3127 		/* Unmap and free mbuf. */
3128 		bus_dmamap_sync(txq->data_dmat, txdata->map,
3129 		    BUS_DMASYNC_POSTWRITE);
3130 		bus_dmamap_unload(txq->data_dmat, txdata->map);
3131 		m = txdata->m, txdata->m = NULL;
3132 		ni = txdata->ni, txdata->ni = NULL;
3133 
3134 		KASSERT(ni != NULL, ("no node"));
3135 		KASSERT(m != NULL, ("no mbuf"));
3136 
3137 		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: freeing m=%p\n", __func__, m);
3138 		ieee80211_tx_complete(ni, m, 1);
3139 
3140 		txq->queued--;
3141 		txq->read = (txq->read + 1) % IWN_TX_RING_COUNT;
3142 	}
3143 
3144 	if (txq->queued == 0 && res != NULL) {
3145 		iwn_nic_lock(sc);
3146 		ops->ampdu_tx_stop(sc, qid, tid, ssn);
3147 		iwn_nic_unlock(sc);
3148 		sc->qid2tap[qid] = NULL;
3149 		free(res, M_DEVBUF);
3150 		return;
3151 	}
3152 
3153 	if (wn->agg[tid].bitmap == 0)
3154 		return;
3155 
3156 	shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff);
3157 	if (shift < 0)
3158 		shift += 0x100;
3159 
3160 	if (wn->agg[tid].nframes > (64 - shift))
3161 		return;
3162 
3163 	/*
3164 	 * Walk the bitmap and calculate how many successful and failed
3165 	 * attempts are made.
3166 	 *
3167 	 * Yes, the rate control code doesn't know these are A-MPDU
3168 	 * subframes and that it's okay to fail some of these.
3169 	 */
3170 	ni = tap->txa_ni;
3171 	bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap;
3172 	for (i = 0; bitmap; i++) {
3173 		if ((bitmap & 1) == 0) {
3174 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
3175 			tx_err ++;
3176 			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
3177 			    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
3178 		} else {
3179 			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
3180 			tx_ok ++;
3181 			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
3182 			    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
3183 		}
3184 		bitmap >>= 1;
3185 	}
3186 
3187 	DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT,
3188 	    "->%s: end; %d ok; %d err\n",__func__, tx_ok, tx_err);
3189 
3190 }
3191 
3192 /*
3193  * Process a CALIBRATION_RESULT notification sent by the initialization
3194  * firmware on response to a CMD_CALIB_CONFIG command (5000 only).
3195  */
3196 static void
3197 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3198     struct iwn_rx_data *data)
3199 {
3200 	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
3201 	int len, idx = -1;
3202 
3203 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3204 
3205 	/* Runtime firmware should not send such a notification. */
3206 	if (sc->sc_flags & IWN_FLAG_CALIB_DONE){
3207 		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after clib done\n",
3208 	    __func__);
3209 		return;
3210 	}
3211 	len = (le32toh(desc->len) & 0x3fff) - 4;
3212 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3213 
3214 	switch (calib->code) {
3215 	case IWN5000_PHY_CALIB_DC:
3216 		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_DC)
3217 			idx = 0;
3218 		break;
3219 	case IWN5000_PHY_CALIB_LO:
3220 		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_LO)
3221 			idx = 1;
3222 		break;
3223 	case IWN5000_PHY_CALIB_TX_IQ:
3224 		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ)
3225 			idx = 2;
3226 		break;
3227 	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
3228 		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ_PERIODIC)
3229 			idx = 3;
3230 		break;
3231 	case IWN5000_PHY_CALIB_BASE_BAND:
3232 		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_BASE_BAND)
3233 			idx = 4;
3234 		break;
3235 	}
3236 	if (idx == -1)	/* Ignore other results. */
3237 		return;
3238 
3239 	/* Save calibration result. */
3240 	if (sc->calibcmd[idx].buf != NULL)
3241 		free(sc->calibcmd[idx].buf, M_DEVBUF);
3242 	sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
3243 	if (sc->calibcmd[idx].buf == NULL) {
3244 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
3245 		    "not enough memory for calibration result %d\n",
3246 		    calib->code);
3247 		return;
3248 	}
3249 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
3250 	    "saving calibration result idx=%d, code=%d len=%d\n", idx, calib->code, len);
3251 	sc->calibcmd[idx].len = len;
3252 	memcpy(sc->calibcmd[idx].buf, calib, len);
3253 }
3254 
3255 static void
3256 iwn_stats_update(struct iwn_softc *sc, struct iwn_calib_state *calib,
3257     struct iwn_stats *stats, int len)
3258 {
3259 	struct iwn_stats_bt *stats_bt;
3260 	struct iwn_stats *lstats;
3261 
3262 	/*
3263 	 * First - check whether the length is the bluetooth or normal.
3264 	 *
3265 	 * If it's normal - just copy it and bump out.
3266 	 * Otherwise we have to convert things.
3267 	 */
3268 
3269 	if (len == sizeof(struct iwn_stats) + 4) {
3270 		memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats));
3271 		sc->last_stat_valid = 1;
3272 		return;
3273 	}
3274 
3275 	/*
3276 	 * If it's not the bluetooth size - log, then just copy.
3277 	 */
3278 	if (len != sizeof(struct iwn_stats_bt) + 4) {
3279 		DPRINTF(sc, IWN_DEBUG_STATS,
3280 		    "%s: size of rx statistics (%d) not an expected size!\n",
3281 		    __func__,
3282 		    len);
3283 		memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats));
3284 		sc->last_stat_valid = 1;
3285 		return;
3286 	}
3287 
3288 	/*
3289 	 * Ok. Time to copy.
3290 	 */
3291 	stats_bt = (struct iwn_stats_bt *) stats;
3292 	lstats = &sc->last_stat;
3293 
3294 	/* flags */
3295 	lstats->flags = stats_bt->flags;
3296 	/* rx_bt */
3297 	memcpy(&lstats->rx.ofdm, &stats_bt->rx_bt.ofdm,
3298 	    sizeof(struct iwn_rx_phy_stats));
3299 	memcpy(&lstats->rx.cck, &stats_bt->rx_bt.cck,
3300 	    sizeof(struct iwn_rx_phy_stats));
3301 	memcpy(&lstats->rx.general, &stats_bt->rx_bt.general_bt.common,
3302 	    sizeof(struct iwn_rx_general_stats));
3303 	memcpy(&lstats->rx.ht, &stats_bt->rx_bt.ht,
3304 	    sizeof(struct iwn_rx_ht_phy_stats));
3305 	/* tx */
3306 	memcpy(&lstats->tx, &stats_bt->tx,
3307 	    sizeof(struct iwn_tx_stats));
3308 	/* general */
3309 	memcpy(&lstats->general, &stats_bt->general,
3310 	    sizeof(struct iwn_general_stats));
3311 
3312 	/* XXX TODO: Squirrel away the extra bluetooth stats somewhere */
3313 	sc->last_stat_valid = 1;
3314 }
3315 
3316 /*
3317  * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
3318  * The latter is sent by the firmware after each received beacon.
3319  */
3320 static void
3321 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3322     struct iwn_rx_data *data)
3323 {
3324 	struct iwn_ops *ops = &sc->ops;
3325 	struct ifnet *ifp = sc->sc_ifp;
3326 	struct ieee80211com *ic = ifp->if_l2com;
3327 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3328 	struct iwn_calib_state *calib = &sc->calib;
3329 	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
3330 	struct iwn_stats *lstats;
3331 	int temp;
3332 
3333 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3334 
3335 	/* Ignore statistics received during a scan. */
3336 	if (vap->iv_state != IEEE80211_S_RUN ||
3337 	    (ic->ic_flags & IEEE80211_F_SCAN)){
3338 		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n",
3339 	    __func__);
3340 		return;
3341 	}
3342 
3343 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3344 
3345 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_STATS,
3346 	    "%s: received statistics, cmd %d, len %d\n",
3347 	    __func__, desc->type, le16toh(desc->len));
3348 	sc->calib_cnt = 0;	/* Reset TX power calibration timeout. */
3349 
3350 	/*
3351 	 * Collect/track general statistics for reporting.
3352 	 *
3353 	 * This takes care of ensuring that the bluetooth sized message
3354 	 * will be correctly converted to the legacy sized message.
3355 	 */
3356 	iwn_stats_update(sc, calib, stats, le16toh(desc->len));
3357 
3358 	/*
3359 	 * And now, let's take a reference of it to use!
3360 	 */
3361 	lstats = &sc->last_stat;
3362 
3363 	/* Test if temperature has changed. */
3364 	if (lstats->general.temp != sc->rawtemp) {
3365 		/* Convert "raw" temperature to degC. */
3366 		sc->rawtemp = stats->general.temp;
3367 		temp = ops->get_temperature(sc);
3368 		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
3369 		    __func__, temp);
3370 
3371 		/* Update TX power if need be (4965AGN only). */
3372 		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
3373 			iwn4965_power_calibration(sc, temp);
3374 	}
3375 
3376 	if (desc->type != IWN_BEACON_STATISTICS)
3377 		return;	/* Reply to a statistics request. */
3378 
3379 	sc->noise = iwn_get_noise(&lstats->rx.general);
3380 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
3381 
3382 	/* Test that RSSI and noise are present in stats report. */
3383 	if (le32toh(lstats->rx.general.flags) != 1) {
3384 		DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
3385 		    "received statistics without RSSI");
3386 		return;
3387 	}
3388 
3389 	if (calib->state == IWN_CALIB_STATE_ASSOC)
3390 		iwn_collect_noise(sc, &lstats->rx.general);
3391 	else if (calib->state == IWN_CALIB_STATE_RUN) {
3392 		iwn_tune_sensitivity(sc, &lstats->rx);
3393 		/*
3394 		 * XXX TODO: Only run the RX recovery if we're associated!
3395 		 */
3396 		iwn_check_rx_recovery(sc, lstats);
3397 		iwn_save_stats_counters(sc, lstats);
3398 	}
3399 
3400 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3401 }
3402 
3403 /*
3404  * Save the relevant statistic counters for the next calibration
3405  * pass.
3406  */
3407 static void
3408 iwn_save_stats_counters(struct iwn_softc *sc, const struct iwn_stats *rs)
3409 {
3410 	struct iwn_calib_state *calib = &sc->calib;
3411 
3412 	/* Save counters values for next call. */
3413 	calib->bad_plcp_cck = le32toh(rs->rx.cck.bad_plcp);
3414 	calib->fa_cck = le32toh(rs->rx.cck.fa);
3415 	calib->bad_plcp_ht = le32toh(rs->rx.ht.bad_plcp);
3416 	calib->bad_plcp_ofdm = le32toh(rs->rx.ofdm.bad_plcp);
3417 	calib->fa_ofdm = le32toh(rs->rx.ofdm.fa);
3418 
3419 	/* Last time we received these tick values */
3420 	sc->last_calib_ticks = ticks;
3421 }
3422 
3423 /*
3424  * Process a TX_DONE firmware notification.  Unfortunately, the 4965AGN
3425  * and 5000 adapters have different incompatible TX status formats.
3426  */
3427 static void
3428 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3429     struct iwn_rx_data *data)
3430 {
3431 	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
3432 	struct iwn_tx_ring *ring;
3433 	int qid;
3434 
3435 	qid = desc->qid & 0xf;
3436 	ring = &sc->txq[qid];
3437 
3438 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
3439 	    "qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n",
3440 	    __func__, desc->qid, desc->idx,
3441 	    stat->rtsfailcnt,
3442 	    stat->ackfailcnt,
3443 	    stat->btkillcnt,
3444 	    stat->rate, le16toh(stat->duration),
3445 	    le32toh(stat->status));
3446 
3447 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3448 	if (qid >= sc->firstaggqueue) {
3449 		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
3450 		    stat->ackfailcnt, &stat->status);
3451 	} else {
3452 		iwn_tx_done(sc, desc, stat->ackfailcnt,
3453 		    le32toh(stat->status) & 0xff);
3454 	}
3455 }
3456 
3457 static void
3458 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3459     struct iwn_rx_data *data)
3460 {
3461 	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
3462 	struct iwn_tx_ring *ring;
3463 	int qid;
3464 
3465 	qid = desc->qid & 0xf;
3466 	ring = &sc->txq[qid];
3467 
3468 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
3469 	    "qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n",
3470 	    __func__, desc->qid, desc->idx,
3471 	    stat->rtsfailcnt,
3472 	    stat->ackfailcnt,
3473 	    stat->btkillcnt,
3474 	    stat->rate, le16toh(stat->duration),
3475 	    le32toh(stat->status));
3476 
3477 #ifdef notyet
3478 	/* Reset TX scheduler slot. */
3479 	iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
3480 #endif
3481 
3482 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3483 	if (qid >= sc->firstaggqueue) {
3484 		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
3485 		    stat->ackfailcnt, &stat->status);
3486 	} else {
3487 		iwn_tx_done(sc, desc, stat->ackfailcnt,
3488 		    le16toh(stat->status) & 0xff);
3489 	}
3490 }
3491 
3492 /*
3493  * Adapter-independent backend for TX_DONE firmware notifications.
3494  */
3495 static void
3496 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
3497     uint8_t status)
3498 {
3499 	struct ifnet *ifp = sc->sc_ifp;
3500 	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
3501 	struct iwn_tx_data *data = &ring->data[desc->idx];
3502 	struct mbuf *m;
3503 	struct ieee80211_node *ni;
3504 	struct ieee80211vap *vap;
3505 
3506 	KASSERT(data->ni != NULL, ("no node"));
3507 
3508 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3509 
3510 	/* Unmap and free mbuf. */
3511 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
3512 	bus_dmamap_unload(ring->data_dmat, data->map);
3513 	m = data->m, data->m = NULL;
3514 	ni = data->ni, data->ni = NULL;
3515 	vap = ni->ni_vap;
3516 
3517 	/*
3518 	 * Update rate control statistics for the node.
3519 	 */
3520 	if (status & IWN_TX_FAIL) {
3521 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
3522 		ieee80211_ratectl_tx_complete(vap, ni,
3523 		    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
3524 	} else {
3525 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
3526 		ieee80211_ratectl_tx_complete(vap, ni,
3527 		    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
3528 	}
3529 
3530 	/*
3531 	 * Channels marked for "radar" require traffic to be received
3532 	 * to unlock before we can transmit.  Until traffic is seen
3533 	 * any attempt to transmit is returned immediately with status
3534 	 * set to IWN_TX_FAIL_TX_LOCKED.  Unfortunately this can easily
3535 	 * happen on first authenticate after scanning.  To workaround
3536 	 * this we ignore a failure of this sort in AUTH state so the
3537 	 * 802.11 layer will fall back to using a timeout to wait for
3538 	 * the AUTH reply.  This allows the firmware time to see
3539 	 * traffic so a subsequent retry of AUTH succeeds.  It's
3540 	 * unclear why the firmware does not maintain state for
3541 	 * channels recently visited as this would allow immediate
3542 	 * use of the channel after a scan (where we see traffic).
3543 	 */
3544 	if (status == IWN_TX_FAIL_TX_LOCKED &&
3545 	    ni->ni_vap->iv_state == IEEE80211_S_AUTH)
3546 		ieee80211_tx_complete(ni, m, 0);
3547 	else
3548 		ieee80211_tx_complete(ni, m,
3549 		    (status & IWN_TX_FAIL) != 0);
3550 
3551 	sc->sc_tx_timer = 0;
3552 	if (--ring->queued < IWN_TX_RING_LOMARK) {
3553 		sc->qfullmsk &= ~(1 << ring->qid);
3554 		if (sc->qfullmsk == 0 &&
3555 		    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
3556 			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3557 			iwn_start_locked(ifp);
3558 		}
3559 	}
3560 
3561 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3562 
3563 }
3564 
3565 /*
3566  * Process a "command done" firmware notification.  This is where we wakeup
3567  * processes waiting for a synchronous command completion.
3568  */
3569 static void
3570 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
3571 {
3572 	struct iwn_tx_ring *ring;
3573 	struct iwn_tx_data *data;
3574 	int cmd_queue_num;
3575 
3576 	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
3577 		cmd_queue_num = IWN_PAN_CMD_QUEUE;
3578 	else
3579 		cmd_queue_num = IWN_CMD_QUEUE_NUM;
3580 
3581 	if ((desc->qid & IWN_RX_DESC_QID_MSK) != cmd_queue_num)
3582 		return;	/* Not a command ack. */
3583 
3584 	ring = &sc->txq[cmd_queue_num];
3585 	data = &ring->data[desc->idx];
3586 
3587 	/* If the command was mapped in an mbuf, free it. */
3588 	if (data->m != NULL) {
3589 		bus_dmamap_sync(ring->data_dmat, data->map,
3590 		    BUS_DMASYNC_POSTWRITE);
3591 		bus_dmamap_unload(ring->data_dmat, data->map);
3592 		m_freem(data->m);
3593 		data->m = NULL;
3594 	}
3595 	wakeup(&ring->desc[desc->idx]);
3596 }
3597 
3598 static void
3599 iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
3600     int ackfailcnt, void *stat)
3601 {
3602 	struct iwn_ops *ops = &sc->ops;
3603 	struct ifnet *ifp = sc->sc_ifp;
3604 	struct iwn_tx_ring *ring = &sc->txq[qid];
3605 	struct iwn_tx_data *data;
3606 	struct mbuf *m;
3607 	struct iwn_node *wn;
3608 	struct ieee80211_node *ni;
3609 	struct ieee80211_tx_ampdu *tap;
3610 	uint64_t bitmap;
3611 	uint32_t *status = stat;
3612 	uint16_t *aggstatus = stat;
3613 	uint16_t ssn;
3614 	uint8_t tid;
3615 	int bit, i, lastidx, *res, seqno, shift, start;
3616 
3617 	/* XXX TODO: status is le16 field! Grr */
3618 
3619 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3620 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: nframes=%d, status=0x%08x\n",
3621 	    __func__,
3622 	    nframes,
3623 	    *status);
3624 
3625 	tap = sc->qid2tap[qid];
3626 	tid = tap->txa_tid;
3627 	wn = (void *)tap->txa_ni;
3628 	ni = tap->txa_ni;
3629 
3630 	/*
3631 	 * XXX TODO: ACK and RTS failures would be nice here!
3632 	 */
3633 
3634 	/*
3635 	 * A-MPDU single frame status - if we failed to transmit it
3636 	 * in A-MPDU, then it may be a permanent failure.
3637 	 *
3638 	 * XXX TODO: check what the Linux iwlwifi driver does here;
3639 	 * there's some permanent and temporary failures that may be
3640 	 * handled differently.
3641 	 */
3642 	if (nframes == 1) {
3643 		if ((*status & 0xff) != 1 && (*status & 0xff) != 2) {
3644 #ifdef	NOT_YET
3645 			printf("ieee80211_send_bar()\n");
3646 #endif
3647 			/*
3648 			 * If we completely fail a transmit, make sure a
3649 			 * notification is pushed up to the rate control
3650 			 * layer.
3651 			 */
3652 			ieee80211_ratectl_tx_complete(ni->ni_vap,
3653 			    ni,
3654 			    IEEE80211_RATECTL_TX_FAILURE,
3655 			    &ackfailcnt,
3656 			    NULL);
3657 		} else {
3658 			/*
3659 			 * If nframes=1, then we won't be getting a BA for
3660 			 * this frame.  Ensure that we correctly update the
3661 			 * rate control code with how many retries were
3662 			 * needed to send it.
3663 			 */
3664 			ieee80211_ratectl_tx_complete(ni->ni_vap,
3665 			    ni,
3666 			    IEEE80211_RATECTL_TX_SUCCESS,
3667 			    &ackfailcnt,
3668 			    NULL);
3669 		}
3670 	}
3671 
3672 	bitmap = 0;
3673 	start = idx;
3674 	for (i = 0; i < nframes; i++) {
3675 		if (le16toh(aggstatus[i * 2]) & 0xc)
3676 			continue;
3677 
3678 		idx = le16toh(aggstatus[2*i + 1]) & 0xff;
3679 		bit = idx - start;
3680 		shift = 0;
3681 		if (bit >= 64) {
3682 			shift = 0x100 - idx + start;
3683 			bit = 0;
3684 			start = idx;
3685 		} else if (bit <= -64)
3686 			bit = 0x100 - start + idx;
3687 		else if (bit < 0) {
3688 			shift = start - idx;
3689 			start = idx;
3690 			bit = 0;
3691 		}
3692 		bitmap = bitmap << shift;
3693 		bitmap |= 1ULL << bit;
3694 	}
3695 	tap = sc->qid2tap[qid];
3696 	tid = tap->txa_tid;
3697 	wn = (void *)tap->txa_ni;
3698 	wn->agg[tid].bitmap = bitmap;
3699 	wn->agg[tid].startidx = start;
3700 	wn->agg[tid].nframes = nframes;
3701 
3702 	res = NULL;
3703 	ssn = 0;
3704 	if (!IEEE80211_AMPDU_RUNNING(tap)) {
3705 		res = tap->txa_private;
3706 		ssn = tap->txa_start & 0xfff;
3707 	}
3708 
3709 	/* This is going nframes DWORDS into the descriptor? */
3710 	seqno = le32toh(*(status + nframes)) & 0xfff;
3711 	for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
3712 		data = &ring->data[ring->read];
3713 
3714 		/* Unmap and free mbuf. */
3715 		bus_dmamap_sync(ring->data_dmat, data->map,
3716 		    BUS_DMASYNC_POSTWRITE);
3717 		bus_dmamap_unload(ring->data_dmat, data->map);
3718 		m = data->m, data->m = NULL;
3719 		ni = data->ni, data->ni = NULL;
3720 
3721 		KASSERT(ni != NULL, ("no node"));
3722 		KASSERT(m != NULL, ("no mbuf"));
3723 		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: freeing m=%p\n", __func__, m);
3724 		ieee80211_tx_complete(ni, m, 1);
3725 
3726 		ring->queued--;
3727 		ring->read = (ring->read + 1) % IWN_TX_RING_COUNT;
3728 	}
3729 
3730 	if (ring->queued == 0 && res != NULL) {
3731 		iwn_nic_lock(sc);
3732 		ops->ampdu_tx_stop(sc, qid, tid, ssn);
3733 		iwn_nic_unlock(sc);
3734 		sc->qid2tap[qid] = NULL;
3735 		free(res, M_DEVBUF);
3736 		return;
3737 	}
3738 
3739 	sc->sc_tx_timer = 0;
3740 	if (ring->queued < IWN_TX_RING_LOMARK) {
3741 		sc->qfullmsk &= ~(1 << ring->qid);
3742 		if (sc->qfullmsk == 0 &&
3743 		    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
3744 			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3745 			iwn_start_locked(ifp);
3746 		}
3747 	}
3748 
3749 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3750 
3751 }
3752 
3753 /*
3754  * Process an INT_FH_RX or INT_SW_RX interrupt.
3755  */
3756 static void
3757 iwn_notif_intr(struct iwn_softc *sc)
3758 {
3759 	struct iwn_ops *ops = &sc->ops;
3760 	struct ifnet *ifp = sc->sc_ifp;
3761 	struct ieee80211com *ic = ifp->if_l2com;
3762 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3763 	uint16_t hw;
3764 
3765 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
3766 	    BUS_DMASYNC_POSTREAD);
3767 
3768 	hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
3769 	while (sc->rxq.cur != hw) {
3770 		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
3771 		struct iwn_rx_desc *desc;
3772 
3773 		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3774 		    BUS_DMASYNC_POSTREAD);
3775 		desc = mtod(data->m, struct iwn_rx_desc *);
3776 
3777 		DPRINTF(sc, IWN_DEBUG_RECV,
3778 		    "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n",
3779 		    __func__, sc->rxq.cur, desc->qid & 0xf, desc->idx, desc->flags,
3780 		    desc->type, iwn_intr_str(desc->type),
3781 		    le16toh(desc->len));
3782 
3783 		if (!(desc->qid & IWN_UNSOLICITED_RX_NOTIF))	/* Reply to a command. */
3784 			iwn_cmd_done(sc, desc);
3785 
3786 		switch (desc->type) {
3787 		case IWN_RX_PHY:
3788 			iwn_rx_phy(sc, desc, data);
3789 			break;
3790 
3791 		case IWN_RX_DONE:		/* 4965AGN only. */
3792 		case IWN_MPDU_RX_DONE:
3793 			/* An 802.11 frame has been received. */
3794 			iwn_rx_done(sc, desc, data);
3795 			break;
3796 
3797 		case IWN_RX_COMPRESSED_BA:
3798 			/* A Compressed BlockAck has been received. */
3799 			iwn_rx_compressed_ba(sc, desc, data);
3800 			break;
3801 
3802 		case IWN_TX_DONE:
3803 			/* An 802.11 frame has been transmitted. */
3804 			ops->tx_done(sc, desc, data);
3805 			break;
3806 
3807 		case IWN_RX_STATISTICS:
3808 		case IWN_BEACON_STATISTICS:
3809 			iwn_rx_statistics(sc, desc, data);
3810 			break;
3811 
3812 		case IWN_BEACON_MISSED:
3813 		{
3814 			struct iwn_beacon_missed *miss =
3815 			    (struct iwn_beacon_missed *)(desc + 1);
3816 			int misses;
3817 
3818 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3819 			    BUS_DMASYNC_POSTREAD);
3820 			misses = le32toh(miss->consecutive);
3821 
3822 			DPRINTF(sc, IWN_DEBUG_STATE,
3823 			    "%s: beacons missed %d/%d\n", __func__,
3824 			    misses, le32toh(miss->total));
3825 			/*
3826 			 * If more than 5 consecutive beacons are missed,
3827 			 * reinitialize the sensitivity state machine.
3828 			 */
3829 			if (vap->iv_state == IEEE80211_S_RUN &&
3830 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
3831 				if (misses > 5)
3832 					(void)iwn_init_sensitivity(sc);
3833 				if (misses >= vap->iv_bmissthreshold) {
3834 					IWN_UNLOCK(sc);
3835 					ieee80211_beacon_miss(ic);
3836 					IWN_LOCK(sc);
3837 				}
3838 			}
3839 			break;
3840 		}
3841 		case IWN_UC_READY:
3842 		{
3843 			struct iwn_ucode_info *uc =
3844 			    (struct iwn_ucode_info *)(desc + 1);
3845 
3846 			/* The microcontroller is ready. */
3847 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3848 			    BUS_DMASYNC_POSTREAD);
3849 			DPRINTF(sc, IWN_DEBUG_RESET,
3850 			    "microcode alive notification version=%d.%d "
3851 			    "subtype=%x alive=%x\n", uc->major, uc->minor,
3852 			    uc->subtype, le32toh(uc->valid));
3853 
3854 			if (le32toh(uc->valid) != 1) {
3855 				device_printf(sc->sc_dev,
3856 				    "microcontroller initialization failed");
3857 				break;
3858 			}
3859 			if (uc->subtype == IWN_UCODE_INIT) {
3860 				/* Save microcontroller report. */
3861 				memcpy(&sc->ucode_info, uc, sizeof (*uc));
3862 			}
3863 			/* Save the address of the error log in SRAM. */
3864 			sc->errptr = le32toh(uc->errptr);
3865 			break;
3866 		}
3867 		case IWN_STATE_CHANGED:
3868 		{
3869 			/*
3870 			 * State change allows hardware switch change to be
3871 			 * noted. However, we handle this in iwn_intr as we
3872 			 * get both the enable/disble intr.
3873 			 */
3874 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3875 			    BUS_DMASYNC_POSTREAD);
3876 #ifdef	IWN_DEBUG
3877 			uint32_t *status = (uint32_t *)(desc + 1);
3878 			DPRINTF(sc, IWN_DEBUG_INTR | IWN_DEBUG_STATE,
3879 			    "state changed to %x\n",
3880 			    le32toh(*status));
3881 #endif
3882 			break;
3883 		}
3884 		case IWN_START_SCAN:
3885 		{
3886 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3887 			    BUS_DMASYNC_POSTREAD);
3888 #ifdef	IWN_DEBUG
3889 			struct iwn_start_scan *scan =
3890 			    (struct iwn_start_scan *)(desc + 1);
3891 			DPRINTF(sc, IWN_DEBUG_ANY,
3892 			    "%s: scanning channel %d status %x\n",
3893 			    __func__, scan->chan, le32toh(scan->status));
3894 #endif
3895 			break;
3896 		}
3897 		case IWN_STOP_SCAN:
3898 		{
3899 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3900 			    BUS_DMASYNC_POSTREAD);
3901 #ifdef	IWN_DEBUG
3902 			struct iwn_stop_scan *scan =
3903 			    (struct iwn_stop_scan *)(desc + 1);
3904 			DPRINTF(sc, IWN_DEBUG_STATE | IWN_DEBUG_SCAN,
3905 			    "scan finished nchan=%d status=%d chan=%d\n",
3906 			    scan->nchan, scan->status, scan->chan);
3907 #endif
3908 			sc->sc_is_scanning = 0;
3909 			IWN_UNLOCK(sc);
3910 			ieee80211_scan_next(vap);
3911 			IWN_LOCK(sc);
3912 			break;
3913 		}
3914 		case IWN5000_CALIBRATION_RESULT:
3915 			iwn5000_rx_calib_results(sc, desc, data);
3916 			break;
3917 
3918 		case IWN5000_CALIBRATION_DONE:
3919 			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
3920 			wakeup(sc);
3921 			break;
3922 		}
3923 
3924 		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
3925 	}
3926 
3927 	/* Tell the firmware what we have processed. */
3928 	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
3929 	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
3930 }
3931 
3932 /*
3933  * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
3934  * from power-down sleep mode.
3935  */
3936 static void
3937 iwn_wakeup_intr(struct iwn_softc *sc)
3938 {
3939 	int qid;
3940 
3941 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
3942 	    __func__);
3943 
3944 	/* Wakeup RX and TX rings. */
3945 	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
3946 	for (qid = 0; qid < sc->ntxqs; qid++) {
3947 		struct iwn_tx_ring *ring = &sc->txq[qid];
3948 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
3949 	}
3950 }
3951 
3952 static void
3953 iwn_rftoggle_intr(struct iwn_softc *sc)
3954 {
3955 	struct ifnet *ifp = sc->sc_ifp;
3956 	struct ieee80211com *ic = ifp->if_l2com;
3957 	uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
3958 
3959 	IWN_LOCK_ASSERT(sc);
3960 
3961 	device_printf(sc->sc_dev, "RF switch: radio %s\n",
3962 	    (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
3963 	if (tmp & IWN_GP_CNTRL_RFKILL)
3964 		ieee80211_runtask(ic, &sc->sc_radioon_task);
3965 	else
3966 		ieee80211_runtask(ic, &sc->sc_radiooff_task);
3967 }
3968 
3969 /*
3970  * Dump the error log of the firmware when a firmware panic occurs.  Although
3971  * we can't debug the firmware because it is neither open source nor free, it
3972  * can help us to identify certain classes of problems.
3973  */
3974 static void
3975 iwn_fatal_intr(struct iwn_softc *sc)
3976 {
3977 	struct iwn_fw_dump dump;
3978 	int i;
3979 
3980 	IWN_LOCK_ASSERT(sc);
3981 
3982 	/* Force a complete recalibration on next init. */
3983 	sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
3984 
3985 	/* Check that the error log address is valid. */
3986 	if (sc->errptr < IWN_FW_DATA_BASE ||
3987 	    sc->errptr + sizeof (dump) >
3988 	    IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
3989 		printf("%s: bad firmware error log address 0x%08x\n", __func__,
3990 		    sc->errptr);
3991 		return;
3992 	}
3993 	if (iwn_nic_lock(sc) != 0) {
3994 		printf("%s: could not read firmware error log\n", __func__);
3995 		return;
3996 	}
3997 	/* Read firmware error log from SRAM. */
3998 	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
3999 	    sizeof (dump) / sizeof (uint32_t));
4000 	iwn_nic_unlock(sc);
4001 
4002 	if (dump.valid == 0) {
4003 		printf("%s: firmware error log is empty\n", __func__);
4004 		return;
4005 	}
4006 	printf("firmware error log:\n");
4007 	printf("  error type      = \"%s\" (0x%08X)\n",
4008 	    (dump.id < nitems(iwn_fw_errmsg)) ?
4009 		iwn_fw_errmsg[dump.id] : "UNKNOWN",
4010 	    dump.id);
4011 	printf("  program counter = 0x%08X\n", dump.pc);
4012 	printf("  source line     = 0x%08X\n", dump.src_line);
4013 	printf("  error data      = 0x%08X%08X\n",
4014 	    dump.error_data[0], dump.error_data[1]);
4015 	printf("  branch link     = 0x%08X%08X\n",
4016 	    dump.branch_link[0], dump.branch_link[1]);
4017 	printf("  interrupt link  = 0x%08X%08X\n",
4018 	    dump.interrupt_link[0], dump.interrupt_link[1]);
4019 	printf("  time            = %u\n", dump.time[0]);
4020 
4021 	/* Dump driver status (TX and RX rings) while we're here. */
4022 	printf("driver status:\n");
4023 	for (i = 0; i < sc->ntxqs; i++) {
4024 		struct iwn_tx_ring *ring = &sc->txq[i];
4025 		printf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
4026 		    i, ring->qid, ring->cur, ring->queued);
4027 	}
4028 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
4029 }
4030 
4031 static void
4032 iwn_intr(void *arg)
4033 {
4034 	struct iwn_softc *sc = arg;
4035 	struct ifnet *ifp = sc->sc_ifp;
4036 	uint32_t r1, r2, tmp;
4037 
4038 	IWN_LOCK(sc);
4039 
4040 	/* Disable interrupts. */
4041 	IWN_WRITE(sc, IWN_INT_MASK, 0);
4042 
4043 	/* Read interrupts from ICT (fast) or from registers (slow). */
4044 	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
4045 		tmp = 0;
4046 		while (sc->ict[sc->ict_cur] != 0) {
4047 			tmp |= sc->ict[sc->ict_cur];
4048 			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
4049 			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
4050 		}
4051 		tmp = le32toh(tmp);
4052 		if (tmp == 0xffffffff)	/* Shouldn't happen. */
4053 			tmp = 0;
4054 		else if (tmp & 0xc0000)	/* Workaround a HW bug. */
4055 			tmp |= 0x8000;
4056 		r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
4057 		r2 = 0;	/* Unused. */
4058 	} else {
4059 		r1 = IWN_READ(sc, IWN_INT);
4060 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) {
4061 			IWN_UNLOCK(sc);
4062 			return;	/* Hardware gone! */
4063 		}
4064 		r2 = IWN_READ(sc, IWN_FH_INT);
4065 	}
4066 
4067 	DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n"
4068     , r1, r2);
4069 
4070 	if (r1 == 0 && r2 == 0)
4071 		goto done;	/* Interrupt not for us. */
4072 
4073 	/* Acknowledge interrupts. */
4074 	IWN_WRITE(sc, IWN_INT, r1);
4075 	if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
4076 		IWN_WRITE(sc, IWN_FH_INT, r2);
4077 
4078 	if (r1 & IWN_INT_RF_TOGGLED) {
4079 		iwn_rftoggle_intr(sc);
4080 		goto done;
4081 	}
4082 	if (r1 & IWN_INT_CT_REACHED) {
4083 		device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
4084 		    __func__);
4085 	}
4086 	if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
4087 		device_printf(sc->sc_dev, "%s: fatal firmware error\n",
4088 		    __func__);
4089 #ifdef	IWN_DEBUG
4090 		iwn_debug_register(sc);
4091 #endif
4092 		/* Dump firmware error log and stop. */
4093 		iwn_fatal_intr(sc);
4094 
4095 		taskqueue_enqueue(sc->sc_tq, &sc->sc_panic_task);
4096 		goto done;
4097 	}
4098 	if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
4099 	    (r2 & IWN_FH_INT_RX)) {
4100 		if (sc->sc_flags & IWN_FLAG_USE_ICT) {
4101 			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
4102 				IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
4103 			IWN_WRITE_1(sc, IWN_INT_PERIODIC,
4104 			    IWN_INT_PERIODIC_DIS);
4105 			iwn_notif_intr(sc);
4106 			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
4107 				IWN_WRITE_1(sc, IWN_INT_PERIODIC,
4108 				    IWN_INT_PERIODIC_ENA);
4109 			}
4110 		} else
4111 			iwn_notif_intr(sc);
4112 	}
4113 
4114 	if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
4115 		if (sc->sc_flags & IWN_FLAG_USE_ICT)
4116 			IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
4117 		wakeup(sc);	/* FH DMA transfer completed. */
4118 	}
4119 
4120 	if (r1 & IWN_INT_ALIVE)
4121 		wakeup(sc);	/* Firmware is alive. */
4122 
4123 	if (r1 & IWN_INT_WAKEUP)
4124 		iwn_wakeup_intr(sc);
4125 
4126 done:
4127 	/* Re-enable interrupts. */
4128 	if (ifp->if_flags & IFF_UP)
4129 		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
4130 
4131 	IWN_UNLOCK(sc);
4132 }
4133 
4134 /*
4135  * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
4136  * 5000 adapters use a slightly different format).
4137  */
4138 static void
4139 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
4140     uint16_t len)
4141 {
4142 	uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
4143 
4144 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4145 
4146 	*w = htole16(len + 8);
4147 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4148 	    BUS_DMASYNC_PREWRITE);
4149 	if (idx < IWN_SCHED_WINSZ) {
4150 		*(w + IWN_TX_RING_COUNT) = *w;
4151 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4152 		    BUS_DMASYNC_PREWRITE);
4153 	}
4154 }
4155 
4156 static void
4157 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
4158     uint16_t len)
4159 {
4160 	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
4161 
4162 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4163 
4164 	*w = htole16(id << 12 | (len + 8));
4165 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4166 	    BUS_DMASYNC_PREWRITE);
4167 	if (idx < IWN_SCHED_WINSZ) {
4168 		*(w + IWN_TX_RING_COUNT) = *w;
4169 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4170 		    BUS_DMASYNC_PREWRITE);
4171 	}
4172 }
4173 
4174 #ifdef notyet
4175 static void
4176 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
4177 {
4178 	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
4179 
4180 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4181 
4182 	*w = (*w & htole16(0xf000)) | htole16(1);
4183 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4184 	    BUS_DMASYNC_PREWRITE);
4185 	if (idx < IWN_SCHED_WINSZ) {
4186 		*(w + IWN_TX_RING_COUNT) = *w;
4187 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4188 		    BUS_DMASYNC_PREWRITE);
4189 	}
4190 }
4191 #endif
4192 
4193 /*
4194  * Check whether OFDM 11g protection will be enabled for the given rate.
4195  *
4196  * The original driver code only enabled protection for OFDM rates.
4197  * It didn't check to see whether it was operating in 11a or 11bg mode.
4198  */
4199 static int
4200 iwn_check_rate_needs_protection(struct iwn_softc *sc,
4201     struct ieee80211vap *vap, uint8_t rate)
4202 {
4203 	struct ieee80211com *ic = vap->iv_ic;
4204 
4205 	/*
4206 	 * Not in 2GHz mode? Then there's no need to enable OFDM
4207 	 * 11bg protection.
4208 	 */
4209 	if (! IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
4210 		return (0);
4211 	}
4212 
4213 	/*
4214 	 * 11bg protection not enabled? Then don't use it.
4215 	 */
4216 	if ((ic->ic_flags & IEEE80211_F_USEPROT) == 0)
4217 		return (0);
4218 
4219 	/*
4220 	 * If it's an 11n rate - no protection.
4221 	 * We'll do it via a specific 11n check.
4222 	 */
4223 	if (rate & IEEE80211_RATE_MCS) {
4224 		return (0);
4225 	}
4226 
4227 	/*
4228 	 * Do a rate table lookup.  If the PHY is CCK,
4229 	 * don't do protection.
4230 	 */
4231 	if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_CCK)
4232 		return (0);
4233 
4234 	/*
4235 	 * Yup, enable protection.
4236 	 */
4237 	return (1);
4238 }
4239 
4240 /*
4241  * return a value between 0 and IWN_MAX_TX_RETRIES-1 as an index into
4242  * the link quality table that reflects this particular entry.
4243  */
4244 static int
4245 iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni,
4246     uint8_t rate)
4247 {
4248 	struct ieee80211_rateset *rs;
4249 	int is_11n;
4250 	int nr;
4251 	int i;
4252 	uint8_t cmp_rate;
4253 
4254 	/*
4255 	 * Figure out if we're using 11n or not here.
4256 	 */
4257 	if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0)
4258 		is_11n = 1;
4259 	else
4260 		is_11n = 0;
4261 
4262 	/*
4263 	 * Use the correct rate table.
4264 	 */
4265 	if (is_11n) {
4266 		rs = (struct ieee80211_rateset *) &ni->ni_htrates;
4267 		nr = ni->ni_htrates.rs_nrates;
4268 	} else {
4269 		rs = &ni->ni_rates;
4270 		nr = rs->rs_nrates;
4271 	}
4272 
4273 	/*
4274 	 * Find the relevant link quality entry in the table.
4275 	 */
4276 	for (i = 0; i < nr && i < IWN_MAX_TX_RETRIES - 1 ; i++) {
4277 		/*
4278 		 * The link quality table index starts at 0 == highest
4279 		 * rate, so we walk the rate table backwards.
4280 		 */
4281 		cmp_rate = rs->rs_rates[(nr - 1) - i];
4282 		if (rate & IEEE80211_RATE_MCS)
4283 			cmp_rate |= IEEE80211_RATE_MCS;
4284 
4285 #if 0
4286 		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: idx %d: nr=%d, rate=0x%02x, rateentry=0x%02x\n",
4287 		    __func__,
4288 		    i,
4289 		    nr,
4290 		    rate,
4291 		    cmp_rate);
4292 #endif
4293 
4294 		if (cmp_rate == rate)
4295 			return (i);
4296 	}
4297 
4298 	/* Failed? Start at the end */
4299 	return (IWN_MAX_TX_RETRIES - 1);
4300 }
4301 
4302 static int
4303 iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
4304 {
4305 	struct iwn_ops *ops = &sc->ops;
4306 	const struct ieee80211_txparam *tp;
4307 	struct ieee80211vap *vap = ni->ni_vap;
4308 	struct ieee80211com *ic = ni->ni_ic;
4309 	struct iwn_node *wn = (void *)ni;
4310 	struct iwn_tx_ring *ring;
4311 	struct iwn_tx_desc *desc;
4312 	struct iwn_tx_data *data;
4313 	struct iwn_tx_cmd *cmd;
4314 	struct iwn_cmd_data *tx;
4315 	struct ieee80211_frame *wh;
4316 	struct ieee80211_key *k = NULL;
4317 	struct mbuf *m1;
4318 	uint32_t flags;
4319 	uint16_t qos;
4320 	u_int hdrlen;
4321 	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
4322 	uint8_t tid, type;
4323 	int ac, i, totlen, error, pad, nsegs = 0, rate;
4324 
4325 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4326 
4327 	IWN_LOCK_ASSERT(sc);
4328 
4329 	wh = mtod(m, struct ieee80211_frame *);
4330 	hdrlen = ieee80211_anyhdrsize(wh);
4331 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4332 
4333 	/* Select EDCA Access Category and TX ring for this frame. */
4334 	if (IEEE80211_QOS_HAS_SEQ(wh)) {
4335 		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
4336 		tid = qos & IEEE80211_QOS_TID;
4337 	} else {
4338 		qos = 0;
4339 		tid = 0;
4340 	}
4341 	ac = M_WME_GETAC(m);
4342 	if (m->m_flags & M_AMPDU_MPDU) {
4343 		uint16_t seqno;
4344 		struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];
4345 
4346 		if (!IEEE80211_AMPDU_RUNNING(tap)) {
4347 			m_freem(m);
4348 			return EINVAL;
4349 		}
4350 
4351 		/*
4352 		 * Queue this frame to the hardware ring that we've
4353 		 * negotiated AMPDU TX on.
4354 		 *
4355 		 * Note that the sequence number must match the TX slot
4356 		 * being used!
4357 		 */
4358 		ac = *(int *)tap->txa_private;
4359 		seqno = ni->ni_txseqs[tid];
4360 		*(uint16_t *)wh->i_seq =
4361 		    htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
4362 		ring = &sc->txq[ac];
4363 		if ((seqno % 256) != ring->cur) {
4364 			device_printf(sc->sc_dev,
4365 			    "%s: m=%p: seqno (%d) (%d) != ring index (%d) !\n",
4366 			    __func__,
4367 			    m,
4368 			    seqno,
4369 			    seqno % 256,
4370 			    ring->cur);
4371 		}
4372 		ni->ni_txseqs[tid]++;
4373 	}
4374 	ring = &sc->txq[ac];
4375 	desc = &ring->desc[ring->cur];
4376 	data = &ring->data[ring->cur];
4377 
4378 	/* Choose a TX rate index. */
4379 	tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
4380 	if (type == IEEE80211_FC0_TYPE_MGT)
4381 		rate = tp->mgmtrate;
4382 	else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
4383 		rate = tp->mcastrate;
4384 	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
4385 		rate = tp->ucastrate;
4386 	else if (m->m_flags & M_EAPOL)
4387 		rate = tp->mgmtrate;
4388 	else {
4389 		/* XXX pass pktlen */
4390 		(void) ieee80211_ratectl_rate(ni, NULL, 0);
4391 		rate = ni->ni_txrate;
4392 	}
4393 
4394 	/* Encrypt the frame if need be. */
4395 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
4396 		/* Retrieve key for TX. */
4397 		k = ieee80211_crypto_encap(ni, m);
4398 		if (k == NULL) {
4399 			m_freem(m);
4400 			return ENOBUFS;
4401 		}
4402 		/* 802.11 header may have moved. */
4403 		wh = mtod(m, struct ieee80211_frame *);
4404 	}
4405 	totlen = m->m_pkthdr.len;
4406 
4407 	if (ieee80211_radiotap_active_vap(vap)) {
4408 		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
4409 
4410 		tap->wt_flags = 0;
4411 		tap->wt_rate = rate;
4412 		if (k != NULL)
4413 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4414 
4415 		ieee80211_radiotap_tx(vap, m);
4416 	}
4417 
4418 	/* Prepare TX firmware command. */
4419 	cmd = &ring->cmd[ring->cur];
4420 	cmd->code = IWN_CMD_TX_DATA;
4421 	cmd->flags = 0;
4422 	cmd->qid = ring->qid;
4423 	cmd->idx = ring->cur;
4424 
4425 	tx = (struct iwn_cmd_data *)cmd->data;
4426 	/* NB: No need to clear tx, all fields are reinitialized here. */
4427 	tx->scratch = 0;	/* clear "scratch" area */
4428 
4429 	flags = 0;
4430 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4431 		/* Unicast frame, check if an ACK is expected. */
4432 		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
4433 		    IEEE80211_QOS_ACKPOLICY_NOACK)
4434 			flags |= IWN_TX_NEED_ACK;
4435 	}
4436 	if ((wh->i_fc[0] &
4437 	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
4438 	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
4439 		flags |= IWN_TX_IMM_BA;		/* Cannot happen yet. */
4440 
4441 	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
4442 		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */
4443 
4444 	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
4445 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4446 		/* NB: Group frames are sent using CCK in 802.11b/g. */
4447 		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
4448 			flags |= IWN_TX_NEED_RTS;
4449 		} else if (iwn_check_rate_needs_protection(sc, vap, rate)) {
4450 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
4451 				flags |= IWN_TX_NEED_CTS;
4452 			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
4453 				flags |= IWN_TX_NEED_RTS;
4454 		} else if ((rate & IEEE80211_RATE_MCS) &&
4455 			(ic->ic_htprotmode == IEEE80211_PROT_RTSCTS)) {
4456 			flags |= IWN_TX_NEED_RTS;
4457 		}
4458 
4459 		/* XXX HT protection? */
4460 
4461 		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
4462 			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4463 				/* 5000 autoselects RTS/CTS or CTS-to-self. */
4464 				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
4465 				flags |= IWN_TX_NEED_PROTECTION;
4466 			} else
4467 				flags |= IWN_TX_FULL_TXOP;
4468 		}
4469 	}
4470 
4471 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4472 	    type != IEEE80211_FC0_TYPE_DATA)
4473 		tx->id = sc->broadcast_id;
4474 	else
4475 		tx->id = wn->id;
4476 
4477 	if (type == IEEE80211_FC0_TYPE_MGT) {
4478 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4479 
4480 		/* Tell HW to set timestamp in probe responses. */
4481 		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
4482 			flags |= IWN_TX_INSERT_TSTAMP;
4483 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4484 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4485 			tx->timeout = htole16(3);
4486 		else
4487 			tx->timeout = htole16(2);
4488 	} else
4489 		tx->timeout = htole16(0);
4490 
4491 	if (hdrlen & 3) {
4492 		/* First segment length must be a multiple of 4. */
4493 		flags |= IWN_TX_NEED_PADDING;
4494 		pad = 4 - (hdrlen & 3);
4495 	} else
4496 		pad = 0;
4497 
4498 	tx->len = htole16(totlen);
4499 	tx->tid = tid;
4500 	tx->rts_ntries = 60;
4501 	tx->data_ntries = 15;
4502 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4503 	tx->rate = iwn_rate_to_plcp(sc, ni, rate);
4504 	if (tx->id == sc->broadcast_id) {
4505 		/* Group or management frame. */
4506 		tx->linkq = 0;
4507 	} else {
4508 		tx->linkq = iwn_tx_rate_to_linkq_offset(sc, ni, rate);
4509 		flags |= IWN_TX_LINKQ;	/* enable MRR */
4510 	}
4511 
4512 	/* Set physical address of "scratch area". */
4513 	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
4514 	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
4515 
4516 	/* Copy 802.11 header in TX command. */
4517 	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
4518 
4519 	/* Trim 802.11 header. */
4520 	m_adj(m, hdrlen);
4521 	tx->security = 0;
4522 	tx->flags = htole32(flags);
4523 
4524 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
4525 	    &nsegs, BUS_DMA_NOWAIT);
4526 	if (error != 0) {
4527 		if (error != EFBIG) {
4528 			device_printf(sc->sc_dev,
4529 			    "%s: can't map mbuf (error %d)\n", __func__, error);
4530 			m_freem(m);
4531 			return error;
4532 		}
4533 		/* Too many DMA segments, linearize mbuf. */
4534 		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER - 1);
4535 		if (m1 == NULL) {
4536 			device_printf(sc->sc_dev,
4537 			    "%s: could not defrag mbuf\n", __func__);
4538 			m_freem(m);
4539 			return ENOBUFS;
4540 		}
4541 		m = m1;
4542 
4543 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
4544 		    segs, &nsegs, BUS_DMA_NOWAIT);
4545 		if (error != 0) {
4546 			device_printf(sc->sc_dev,
4547 			    "%s: can't map mbuf (error %d)\n", __func__, error);
4548 			m_freem(m);
4549 			return error;
4550 		}
4551 	}
4552 
4553 	data->m = m;
4554 	data->ni = ni;
4555 
4556 	DPRINTF(sc, IWN_DEBUG_XMIT,
4557 	    "%s: qid %d idx %d len %d nsegs %d flags 0x%08x rate 0x%04x plcp 0x%08x\n",
4558 	    __func__,
4559 	    ring->qid,
4560 	    ring->cur,
4561 	    m->m_pkthdr.len,
4562 	    nsegs,
4563 	    flags,
4564 	    rate,
4565 	    tx->rate);
4566 
4567 	/* Fill TX descriptor. */
4568 	desc->nsegs = 1;
4569 	if (m->m_len != 0)
4570 		desc->nsegs += nsegs;
4571 	/* First DMA segment is used by the TX command. */
4572 	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
4573 	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
4574 	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
4575 	/* Other DMA segments are for data payload. */
4576 	seg = &segs[0];
4577 	for (i = 1; i <= nsegs; i++) {
4578 		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
4579 		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
4580 		    seg->ds_len << 4);
4581 		seg++;
4582 	}
4583 
4584 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
4585 	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
4586 	    BUS_DMASYNC_PREWRITE);
4587 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4588 	    BUS_DMASYNC_PREWRITE);
4589 
4590 	/* Update TX scheduler. */
4591 	if (ring->qid >= sc->firstaggqueue)
4592 		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
4593 
4594 	/* Kick TX ring. */
4595 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4596 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4597 
4598 	/* Mark TX ring as full if we reach a certain threshold. */
4599 	if (++ring->queued > IWN_TX_RING_HIMARK)
4600 		sc->qfullmsk |= 1 << ring->qid;
4601 
4602 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4603 
4604 	return 0;
4605 }
4606 
4607 static int
4608 iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
4609     struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
4610 {
4611 	struct iwn_ops *ops = &sc->ops;
4612 //	struct ifnet *ifp = sc->sc_ifp;
4613 	struct ieee80211vap *vap = ni->ni_vap;
4614 //	struct ieee80211com *ic = ifp->if_l2com;
4615 	struct iwn_tx_cmd *cmd;
4616 	struct iwn_cmd_data *tx;
4617 	struct ieee80211_frame *wh;
4618 	struct iwn_tx_ring *ring;
4619 	struct iwn_tx_desc *desc;
4620 	struct iwn_tx_data *data;
4621 	struct mbuf *m1;
4622 	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
4623 	uint32_t flags;
4624 	u_int hdrlen;
4625 	int ac, totlen, error, pad, nsegs = 0, i, rate;
4626 	uint8_t type;
4627 
4628 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4629 
4630 	IWN_LOCK_ASSERT(sc);
4631 
4632 	wh = mtod(m, struct ieee80211_frame *);
4633 	hdrlen = ieee80211_anyhdrsize(wh);
4634 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4635 
4636 	ac = params->ibp_pri & 3;
4637 
4638 	ring = &sc->txq[ac];
4639 	desc = &ring->desc[ring->cur];
4640 	data = &ring->data[ring->cur];
4641 
4642 	/* Choose a TX rate. */
4643 	rate = params->ibp_rate0;
4644 	totlen = m->m_pkthdr.len;
4645 
4646 	/* Prepare TX firmware command. */
4647 	cmd = &ring->cmd[ring->cur];
4648 	cmd->code = IWN_CMD_TX_DATA;
4649 	cmd->flags = 0;
4650 	cmd->qid = ring->qid;
4651 	cmd->idx = ring->cur;
4652 
4653 	tx = (struct iwn_cmd_data *)cmd->data;
4654 	/* NB: No need to clear tx, all fields are reinitialized here. */
4655 	tx->scratch = 0;	/* clear "scratch" area */
4656 
4657 	flags = 0;
4658 	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
4659 		flags |= IWN_TX_NEED_ACK;
4660 	if (params->ibp_flags & IEEE80211_BPF_RTS) {
4661 		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4662 			/* 5000 autoselects RTS/CTS or CTS-to-self. */
4663 			flags &= ~IWN_TX_NEED_RTS;
4664 			flags |= IWN_TX_NEED_PROTECTION;
4665 		} else
4666 			flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
4667 	}
4668 	if (params->ibp_flags & IEEE80211_BPF_CTS) {
4669 		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4670 			/* 5000 autoselects RTS/CTS or CTS-to-self. */
4671 			flags &= ~IWN_TX_NEED_CTS;
4672 			flags |= IWN_TX_NEED_PROTECTION;
4673 		} else
4674 			flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
4675 	}
4676 	if (type == IEEE80211_FC0_TYPE_MGT) {
4677 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4678 
4679 		/* Tell HW to set timestamp in probe responses. */
4680 		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
4681 			flags |= IWN_TX_INSERT_TSTAMP;
4682 
4683 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4684 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4685 			tx->timeout = htole16(3);
4686 		else
4687 			tx->timeout = htole16(2);
4688 	} else
4689 		tx->timeout = htole16(0);
4690 
4691 	if (hdrlen & 3) {
4692 		/* First segment length must be a multiple of 4. */
4693 		flags |= IWN_TX_NEED_PADDING;
4694 		pad = 4 - (hdrlen & 3);
4695 	} else
4696 		pad = 0;
4697 
4698 	if (ieee80211_radiotap_active_vap(vap)) {
4699 		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
4700 
4701 		tap->wt_flags = 0;
4702 		tap->wt_rate = rate;
4703 
4704 		ieee80211_radiotap_tx(vap, m);
4705 	}
4706 
4707 	tx->len = htole16(totlen);
4708 	tx->tid = 0;
4709 	tx->id = sc->broadcast_id;
4710 	tx->rts_ntries = params->ibp_try1;
4711 	tx->data_ntries = params->ibp_try0;
4712 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4713 	tx->rate = iwn_rate_to_plcp(sc, ni, rate);
4714 
4715 	/* Group or management frame. */
4716 	tx->linkq = 0;
4717 
4718 	/* Set physical address of "scratch area". */
4719 	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
4720 	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
4721 
4722 	/* Copy 802.11 header in TX command. */
4723 	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
4724 
4725 	/* Trim 802.11 header. */
4726 	m_adj(m, hdrlen);
4727 	tx->security = 0;
4728 	tx->flags = htole32(flags);
4729 
4730 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
4731 	    &nsegs, BUS_DMA_NOWAIT);
4732 	if (error != 0) {
4733 		if (error != EFBIG) {
4734 			device_printf(sc->sc_dev,
4735 			    "%s: can't map mbuf (error %d)\n", __func__, error);
4736 			m_freem(m);
4737 			return error;
4738 		}
4739 		/* Too many DMA segments, linearize mbuf. */
4740 		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER - 1);
4741 		if (m1 == NULL) {
4742 			device_printf(sc->sc_dev,
4743 			    "%s: could not defrag mbuf\n", __func__);
4744 			m_freem(m);
4745 			return ENOBUFS;
4746 		}
4747 		m = m1;
4748 
4749 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
4750 		    segs, &nsegs, BUS_DMA_NOWAIT);
4751 		if (error != 0) {
4752 			device_printf(sc->sc_dev,
4753 			    "%s: can't map mbuf (error %d)\n", __func__, error);
4754 			m_freem(m);
4755 			return error;
4756 		}
4757 	}
4758 
4759 	data->m = m;
4760 	data->ni = ni;
4761 
4762 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
4763 	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
4764 
4765 	/* Fill TX descriptor. */
4766 	desc->nsegs = 1;
4767 	if (m->m_len != 0)
4768 		desc->nsegs += nsegs;
4769 	/* First DMA segment is used by the TX command. */
4770 	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
4771 	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
4772 	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
4773 	/* Other DMA segments are for data payload. */
4774 	seg = &segs[0];
4775 	for (i = 1; i <= nsegs; i++) {
4776 		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
4777 		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
4778 		    seg->ds_len << 4);
4779 		seg++;
4780 	}
4781 
4782 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
4783 	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
4784 	    BUS_DMASYNC_PREWRITE);
4785 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4786 	    BUS_DMASYNC_PREWRITE);
4787 
4788 	/* Update TX scheduler. */
4789 	if (ring->qid >= sc->firstaggqueue)
4790 		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
4791 
4792 	/* Kick TX ring. */
4793 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4794 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4795 
4796 	/* Mark TX ring as full if we reach a certain threshold. */
4797 	if (++ring->queued > IWN_TX_RING_HIMARK)
4798 		sc->qfullmsk |= 1 << ring->qid;
4799 
4800 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4801 
4802 	return 0;
4803 }
4804 
4805 static int
4806 iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
4807     const struct ieee80211_bpf_params *params)
4808 {
4809 	struct ieee80211com *ic = ni->ni_ic;
4810 	struct ifnet *ifp = ic->ic_ifp;
4811 	struct iwn_softc *sc = ifp->if_softc;
4812 	int error = 0;
4813 
4814 	DPRINTF(sc, IWN_DEBUG_XMIT | IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4815 
4816 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4817 		ieee80211_free_node(ni);
4818 		m_freem(m);
4819 		return ENETDOWN;
4820 	}
4821 
4822 	IWN_LOCK(sc);
4823 	if (params == NULL) {
4824 		/*
4825 		 * Legacy path; interpret frame contents to decide
4826 		 * precisely how to send the frame.
4827 		 */
4828 		error = iwn_tx_data(sc, m, ni);
4829 	} else {
4830 		/*
4831 		 * Caller supplied explicit parameters to use in
4832 		 * sending the frame.
4833 		 */
4834 		error = iwn_tx_data_raw(sc, m, ni, params);
4835 	}
4836 	if (error != 0) {
4837 		/* NB: m is reclaimed on tx failure */
4838 		ieee80211_free_node(ni);
4839 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
4840 	} else
4841 		sc->sc_tx_timer = 5;
4842 
4843 	IWN_UNLOCK(sc);
4844 
4845 	DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s: end\n",__func__);
4846 
4847 	return error;
4848 }
4849 
4850 static void
4851 iwn_start(struct ifnet *ifp)
4852 {
4853 	struct iwn_softc *sc = ifp->if_softc;
4854 
4855 	IWN_LOCK(sc);
4856 	iwn_start_locked(ifp);
4857 	IWN_UNLOCK(sc);
4858 }
4859 
4860 static void
4861 iwn_start_locked(struct ifnet *ifp)
4862 {
4863 	struct iwn_softc *sc = ifp->if_softc;
4864 	struct ieee80211_node *ni;
4865 	struct mbuf *m;
4866 
4867 	IWN_LOCK_ASSERT(sc);
4868 
4869 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: called\n", __func__);
4870 
4871 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
4872 	    (ifp->if_drv_flags & IFF_DRV_OACTIVE))
4873 		return;
4874 
4875 	for (;;) {
4876 		if (sc->qfullmsk != 0) {
4877 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4878 			break;
4879 		}
4880 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
4881 		if (m == NULL)
4882 			break;
4883 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4884 		if (iwn_tx_data(sc, m, ni) != 0) {
4885 			ieee80211_free_node(ni);
4886 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
4887 		} else
4888 			sc->sc_tx_timer = 5;
4889 	}
4890 
4891 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: done\n", __func__);
4892 }
4893 
4894 static void
4895 iwn_watchdog(void *arg)
4896 {
4897 	struct iwn_softc *sc = arg;
4898 	struct ifnet *ifp = sc->sc_ifp;
4899 	struct ieee80211com *ic = ifp->if_l2com;
4900 
4901 	IWN_LOCK_ASSERT(sc);
4902 
4903 	KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
4904 
4905 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4906 
4907 	if (sc->sc_tx_timer > 0) {
4908 		if (--sc->sc_tx_timer == 0) {
4909 			ic_printf(ic, "device timeout\n");
4910 			ieee80211_runtask(ic, &sc->sc_reinit_task);
4911 			return;
4912 		}
4913 	}
4914 	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
4915 }
4916 
4917 static int
4918 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
4919 {
4920 	struct iwn_softc *sc = ifp->if_softc;
4921 	struct ieee80211com *ic = ifp->if_l2com;
4922 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4923 	struct ifreq *ifr = (struct ifreq *) data;
4924 	int error = 0, startall = 0, stop = 0;
4925 
4926 	switch (cmd) {
4927 	case SIOCGIFADDR:
4928 		error = ether_ioctl(ifp, cmd, data);
4929 		break;
4930 	case SIOCSIFFLAGS:
4931 		IWN_LOCK(sc);
4932 		if (ifp->if_flags & IFF_UP) {
4933 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4934 				iwn_init_locked(sc);
4935 				if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
4936 					startall = 1;
4937 				else
4938 					stop = 1;
4939 			}
4940 		} else {
4941 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4942 				iwn_stop_locked(sc);
4943 		}
4944 		IWN_UNLOCK(sc);
4945 		if (startall)
4946 			ieee80211_start_all(ic);
4947 		else if (vap != NULL && stop)
4948 			ieee80211_stop(vap);
4949 		break;
4950 	case SIOCGIFMEDIA:
4951 		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
4952 		break;
4953 	case SIOCGIWNSTATS:
4954 		IWN_LOCK(sc);
4955 		/* XXX validate permissions/memory/etc? */
4956 		error = copyout(&sc->last_stat, ifr->ifr_data,
4957 		    sizeof(struct iwn_stats));
4958 		IWN_UNLOCK(sc);
4959 		break;
4960 	case SIOCZIWNSTATS:
4961 		IWN_LOCK(sc);
4962 		memset(&sc->last_stat, 0, sizeof(struct iwn_stats));
4963 		IWN_UNLOCK(sc);
4964 		break;
4965 	default:
4966 		error = EINVAL;
4967 		break;
4968 	}
4969 	return error;
4970 }
4971 
4972 /*
4973  * Send a command to the firmware.
4974  */
4975 static int
4976 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
4977 {
4978 	struct iwn_tx_ring *ring;
4979 	struct iwn_tx_desc *desc;
4980 	struct iwn_tx_data *data;
4981 	struct iwn_tx_cmd *cmd;
4982 	struct mbuf *m;
4983 	bus_addr_t paddr;
4984 	int totlen, error;
4985 	int cmd_queue_num;
4986 
4987 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4988 
4989 	if (async == 0)
4990 		IWN_LOCK_ASSERT(sc);
4991 
4992 	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
4993 		cmd_queue_num = IWN_PAN_CMD_QUEUE;
4994 	else
4995 		cmd_queue_num = IWN_CMD_QUEUE_NUM;
4996 
4997 	ring = &sc->txq[cmd_queue_num];
4998 	desc = &ring->desc[ring->cur];
4999 	data = &ring->data[ring->cur];
5000 	totlen = 4 + size;
5001 
5002 	if (size > sizeof cmd->data) {
5003 		/* Command is too large to fit in a descriptor. */
5004 		if (totlen > MCLBYTES)
5005 			return EINVAL;
5006 		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
5007 		if (m == NULL)
5008 			return ENOMEM;
5009 		cmd = mtod(m, struct iwn_tx_cmd *);
5010 		error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
5011 		    totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
5012 		if (error != 0) {
5013 			m_freem(m);
5014 			return error;
5015 		}
5016 		data->m = m;
5017 	} else {
5018 		cmd = &ring->cmd[ring->cur];
5019 		paddr = data->cmd_paddr;
5020 	}
5021 
5022 	cmd->code = code;
5023 	cmd->flags = 0;
5024 	cmd->qid = ring->qid;
5025 	cmd->idx = ring->cur;
5026 	memcpy(cmd->data, buf, size);
5027 
5028 	desc->nsegs = 1;
5029 	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
5030 	desc->segs[0].len  = htole16(IWN_HIADDR(paddr) | totlen << 4);
5031 
5032 	DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
5033 	    __func__, iwn_intr_str(cmd->code), cmd->code,
5034 	    cmd->flags, cmd->qid, cmd->idx);
5035 
5036 	if (size > sizeof cmd->data) {
5037 		bus_dmamap_sync(ring->data_dmat, data->map,
5038 		    BUS_DMASYNC_PREWRITE);
5039 	} else {
5040 		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
5041 		    BUS_DMASYNC_PREWRITE);
5042 	}
5043 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5044 	    BUS_DMASYNC_PREWRITE);
5045 
5046 	/* Kick command ring. */
5047 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
5048 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
5049 
5050 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5051 
5052 	return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
5053 }
5054 
5055 static int
5056 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
5057 {
5058 	struct iwn4965_node_info hnode;
5059 	caddr_t src, dst;
5060 
5061 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5062 
5063 	/*
5064 	 * We use the node structure for 5000 Series internally (it is
5065 	 * a superset of the one for 4965AGN). We thus copy the common
5066 	 * fields before sending the command.
5067 	 */
5068 	src = (caddr_t)node;
5069 	dst = (caddr_t)&hnode;
5070 	memcpy(dst, src, 48);
5071 	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
5072 	memcpy(dst + 48, src + 72, 20);
5073 	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
5074 }
5075 
5076 static int
5077 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
5078 {
5079 
5080 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5081 
5082 	/* Direct mapping. */
5083 	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
5084 }
5085 
5086 static int
5087 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
5088 {
5089 #define	RV(v)	((v) & IEEE80211_RATE_VAL)
5090 	struct iwn_node *wn = (void *)ni;
5091 	struct ieee80211_rateset *rs;
5092 	struct iwn_cmd_link_quality linkq;
5093 	int i, rate, txrate;
5094 	int is_11n;
5095 
5096 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5097 
5098 	memset(&linkq, 0, sizeof linkq);
5099 	linkq.id = wn->id;
5100 	linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc);
5101 	linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc);
5102 
5103 	linkq.ampdu_max = 32;		/* XXX negotiated? */
5104 	linkq.ampdu_threshold = 3;
5105 	linkq.ampdu_limit = htole16(4000);	/* 4ms */
5106 
5107 	DPRINTF(sc, IWN_DEBUG_XMIT,
5108 	    "%s: 1stream antenna=0x%02x, 2stream antenna=0x%02x, ntxstreams=%d\n",
5109 	    __func__,
5110 	    linkq.antmsk_1stream,
5111 	    linkq.antmsk_2stream,
5112 	    sc->ntxchains);
5113 
5114 	/*
5115 	 * Are we using 11n rates? Ensure the channel is
5116 	 * 11n _and_ we have some 11n rates, or don't
5117 	 * try.
5118 	 */
5119 	if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) {
5120 		rs = (struct ieee80211_rateset *) &ni->ni_htrates;
5121 		is_11n = 1;
5122 	} else {
5123 		rs = &ni->ni_rates;
5124 		is_11n = 0;
5125 	}
5126 
5127 	/* Start at highest available bit-rate. */
5128 	/*
5129 	 * XXX this is all very dirty!
5130 	 */
5131 	if (is_11n)
5132 		txrate = ni->ni_htrates.rs_nrates - 1;
5133 	else
5134 		txrate = rs->rs_nrates - 1;
5135 	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
5136 		uint32_t plcp;
5137 
5138 		/*
5139 		 * XXX TODO: ensure the last two slots are the two lowest
5140 		 * rate entries, just for now.
5141 		 */
5142 		if (i == 14 || i == 15)
5143 			txrate = 0;
5144 
5145 		if (is_11n)
5146 			rate = IEEE80211_RATE_MCS | rs->rs_rates[txrate];
5147 		else
5148 			rate = RV(rs->rs_rates[txrate]);
5149 
5150 		/* Do rate -> PLCP config mapping */
5151 		plcp = iwn_rate_to_plcp(sc, ni, rate);
5152 		linkq.retry[i] = plcp;
5153 		DPRINTF(sc, IWN_DEBUG_XMIT,
5154 		    "%s: i=%d, txrate=%d, rate=0x%02x, plcp=0x%08x\n",
5155 		    __func__,
5156 		    i,
5157 		    txrate,
5158 		    rate,
5159 		    le32toh(plcp));
5160 
5161 		/*
5162 		 * The mimo field is an index into the table which
5163 		 * indicates the first index where it and subsequent entries
5164 		 * will not be using MIMO.
5165 		 *
5166 		 * Since we're filling linkq from 0..15 and we're filling
5167 		 * from the higest MCS rates to the lowest rates, if we
5168 		 * _are_ doing a dual-stream rate, set mimo to idx+1 (ie,
5169 		 * the next entry.)  That way if the next entry is a non-MIMO
5170 		 * entry, we're already pointing at it.
5171 		 */
5172 		if ((le32toh(plcp) & IWN_RFLAG_MCS) &&
5173 		    RV(le32toh(plcp)) > 7)
5174 			linkq.mimo = i + 1;
5175 
5176 		/* Next retry at immediate lower bit-rate. */
5177 		if (txrate > 0)
5178 			txrate--;
5179 	}
5180 	/*
5181 	 * If we reached the end of the list and indeed we hit
5182 	 * all MIMO rates (eg 5300 doing MCS23-15) then yes,
5183 	 * set mimo to 15.  Setting it to 16 panics the firmware.
5184 	 */
5185 	if (linkq.mimo > 15)
5186 		linkq.mimo = 15;
5187 
5188 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: mimo = %d\n", __func__, linkq.mimo);
5189 
5190 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5191 
5192 	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
5193 #undef	RV
5194 }
5195 
5196 /*
5197  * Broadcast node is used to send group-addressed and management frames.
5198  */
5199 static int
5200 iwn_add_broadcast_node(struct iwn_softc *sc, int async)
5201 {
5202 	struct iwn_ops *ops = &sc->ops;
5203 	struct ifnet *ifp = sc->sc_ifp;
5204 	struct ieee80211com *ic = ifp->if_l2com;
5205 	struct iwn_node_info node;
5206 	struct iwn_cmd_link_quality linkq;
5207 	uint8_t txant;
5208 	int i, error;
5209 
5210 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5211 
5212 	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
5213 
5214 	memset(&node, 0, sizeof node);
5215 	IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
5216 	node.id = sc->broadcast_id;
5217 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
5218 	if ((error = ops->add_node(sc, &node, async)) != 0)
5219 		return error;
5220 
5221 	/* Use the first valid TX antenna. */
5222 	txant = IWN_LSB(sc->txchainmask);
5223 
5224 	memset(&linkq, 0, sizeof linkq);
5225 	linkq.id = sc->broadcast_id;
5226 	linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc);
5227 	linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc);
5228 	linkq.ampdu_max = 64;
5229 	linkq.ampdu_threshold = 3;
5230 	linkq.ampdu_limit = htole16(4000);	/* 4ms */
5231 
5232 	/* Use lowest mandatory bit-rate. */
5233 	/* XXX rate table lookup? */
5234 	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
5235 		linkq.retry[0] = htole32(0xd);
5236 	else
5237 		linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
5238 	linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
5239 	/* Use same bit-rate for all TX retries. */
5240 	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
5241 		linkq.retry[i] = linkq.retry[0];
5242 	}
5243 
5244 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5245 
5246 	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
5247 }
5248 
5249 static int
5250 iwn_updateedca(struct ieee80211com *ic)
5251 {
5252 #define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
5253 	struct iwn_softc *sc = ic->ic_softc;
5254 	struct iwn_edca_params cmd;
5255 	int aci;
5256 
5257 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5258 
5259 	memset(&cmd, 0, sizeof cmd);
5260 	cmd.flags = htole32(IWN_EDCA_UPDATE);
5261 	for (aci = 0; aci < WME_NUM_AC; aci++) {
5262 		const struct wmeParams *ac =
5263 		    &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
5264 		cmd.ac[aci].aifsn = ac->wmep_aifsn;
5265 		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
5266 		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
5267 		cmd.ac[aci].txoplimit =
5268 		    htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
5269 	}
5270 	IEEE80211_UNLOCK(ic);
5271 	IWN_LOCK(sc);
5272 	(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
5273 	IWN_UNLOCK(sc);
5274 	IEEE80211_LOCK(ic);
5275 
5276 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5277 
5278 	return 0;
5279 #undef IWN_EXP2
5280 }
5281 
5282 static void
5283 iwn_update_mcast(struct ieee80211com *ic)
5284 {
5285 	/* Ignore */
5286 }
5287 
5288 static void
5289 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
5290 {
5291 	struct iwn_cmd_led led;
5292 
5293 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5294 
5295 #if 0
5296 	/* XXX don't set LEDs during scan? */
5297 	if (sc->sc_is_scanning)
5298 		return;
5299 #endif
5300 
5301 	/* Clear microcode LED ownership. */
5302 	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
5303 
5304 	led.which = which;
5305 	led.unit = htole32(10000);	/* on/off in unit of 100ms */
5306 	led.off = off;
5307 	led.on = on;
5308 	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
5309 }
5310 
5311 /*
5312  * Set the critical temperature at which the firmware will stop the radio
5313  * and notify us.
5314  */
5315 static int
5316 iwn_set_critical_temp(struct iwn_softc *sc)
5317 {
5318 	struct iwn_critical_temp crit;
5319 	int32_t temp;
5320 
5321 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5322 
5323 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
5324 
5325 	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
5326 		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
5327 	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
5328 		temp = IWN_CTOK(110);
5329 	else
5330 		temp = 110;
5331 	memset(&crit, 0, sizeof crit);
5332 	crit.tempR = htole32(temp);
5333 	DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
5334 	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
5335 }
5336 
5337 static int
5338 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
5339 {
5340 	struct iwn_cmd_timing cmd;
5341 	uint64_t val, mod;
5342 
5343 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5344 
5345 	memset(&cmd, 0, sizeof cmd);
5346 	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
5347 	cmd.bintval = htole16(ni->ni_intval);
5348 	cmd.lintval = htole16(10);
5349 
5350 	/* Compute remaining time until next beacon. */
5351 	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
5352 	mod = le64toh(cmd.tstamp) % val;
5353 	cmd.binitval = htole32((uint32_t)(val - mod));
5354 
5355 	DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
5356 	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
5357 
5358 	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
5359 }
5360 
5361 static void
5362 iwn4965_power_calibration(struct iwn_softc *sc, int temp)
5363 {
5364 	struct ifnet *ifp = sc->sc_ifp;
5365 	struct ieee80211com *ic = ifp->if_l2com;
5366 
5367 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5368 
5369 	/* Adjust TX power if need be (delta >= 3 degC). */
5370 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
5371 	    __func__, sc->temp, temp);
5372 	if (abs(temp - sc->temp) >= 3) {
5373 		/* Record temperature of last calibration. */
5374 		sc->temp = temp;
5375 		(void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
5376 	}
5377 }
5378 
5379 /*
5380  * Set TX power for current channel (each rate has its own power settings).
5381  * This function takes into account the regulatory information from EEPROM,
5382  * the current temperature and the current voltage.
5383  */
5384 static int
5385 iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
5386     int async)
5387 {
5388 /* Fixed-point arithmetic division using a n-bit fractional part. */
5389 #define fdivround(a, b, n)	\
5390 	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
5391 /* Linear interpolation. */
5392 #define interpolate(x, x1, y1, x2, y2, n)	\
5393 	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
5394 
5395 	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
5396 	struct iwn_ucode_info *uc = &sc->ucode_info;
5397 	struct iwn4965_cmd_txpower cmd;
5398 	struct iwn4965_eeprom_chan_samples *chans;
5399 	const uint8_t *rf_gain, *dsp_gain;
5400 	int32_t vdiff, tdiff;
5401 	int i, c, grp, maxpwr;
5402 	uint8_t chan;
5403 
5404 	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
5405 	/* Retrieve current channel from last RXON. */
5406 	chan = sc->rxon->chan;
5407 	DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
5408 	    chan);
5409 
5410 	memset(&cmd, 0, sizeof cmd);
5411 	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
5412 	cmd.chan = chan;
5413 
5414 	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
5415 		maxpwr   = sc->maxpwr5GHz;
5416 		rf_gain  = iwn4965_rf_gain_5ghz;
5417 		dsp_gain = iwn4965_dsp_gain_5ghz;
5418 	} else {
5419 		maxpwr   = sc->maxpwr2GHz;
5420 		rf_gain  = iwn4965_rf_gain_2ghz;
5421 		dsp_gain = iwn4965_dsp_gain_2ghz;
5422 	}
5423 
5424 	/* Compute voltage compensation. */
5425 	vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
5426 	if (vdiff > 0)
5427 		vdiff *= 2;
5428 	if (abs(vdiff) > 2)
5429 		vdiff = 0;
5430 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5431 	    "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
5432 	    __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);
5433 
5434 	/* Get channel attenuation group. */
5435 	if (chan <= 20)		/* 1-20 */
5436 		grp = 4;
5437 	else if (chan <= 43)	/* 34-43 */
5438 		grp = 0;
5439 	else if (chan <= 70)	/* 44-70 */
5440 		grp = 1;
5441 	else if (chan <= 124)	/* 71-124 */
5442 		grp = 2;
5443 	else			/* 125-200 */
5444 		grp = 3;
5445 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5446 	    "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);
5447 
5448 	/* Get channel sub-band. */
5449 	for (i = 0; i < IWN_NBANDS; i++)
5450 		if (sc->bands[i].lo != 0 &&
5451 		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
5452 			break;
5453 	if (i == IWN_NBANDS)	/* Can't happen in real-life. */
5454 		return EINVAL;
5455 	chans = sc->bands[i].chans;
5456 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5457 	    "%s: chan %d sub-band=%d\n", __func__, chan, i);
5458 
5459 	for (c = 0; c < 2; c++) {
5460 		uint8_t power, gain, temp;
5461 		int maxchpwr, pwr, ridx, idx;
5462 
5463 		power = interpolate(chan,
5464 		    chans[0].num, chans[0].samples[c][1].power,
5465 		    chans[1].num, chans[1].samples[c][1].power, 1);
5466 		gain  = interpolate(chan,
5467 		    chans[0].num, chans[0].samples[c][1].gain,
5468 		    chans[1].num, chans[1].samples[c][1].gain, 1);
5469 		temp  = interpolate(chan,
5470 		    chans[0].num, chans[0].samples[c][1].temp,
5471 		    chans[1].num, chans[1].samples[c][1].temp, 1);
5472 		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5473 		    "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
5474 		    __func__, c, power, gain, temp);
5475 
5476 		/* Compute temperature compensation. */
5477 		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
5478 		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5479 		    "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
5480 		    __func__, tdiff, sc->temp, temp);
5481 
5482 		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
5483 			/* Convert dBm to half-dBm. */
5484 			maxchpwr = sc->maxpwr[chan] * 2;
5485 			if ((ridx / 8) & 1)
5486 				maxchpwr -= 6;	/* MIMO 2T: -3dB */
5487 
5488 			pwr = maxpwr;
5489 
5490 			/* Adjust TX power based on rate. */
5491 			if ((ridx % 8) == 5)
5492 				pwr -= 15;	/* OFDM48: -7.5dB */
5493 			else if ((ridx % 8) == 6)
5494 				pwr -= 17;	/* OFDM54: -8.5dB */
5495 			else if ((ridx % 8) == 7)
5496 				pwr -= 20;	/* OFDM60: -10dB */
5497 			else
5498 				pwr -= 10;	/* Others: -5dB */
5499 
5500 			/* Do not exceed channel max TX power. */
5501 			if (pwr > maxchpwr)
5502 				pwr = maxchpwr;
5503 
5504 			idx = gain - (pwr - power) - tdiff - vdiff;
5505 			if ((ridx / 8) & 1)	/* MIMO */
5506 				idx += (int32_t)le32toh(uc->atten[grp][c]);
5507 
5508 			if (cmd.band == 0)
5509 				idx += 9;	/* 5GHz */
5510 			if (ridx == IWN_RIDX_MAX)
5511 				idx += 5;	/* CCK */
5512 
5513 			/* Make sure idx stays in a valid range. */
5514 			if (idx < 0)
5515 				idx = 0;
5516 			else if (idx > IWN4965_MAX_PWR_INDEX)
5517 				idx = IWN4965_MAX_PWR_INDEX;
5518 
5519 			DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5520 			    "%s: Tx chain %d, rate idx %d: power=%d\n",
5521 			    __func__, c, ridx, idx);
5522 			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
5523 			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
5524 		}
5525 	}
5526 
5527 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5528 	    "%s: set tx power for chan %d\n", __func__, chan);
5529 	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
5530 
5531 #undef interpolate
5532 #undef fdivround
5533 }
5534 
5535 static int
5536 iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
5537     int async)
5538 {
5539 	struct iwn5000_cmd_txpower cmd;
5540 	int cmdid;
5541 
5542 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5543 
5544 	/*
5545 	 * TX power calibration is handled automatically by the firmware
5546 	 * for 5000 Series.
5547 	 */
5548 	memset(&cmd, 0, sizeof cmd);
5549 	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
5550 	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
5551 	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
5552 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
5553 	    "%s: setting TX power; rev=%d\n",
5554 	    __func__,
5555 	    IWN_UCODE_API(sc->ucode_rev));
5556 	if (IWN_UCODE_API(sc->ucode_rev) == 1)
5557 		cmdid = IWN_CMD_TXPOWER_DBM_V1;
5558 	else
5559 		cmdid = IWN_CMD_TXPOWER_DBM;
5560 	return iwn_cmd(sc, cmdid, &cmd, sizeof cmd, async);
5561 }
5562 
5563 /*
5564  * Retrieve the maximum RSSI (in dBm) among receivers.
5565  */
5566 static int
5567 iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
5568 {
5569 	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
5570 	uint8_t mask, agc;
5571 	int rssi;
5572 
5573 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5574 
5575 	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
5576 	agc  = (le16toh(phy->agc) >> 7) & 0x7f;
5577 
5578 	rssi = 0;
5579 	if (mask & IWN_ANT_A)
5580 		rssi = MAX(rssi, phy->rssi[0]);
5581 	if (mask & IWN_ANT_B)
5582 		rssi = MAX(rssi, phy->rssi[2]);
5583 	if (mask & IWN_ANT_C)
5584 		rssi = MAX(rssi, phy->rssi[4]);
5585 
5586 	DPRINTF(sc, IWN_DEBUG_RECV,
5587 	    "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
5588 	    mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
5589 	    rssi - agc - IWN_RSSI_TO_DBM);
5590 	return rssi - agc - IWN_RSSI_TO_DBM;
5591 }
5592 
5593 static int
5594 iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
5595 {
5596 	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
5597 	uint8_t agc;
5598 	int rssi;
5599 
5600 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5601 
5602 	agc = (le32toh(phy->agc) >> 9) & 0x7f;
5603 
5604 	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
5605 		   le16toh(phy->rssi[1]) & 0xff);
5606 	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
5607 
5608 	DPRINTF(sc, IWN_DEBUG_RECV,
5609 	    "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
5610 	    phy->rssi[0], phy->rssi[1], phy->rssi[2],
5611 	    rssi - agc - IWN_RSSI_TO_DBM);
5612 	return rssi - agc - IWN_RSSI_TO_DBM;
5613 }
5614 
5615 /*
5616  * Retrieve the average noise (in dBm) among receivers.
5617  */
5618 static int
5619 iwn_get_noise(const struct iwn_rx_general_stats *stats)
5620 {
5621 	int i, total, nbant, noise;
5622 
5623 	total = nbant = 0;
5624 	for (i = 0; i < 3; i++) {
5625 		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
5626 			continue;
5627 		total += noise;
5628 		nbant++;
5629 	}
5630 	/* There should be at least one antenna but check anyway. */
5631 	return (nbant == 0) ? -127 : (total / nbant) - 107;
5632 }
5633 
5634 /*
5635  * Compute temperature (in degC) from last received statistics.
5636  */
5637 static int
5638 iwn4965_get_temperature(struct iwn_softc *sc)
5639 {
5640 	struct iwn_ucode_info *uc = &sc->ucode_info;
5641 	int32_t r1, r2, r3, r4, temp;
5642 
5643 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5644 
5645 	r1 = le32toh(uc->temp[0].chan20MHz);
5646 	r2 = le32toh(uc->temp[1].chan20MHz);
5647 	r3 = le32toh(uc->temp[2].chan20MHz);
5648 	r4 = le32toh(sc->rawtemp);
5649 
5650 	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
5651 		return 0;
5652 
5653 	/* Sign-extend 23-bit R4 value to 32-bit. */
5654 	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
5655 	/* Compute temperature in Kelvin. */
5656 	temp = (259 * (r4 - r2)) / (r3 - r1);
5657 	temp = (temp * 97) / 100 + 8;
5658 
5659 	DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
5660 	    IWN_KTOC(temp));
5661 	return IWN_KTOC(temp);
5662 }
5663 
5664 static int
5665 iwn5000_get_temperature(struct iwn_softc *sc)
5666 {
5667 	int32_t temp;
5668 
5669 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5670 
5671 	/*
5672 	 * Temperature is not used by the driver for 5000 Series because
5673 	 * TX power calibration is handled by firmware.
5674 	 */
5675 	temp = le32toh(sc->rawtemp);
5676 	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
5677 		temp = (temp / -5) + sc->temp_off;
5678 		temp = IWN_KTOC(temp);
5679 	}
5680 	return temp;
5681 }
5682 
5683 /*
5684  * Initialize sensitivity calibration state machine.
5685  */
5686 static int
5687 iwn_init_sensitivity(struct iwn_softc *sc)
5688 {
5689 	struct iwn_ops *ops = &sc->ops;
5690 	struct iwn_calib_state *calib = &sc->calib;
5691 	uint32_t flags;
5692 	int error;
5693 
5694 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5695 
5696 	/* Reset calibration state machine. */
5697 	memset(calib, 0, sizeof (*calib));
5698 	calib->state = IWN_CALIB_STATE_INIT;
5699 	calib->cck_state = IWN_CCK_STATE_HIFA;
5700 	/* Set initial correlation values. */
5701 	calib->ofdm_x1     = sc->limits->min_ofdm_x1;
5702 	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
5703 	calib->ofdm_x4     = sc->limits->min_ofdm_x4;
5704 	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
5705 	calib->cck_x4      = 125;
5706 	calib->cck_mrc_x4  = sc->limits->min_cck_mrc_x4;
5707 	calib->energy_cck  = sc->limits->energy_cck;
5708 
5709 	/* Write initial sensitivity. */
5710 	if ((error = iwn_send_sensitivity(sc)) != 0)
5711 		return error;
5712 
5713 	/* Write initial gains. */
5714 	if ((error = ops->init_gains(sc)) != 0)
5715 		return error;
5716 
5717 	/* Request statistics at each beacon interval. */
5718 	flags = 0;
5719 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
5720 	    __func__);
5721 	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
5722 }
5723 
5724 /*
5725  * Collect noise and RSSI statistics for the first 20 beacons received
5726  * after association and use them to determine connected antennas and
5727  * to set differential gains.
5728  */
5729 static void
5730 iwn_collect_noise(struct iwn_softc *sc,
5731     const struct iwn_rx_general_stats *stats)
5732 {
5733 	struct iwn_ops *ops = &sc->ops;
5734 	struct iwn_calib_state *calib = &sc->calib;
5735 	struct ifnet *ifp = sc->sc_ifp;
5736 	struct ieee80211com *ic = ifp->if_l2com;
5737 	uint32_t val;
5738 	int i;
5739 
5740 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5741 
5742 	/* Accumulate RSSI and noise for all 3 antennas. */
5743 	for (i = 0; i < 3; i++) {
5744 		calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
5745 		calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
5746 	}
5747 	/* NB: We update differential gains only once after 20 beacons. */
5748 	if (++calib->nbeacons < 20)
5749 		return;
5750 
5751 	/* Determine highest average RSSI. */
5752 	val = MAX(calib->rssi[0], calib->rssi[1]);
5753 	val = MAX(calib->rssi[2], val);
5754 
5755 	/* Determine which antennas are connected. */
5756 	sc->chainmask = sc->rxchainmask;
5757 	for (i = 0; i < 3; i++)
5758 		if (val - calib->rssi[i] > 15 * 20)
5759 			sc->chainmask &= ~(1 << i);
5760 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
5761 	    "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
5762 	    __func__, sc->rxchainmask, sc->chainmask);
5763 
5764 	/* If none of the TX antennas are connected, keep at least one. */
5765 	if ((sc->chainmask & sc->txchainmask) == 0)
5766 		sc->chainmask |= IWN_LSB(sc->txchainmask);
5767 
5768 	(void)ops->set_gains(sc);
5769 	calib->state = IWN_CALIB_STATE_RUN;
5770 
5771 #ifdef notyet
5772 	/* XXX Disable RX chains with no antennas connected. */
5773 	sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
5774 	if (sc->sc_is_scanning)
5775 		device_printf(sc->sc_dev,
5776 		    "%s: is_scanning set, before RXON\n",
5777 		    __func__);
5778 	(void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
5779 #endif
5780 
5781 	/* Enable power-saving mode if requested by user. */
5782 	if (ic->ic_flags & IEEE80211_F_PMGTON)
5783 		(void)iwn_set_pslevel(sc, 0, 3, 1);
5784 
5785 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5786 
5787 }
5788 
5789 static int
5790 iwn4965_init_gains(struct iwn_softc *sc)
5791 {
5792 	struct iwn_phy_calib_gain cmd;
5793 
5794 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5795 
5796 	memset(&cmd, 0, sizeof cmd);
5797 	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
5798 	/* Differential gains initially set to 0 for all 3 antennas. */
5799 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5800 	    "%s: setting initial differential gains\n", __func__);
5801 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5802 }
5803 
5804 static int
5805 iwn5000_init_gains(struct iwn_softc *sc)
5806 {
5807 	struct iwn_phy_calib cmd;
5808 
5809 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5810 
5811 	memset(&cmd, 0, sizeof cmd);
5812 	cmd.code = sc->reset_noise_gain;
5813 	cmd.ngroups = 1;
5814 	cmd.isvalid = 1;
5815 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5816 	    "%s: setting initial differential gains\n", __func__);
5817 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5818 }
5819 
5820 static int
5821 iwn4965_set_gains(struct iwn_softc *sc)
5822 {
5823 	struct iwn_calib_state *calib = &sc->calib;
5824 	struct iwn_phy_calib_gain cmd;
5825 	int i, delta, noise;
5826 
5827 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5828 
5829 	/* Get minimal noise among connected antennas. */
5830 	noise = INT_MAX;	/* NB: There's at least one antenna. */
5831 	for (i = 0; i < 3; i++)
5832 		if (sc->chainmask & (1 << i))
5833 			noise = MIN(calib->noise[i], noise);
5834 
5835 	memset(&cmd, 0, sizeof cmd);
5836 	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
5837 	/* Set differential gains for connected antennas. */
5838 	for (i = 0; i < 3; i++) {
5839 		if (sc->chainmask & (1 << i)) {
5840 			/* Compute attenuation (in unit of 1.5dB). */
5841 			delta = (noise - (int32_t)calib->noise[i]) / 30;
5842 			/* NB: delta <= 0 */
5843 			/* Limit to [-4.5dB,0]. */
5844 			cmd.gain[i] = MIN(abs(delta), 3);
5845 			if (delta < 0)
5846 				cmd.gain[i] |= 1 << 2;	/* sign bit */
5847 		}
5848 	}
5849 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5850 	    "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
5851 	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
5852 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5853 }
5854 
5855 static int
5856 iwn5000_set_gains(struct iwn_softc *sc)
5857 {
5858 	struct iwn_calib_state *calib = &sc->calib;
5859 	struct iwn_phy_calib_gain cmd;
5860 	int i, ant, div, delta;
5861 
5862 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5863 
5864 	/* We collected 20 beacons and !=6050 need a 1.5 factor. */
5865 	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
5866 
5867 	memset(&cmd, 0, sizeof cmd);
5868 	cmd.code = sc->noise_gain;
5869 	cmd.ngroups = 1;
5870 	cmd.isvalid = 1;
5871 	/* Get first available RX antenna as referential. */
5872 	ant = IWN_LSB(sc->rxchainmask);
5873 	/* Set differential gains for other antennas. */
5874 	for (i = ant + 1; i < 3; i++) {
5875 		if (sc->chainmask & (1 << i)) {
5876 			/* The delta is relative to antenna "ant". */
5877 			delta = ((int32_t)calib->noise[ant] -
5878 			    (int32_t)calib->noise[i]) / div;
5879 			/* Limit to [-4.5dB,+4.5dB]. */
5880 			cmd.gain[i - 1] = MIN(abs(delta), 3);
5881 			if (delta < 0)
5882 				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
5883 		}
5884 	}
5885 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
5886 	    "setting differential gains Ant B/C: %x/%x (%x)\n",
5887 	    cmd.gain[0], cmd.gain[1], sc->chainmask);
5888 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5889 }
5890 
5891 /*
5892  * Tune RF RX sensitivity based on the number of false alarms detected
5893  * during the last beacon period.
5894  */
5895 static void
5896 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
5897 {
5898 #define inc(val, inc, max)			\
5899 	if ((val) < (max)) {			\
5900 		if ((val) < (max) - (inc))	\
5901 			(val) += (inc);		\
5902 		else				\
5903 			(val) = (max);		\
5904 		needs_update = 1;		\
5905 	}
5906 #define dec(val, dec, min)			\
5907 	if ((val) > (min)) {			\
5908 		if ((val) > (min) + (dec))	\
5909 			(val) -= (dec);		\
5910 		else				\
5911 			(val) = (min);		\
5912 		needs_update = 1;		\
5913 	}
5914 
5915 	const struct iwn_sensitivity_limits *limits = sc->limits;
5916 	struct iwn_calib_state *calib = &sc->calib;
5917 	uint32_t val, rxena, fa;
5918 	uint32_t energy[3], energy_min;
5919 	uint8_t noise[3], noise_ref;
5920 	int i, needs_update = 0;
5921 
5922 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5923 
5924 	/* Check that we've been enabled long enough. */
5925 	if ((rxena = le32toh(stats->general.load)) == 0){
5926 		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__);
5927 		return;
5928 	}
5929 
5930 	/* Compute number of false alarms since last call for OFDM. */
5931 	fa  = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
5932 	fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
5933 	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
5934 
5935 	if (fa > 50 * rxena) {
5936 		/* High false alarm count, decrease sensitivity. */
5937 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5938 		    "%s: OFDM high false alarm count: %u\n", __func__, fa);
5939 		inc(calib->ofdm_x1,     1, limits->max_ofdm_x1);
5940 		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
5941 		inc(calib->ofdm_x4,     1, limits->max_ofdm_x4);
5942 		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
5943 
5944 	} else if (fa < 5 * rxena) {
5945 		/* Low false alarm count, increase sensitivity. */
5946 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5947 		    "%s: OFDM low false alarm count: %u\n", __func__, fa);
5948 		dec(calib->ofdm_x1,     1, limits->min_ofdm_x1);
5949 		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
5950 		dec(calib->ofdm_x4,     1, limits->min_ofdm_x4);
5951 		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
5952 	}
5953 
5954 	/* Compute maximum noise among 3 receivers. */
5955 	for (i = 0; i < 3; i++)
5956 		noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
5957 	val = MAX(noise[0], noise[1]);
5958 	val = MAX(noise[2], val);
5959 	/* Insert it into our samples table. */
5960 	calib->noise_samples[calib->cur_noise_sample] = val;
5961 	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
5962 
5963 	/* Compute maximum noise among last 20 samples. */
5964 	noise_ref = calib->noise_samples[0];
5965 	for (i = 1; i < 20; i++)
5966 		noise_ref = MAX(noise_ref, calib->noise_samples[i]);
5967 
5968 	/* Compute maximum energy among 3 receivers. */
5969 	for (i = 0; i < 3; i++)
5970 		energy[i] = le32toh(stats->general.energy[i]);
5971 	val = MIN(energy[0], energy[1]);
5972 	val = MIN(energy[2], val);
5973 	/* Insert it into our samples table. */
5974 	calib->energy_samples[calib->cur_energy_sample] = val;
5975 	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
5976 
5977 	/* Compute minimum energy among last 10 samples. */
5978 	energy_min = calib->energy_samples[0];
5979 	for (i = 1; i < 10; i++)
5980 		energy_min = MAX(energy_min, calib->energy_samples[i]);
5981 	energy_min += 6;
5982 
5983 	/* Compute number of false alarms since last call for CCK. */
5984 	fa  = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
5985 	fa += le32toh(stats->cck.fa) - calib->fa_cck;
5986 	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
5987 
5988 	if (fa > 50 * rxena) {
5989 		/* High false alarm count, decrease sensitivity. */
5990 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5991 		    "%s: CCK high false alarm count: %u\n", __func__, fa);
5992 		calib->cck_state = IWN_CCK_STATE_HIFA;
5993 		calib->low_fa = 0;
5994 
5995 		if (calib->cck_x4 > 160) {
5996 			calib->noise_ref = noise_ref;
5997 			if (calib->energy_cck > 2)
5998 				dec(calib->energy_cck, 2, energy_min);
5999 		}
6000 		if (calib->cck_x4 < 160) {
6001 			calib->cck_x4 = 161;
6002 			needs_update = 1;
6003 		} else
6004 			inc(calib->cck_x4, 3, limits->max_cck_x4);
6005 
6006 		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
6007 
6008 	} else if (fa < 5 * rxena) {
6009 		/* Low false alarm count, increase sensitivity. */
6010 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6011 		    "%s: CCK low false alarm count: %u\n", __func__, fa);
6012 		calib->cck_state = IWN_CCK_STATE_LOFA;
6013 		calib->low_fa++;
6014 
6015 		if (calib->cck_state != IWN_CCK_STATE_INIT &&
6016 		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
6017 		     calib->low_fa > 100)) {
6018 			inc(calib->energy_cck, 2, limits->min_energy_cck);
6019 			dec(calib->cck_x4,     3, limits->min_cck_x4);
6020 			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
6021 		}
6022 	} else {
6023 		/* Not worth to increase or decrease sensitivity. */
6024 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6025 		    "%s: CCK normal false alarm count: %u\n", __func__, fa);
6026 		calib->low_fa = 0;
6027 		calib->noise_ref = noise_ref;
6028 
6029 		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
6030 			/* Previous interval had many false alarms. */
6031 			dec(calib->energy_cck, 8, energy_min);
6032 		}
6033 		calib->cck_state = IWN_CCK_STATE_INIT;
6034 	}
6035 
6036 	if (needs_update)
6037 		(void)iwn_send_sensitivity(sc);
6038 
6039 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
6040 
6041 #undef dec
6042 #undef inc
6043 }
6044 
6045 static int
6046 iwn_send_sensitivity(struct iwn_softc *sc)
6047 {
6048 	struct iwn_calib_state *calib = &sc->calib;
6049 	struct iwn_enhanced_sensitivity_cmd cmd;
6050 	int len;
6051 
6052 	memset(&cmd, 0, sizeof cmd);
6053 	len = sizeof (struct iwn_sensitivity_cmd);
6054 	cmd.which = IWN_SENSITIVITY_WORKTBL;
6055 	/* OFDM modulation. */
6056 	cmd.corr_ofdm_x1       = htole16(calib->ofdm_x1);
6057 	cmd.corr_ofdm_mrc_x1   = htole16(calib->ofdm_mrc_x1);
6058 	cmd.corr_ofdm_x4       = htole16(calib->ofdm_x4);
6059 	cmd.corr_ofdm_mrc_x4   = htole16(calib->ofdm_mrc_x4);
6060 	cmd.energy_ofdm        = htole16(sc->limits->energy_ofdm);
6061 	cmd.energy_ofdm_th     = htole16(62);
6062 	/* CCK modulation. */
6063 	cmd.corr_cck_x4        = htole16(calib->cck_x4);
6064 	cmd.corr_cck_mrc_x4    = htole16(calib->cck_mrc_x4);
6065 	cmd.energy_cck         = htole16(calib->energy_cck);
6066 	/* Barker modulation: use default values. */
6067 	cmd.corr_barker        = htole16(190);
6068 	cmd.corr_barker_mrc    = htole16(sc->limits->barker_mrc);
6069 
6070 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6071 	    "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
6072 	    calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
6073 	    calib->ofdm_mrc_x4, calib->cck_x4,
6074 	    calib->cck_mrc_x4, calib->energy_cck);
6075 
6076 	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
6077 		goto send;
6078 	/* Enhanced sensitivity settings. */
6079 	len = sizeof (struct iwn_enhanced_sensitivity_cmd);
6080 	cmd.ofdm_det_slope_mrc = htole16(668);
6081 	cmd.ofdm_det_icept_mrc = htole16(4);
6082 	cmd.ofdm_det_slope     = htole16(486);
6083 	cmd.ofdm_det_icept     = htole16(37);
6084 	cmd.cck_det_slope_mrc  = htole16(853);
6085 	cmd.cck_det_icept_mrc  = htole16(4);
6086 	cmd.cck_det_slope      = htole16(476);
6087 	cmd.cck_det_icept      = htole16(99);
6088 send:
6089 	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
6090 }
6091 
6092 /*
6093  * Look at the increase of PLCP errors over time; if it exceeds
6094  * a programmed threshold then trigger an RF retune.
6095  */
6096 static void
6097 iwn_check_rx_recovery(struct iwn_softc *sc, struct iwn_stats *rs)
6098 {
6099 	int32_t delta_ofdm, delta_ht, delta_cck;
6100 	struct iwn_calib_state *calib = &sc->calib;
6101 	int delta_ticks, cur_ticks;
6102 	int delta_msec;
6103 	int thresh;
6104 
6105 	/*
6106 	 * Calculate the difference between the current and
6107 	 * previous statistics.
6108 	 */
6109 	delta_cck = le32toh(rs->rx.cck.bad_plcp) - calib->bad_plcp_cck;
6110 	delta_ofdm = le32toh(rs->rx.ofdm.bad_plcp) - calib->bad_plcp_ofdm;
6111 	delta_ht = le32toh(rs->rx.ht.bad_plcp) - calib->bad_plcp_ht;
6112 
6113 	/*
6114 	 * Calculate the delta in time between successive statistics
6115 	 * messages.  Yes, it can roll over; so we make sure that
6116 	 * this doesn't happen.
6117 	 *
6118 	 * XXX go figure out what to do about rollover
6119 	 * XXX go figure out what to do if ticks rolls over to -ve instead!
6120 	 * XXX go stab signed integer overflow undefined-ness in the face.
6121 	 */
6122 	cur_ticks = ticks;
6123 	delta_ticks = cur_ticks - sc->last_calib_ticks;
6124 
6125 	/*
6126 	 * If any are negative, then the firmware likely reset; so just
6127 	 * bail.  We'll pick this up next time.
6128 	 */
6129 	if (delta_cck < 0 || delta_ofdm < 0 || delta_ht < 0 || delta_ticks < 0)
6130 		return;
6131 
6132 	/*
6133 	 * delta_ticks is in ticks; we need to convert it up to milliseconds
6134 	 * so we can do some useful math with it.
6135 	 */
6136 	delta_msec = ticks_to_msecs(delta_ticks);
6137 
6138 	/*
6139 	 * Calculate what our threshold is given the current delta_msec.
6140 	 */
6141 	thresh = sc->base_params->plcp_err_threshold * delta_msec;
6142 
6143 	DPRINTF(sc, IWN_DEBUG_STATE,
6144 	    "%s: time delta: %d; cck=%d, ofdm=%d, ht=%d, total=%d, thresh=%d\n",
6145 	    __func__,
6146 	    delta_msec,
6147 	    delta_cck,
6148 	    delta_ofdm,
6149 	    delta_ht,
6150 	    (delta_msec + delta_cck + delta_ofdm + delta_ht),
6151 	    thresh);
6152 
6153 	/*
6154 	 * If we need a retune, then schedule a single channel scan
6155 	 * to a channel that isn't the currently active one!
6156 	 *
6157 	 * The math from linux iwlwifi:
6158 	 *
6159 	 * if ((delta * 100 / msecs) > threshold)
6160 	 */
6161 	if (thresh > 0 && (delta_cck + delta_ofdm + delta_ht) * 100 > thresh) {
6162 		DPRINTF(sc, IWN_DEBUG_ANY,
6163 		    "%s: PLCP error threshold raw (%d) comparison (%d) "
6164 		    "over limit (%d); retune!\n",
6165 		    __func__,
6166 		    (delta_cck + delta_ofdm + delta_ht),
6167 		    (delta_cck + delta_ofdm + delta_ht) * 100,
6168 		    thresh);
6169 	}
6170 }
6171 
6172 /*
6173  * Set STA mode power saving level (between 0 and 5).
6174  * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
6175  */
6176 static int
6177 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
6178 {
6179 	struct iwn_pmgt_cmd cmd;
6180 	const struct iwn_pmgt *pmgt;
6181 	uint32_t max, skip_dtim;
6182 	uint32_t reg;
6183 	int i;
6184 
6185 	DPRINTF(sc, IWN_DEBUG_PWRSAVE,
6186 	    "%s: dtim=%d, level=%d, async=%d\n",
6187 	    __func__,
6188 	    dtim,
6189 	    level,
6190 	    async);
6191 
6192 	/* Select which PS parameters to use. */
6193 	if (dtim <= 2)
6194 		pmgt = &iwn_pmgt[0][level];
6195 	else if (dtim <= 10)
6196 		pmgt = &iwn_pmgt[1][level];
6197 	else
6198 		pmgt = &iwn_pmgt[2][level];
6199 
6200 	memset(&cmd, 0, sizeof cmd);
6201 	if (level != 0)	/* not CAM */
6202 		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
6203 	if (level == 5)
6204 		cmd.flags |= htole16(IWN_PS_FAST_PD);
6205 	/* Retrieve PCIe Active State Power Management (ASPM). */
6206 	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
6207 	if (!(reg & 0x1))	/* L0s Entry disabled. */
6208 		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
6209 	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
6210 	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
6211 
6212 	if (dtim == 0) {
6213 		dtim = 1;
6214 		skip_dtim = 0;
6215 	} else
6216 		skip_dtim = pmgt->skip_dtim;
6217 	if (skip_dtim != 0) {
6218 		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
6219 		max = pmgt->intval[4];
6220 		if (max == (uint32_t)-1)
6221 			max = dtim * (skip_dtim + 1);
6222 		else if (max > dtim)
6223 			max = (max / dtim) * dtim;
6224 	} else
6225 		max = dtim;
6226 	for (i = 0; i < 5; i++)
6227 		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
6228 
6229 	DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
6230 	    level);
6231 	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
6232 }
6233 
6234 static int
6235 iwn_send_btcoex(struct iwn_softc *sc)
6236 {
6237 	struct iwn_bluetooth cmd;
6238 
6239 	memset(&cmd, 0, sizeof cmd);
6240 	cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
6241 	cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
6242 	cmd.max_kill = IWN_BT_MAX_KILL_DEF;
6243 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
6244 	    __func__);
6245 	return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
6246 }
6247 
6248 static int
6249 iwn_send_advanced_btcoex(struct iwn_softc *sc)
6250 {
6251 	static const uint32_t btcoex_3wire[12] = {
6252 		0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
6253 		0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
6254 		0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
6255 	};
6256 	struct iwn6000_btcoex_config btconfig;
6257 	struct iwn2000_btcoex_config btconfig2k;
6258 	struct iwn_btcoex_priotable btprio;
6259 	struct iwn_btcoex_prot btprot;
6260 	int error, i;
6261 	uint8_t flags;
6262 
6263 	memset(&btconfig, 0, sizeof btconfig);
6264 	memset(&btconfig2k, 0, sizeof btconfig2k);
6265 
6266 	flags = IWN_BT_FLAG_COEX6000_MODE_3W <<
6267 	    IWN_BT_FLAG_COEX6000_MODE_SHIFT; // Done as is in linux kernel 3.2
6268 
6269 	if (sc->base_params->bt_sco_disable)
6270 		flags &= ~IWN_BT_FLAG_SYNC_2_BT_DISABLE;
6271 	else
6272 		flags |= IWN_BT_FLAG_SYNC_2_BT_DISABLE;
6273 
6274 	flags |= IWN_BT_FLAG_COEX6000_CHAN_INHIBITION;
6275 
6276 	/* Default flags result is 145 as old value */
6277 
6278 	/*
6279 	 * Flags value has to be review. Values must change if we
6280 	 * which to disable it
6281 	 */
6282 	if (sc->base_params->bt_session_2) {
6283 		btconfig2k.flags = flags;
6284 		btconfig2k.max_kill = 5;
6285 		btconfig2k.bt3_t7_timer = 1;
6286 		btconfig2k.kill_ack = htole32(0xffff0000);
6287 		btconfig2k.kill_cts = htole32(0xffff0000);
6288 		btconfig2k.sample_time = 2;
6289 		btconfig2k.bt3_t2_timer = 0xc;
6290 
6291 		for (i = 0; i < 12; i++)
6292 			btconfig2k.lookup_table[i] = htole32(btcoex_3wire[i]);
6293 		btconfig2k.valid = htole16(0xff);
6294 		btconfig2k.prio_boost = htole32(0xf0);
6295 		DPRINTF(sc, IWN_DEBUG_RESET,
6296 		    "%s: configuring advanced bluetooth coexistence"
6297 		    " session 2, flags : 0x%x\n",
6298 		    __func__,
6299 		    flags);
6300 		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig2k,
6301 		    sizeof(btconfig2k), 1);
6302 	} else {
6303 		btconfig.flags = flags;
6304 		btconfig.max_kill = 5;
6305 		btconfig.bt3_t7_timer = 1;
6306 		btconfig.kill_ack = htole32(0xffff0000);
6307 		btconfig.kill_cts = htole32(0xffff0000);
6308 		btconfig.sample_time = 2;
6309 		btconfig.bt3_t2_timer = 0xc;
6310 
6311 		for (i = 0; i < 12; i++)
6312 			btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
6313 		btconfig.valid = htole16(0xff);
6314 		btconfig.prio_boost = 0xf0;
6315 		DPRINTF(sc, IWN_DEBUG_RESET,
6316 		    "%s: configuring advanced bluetooth coexistence,"
6317 		    " flags : 0x%x\n",
6318 		    __func__,
6319 		    flags);
6320 		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig,
6321 		    sizeof(btconfig), 1);
6322 	}
6323 
6324 	if (error != 0)
6325 		return error;
6326 
6327 	memset(&btprio, 0, sizeof btprio);
6328 	btprio.calib_init1 = 0x6;
6329 	btprio.calib_init2 = 0x7;
6330 	btprio.calib_periodic_low1 = 0x2;
6331 	btprio.calib_periodic_low2 = 0x3;
6332 	btprio.calib_periodic_high1 = 0x4;
6333 	btprio.calib_periodic_high2 = 0x5;
6334 	btprio.dtim = 0x6;
6335 	btprio.scan52 = 0x8;
6336 	btprio.scan24 = 0xa;
6337 	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
6338 	    1);
6339 	if (error != 0)
6340 		return error;
6341 
6342 	/* Force BT state machine change. */
6343 	memset(&btprot, 0, sizeof btprot);
6344 	btprot.open = 1;
6345 	btprot.type = 1;
6346 	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
6347 	if (error != 0)
6348 		return error;
6349 	btprot.open = 0;
6350 	return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
6351 }
6352 
6353 static int
6354 iwn5000_runtime_calib(struct iwn_softc *sc)
6355 {
6356 	struct iwn5000_calib_config cmd;
6357 
6358 	memset(&cmd, 0, sizeof cmd);
6359 	cmd.ucode.once.enable = 0xffffffff;
6360 	cmd.ucode.once.start = IWN5000_CALIB_DC;
6361 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6362 	    "%s: configuring runtime calibration\n", __func__);
6363 	return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
6364 }
6365 
6366 static int
6367 iwn_config(struct iwn_softc *sc)
6368 {
6369 	struct iwn_ops *ops = &sc->ops;
6370 	struct ifnet *ifp = sc->sc_ifp;
6371 	struct ieee80211com *ic = ifp->if_l2com;
6372 	uint32_t txmask;
6373 	uint16_t rxchain;
6374 	int error;
6375 
6376 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
6377 
6378 	if ((sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET)
6379 	    && (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)) {
6380 		device_printf(sc->sc_dev,"%s: temp_offset and temp_offsetv2 are"
6381 		    " exclusive each together. Review NIC config file. Conf"
6382 		    " :  0x%08x Flags :  0x%08x  \n", __func__,
6383 		    sc->base_params->calib_need,
6384 		    (IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET |
6385 		    IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2));
6386 		return (EINVAL);
6387 	}
6388 
6389 	/* Compute temperature calib if needed. Will be send by send calib */
6390 	if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) {
6391 		error = iwn5000_temp_offset_calib(sc);
6392 		if (error != 0) {
6393 			device_printf(sc->sc_dev,
6394 			    "%s: could not set temperature offset\n", __func__);
6395 			return (error);
6396 		}
6397 	} else if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
6398 		error = iwn5000_temp_offset_calibv2(sc);
6399 		if (error != 0) {
6400 			device_printf(sc->sc_dev,
6401 			    "%s: could not compute temperature offset v2\n",
6402 			    __func__);
6403 			return (error);
6404 		}
6405 	}
6406 
6407 	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
6408 		/* Configure runtime DC calibration. */
6409 		error = iwn5000_runtime_calib(sc);
6410 		if (error != 0) {
6411 			device_printf(sc->sc_dev,
6412 			    "%s: could not configure runtime calibration\n",
6413 			    __func__);
6414 			return error;
6415 		}
6416 	}
6417 
6418 	/* Configure valid TX chains for >=5000 Series. */
6419 	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
6420 	    IWN_UCODE_API(sc->ucode_rev) > 1) {
6421 		txmask = htole32(sc->txchainmask);
6422 		DPRINTF(sc, IWN_DEBUG_RESET | IWN_DEBUG_XMIT,
6423 		    "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
6424 		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
6425 		    sizeof txmask, 0);
6426 		if (error != 0) {
6427 			device_printf(sc->sc_dev,
6428 			    "%s: could not configure valid TX chains, "
6429 			    "error %d\n", __func__, error);
6430 			return error;
6431 		}
6432 	}
6433 
6434 	/* Configure bluetooth coexistence. */
6435 	error = 0;
6436 
6437 	/* Configure bluetooth coexistence if needed. */
6438 	if (sc->base_params->bt_mode == IWN_BT_ADVANCED)
6439 		error = iwn_send_advanced_btcoex(sc);
6440 	if (sc->base_params->bt_mode == IWN_BT_SIMPLE)
6441 		error = iwn_send_btcoex(sc);
6442 
6443 	if (error != 0) {
6444 		device_printf(sc->sc_dev,
6445 		    "%s: could not configure bluetooth coexistence, error %d\n",
6446 		    __func__, error);
6447 		return error;
6448 	}
6449 
6450 	/* Set mode, channel, RX filter and enable RX. */
6451 	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
6452 	memset(sc->rxon, 0, sizeof (struct iwn_rxon));
6453 	IEEE80211_ADDR_COPY(sc->rxon->myaddr, IF_LLADDR(ifp));
6454 	IEEE80211_ADDR_COPY(sc->rxon->wlap, IF_LLADDR(ifp));
6455 	sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
6456 	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
6457 	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
6458 		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
6459 	switch (ic->ic_opmode) {
6460 	case IEEE80211_M_STA:
6461 		sc->rxon->mode = IWN_MODE_STA;
6462 		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST);
6463 		break;
6464 	case IEEE80211_M_MONITOR:
6465 		sc->rxon->mode = IWN_MODE_MONITOR;
6466 		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST |
6467 		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
6468 		break;
6469 	default:
6470 		/* Should not get there. */
6471 		break;
6472 	}
6473 	sc->rxon->cck_mask  = 0x0f;	/* not yet negotiated */
6474 	sc->rxon->ofdm_mask = 0xff;	/* not yet negotiated */
6475 	sc->rxon->ht_single_mask = 0xff;
6476 	sc->rxon->ht_dual_mask = 0xff;
6477 	sc->rxon->ht_triple_mask = 0xff;
6478 	/*
6479 	 * In active association mode, ensure that
6480 	 * all the receive chains are enabled.
6481 	 *
6482 	 * Since we're not yet doing SMPS, don't allow the
6483 	 * number of idle RX chains to be less than the active
6484 	 * number.
6485 	 */
6486 	rxchain =
6487 	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
6488 	    IWN_RXCHAIN_MIMO_COUNT(sc->nrxchains) |
6489 	    IWN_RXCHAIN_IDLE_COUNT(sc->nrxchains);
6490 	sc->rxon->rxchain = htole16(rxchain);
6491 	DPRINTF(sc, IWN_DEBUG_RESET | IWN_DEBUG_XMIT,
6492 	    "%s: rxchainmask=0x%x, nrxchains=%d\n",
6493 	    __func__,
6494 	    sc->rxchainmask,
6495 	    sc->nrxchains);
6496 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
6497 	if (sc->sc_is_scanning)
6498 		device_printf(sc->sc_dev,
6499 		    "%s: is_scanning set, before RXON\n",
6500 		    __func__);
6501 	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 0);
6502 	if (error != 0) {
6503 		device_printf(sc->sc_dev, "%s: RXON command failed\n",
6504 		    __func__);
6505 		return error;
6506 	}
6507 
6508 	if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
6509 		device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
6510 		    __func__);
6511 		return error;
6512 	}
6513 
6514 	/* Configuration has changed, set TX power accordingly. */
6515 	if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) {
6516 		device_printf(sc->sc_dev, "%s: could not set TX power\n",
6517 		    __func__);
6518 		return error;
6519 	}
6520 
6521 	if ((error = iwn_set_critical_temp(sc)) != 0) {
6522 		device_printf(sc->sc_dev,
6523 		    "%s: could not set critical temperature\n", __func__);
6524 		return error;
6525 	}
6526 
6527 	/* Set power saving level to CAM during initialization. */
6528 	if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
6529 		device_printf(sc->sc_dev,
6530 		    "%s: could not set power saving level\n", __func__);
6531 		return error;
6532 	}
6533 
6534 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
6535 
6536 	return 0;
6537 }
6538 
6539 static uint16_t
6540 iwn_get_active_dwell_time(struct iwn_softc *sc,
6541     struct ieee80211_channel *c, uint8_t n_probes)
6542 {
6543 	/* No channel? Default to 2GHz settings */
6544 	if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
6545 		return (IWN_ACTIVE_DWELL_TIME_2GHZ +
6546 		IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
6547 	}
6548 
6549 	/* 5GHz dwell time */
6550 	return (IWN_ACTIVE_DWELL_TIME_5GHZ +
6551 	    IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
6552 }
6553 
6554 /*
6555  * Limit the total dwell time to 85% of the beacon interval.
6556  *
6557  * Returns the dwell time in milliseconds.
6558  */
6559 static uint16_t
6560 iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time)
6561 {
6562 	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
6563 	struct ieee80211vap *vap = NULL;
6564 	int bintval = 0;
6565 
6566 	/* bintval is in TU (1.024mS) */
6567 	if (! TAILQ_EMPTY(&ic->ic_vaps)) {
6568 		vap = TAILQ_FIRST(&ic->ic_vaps);
6569 		bintval = vap->iv_bss->ni_intval;
6570 	}
6571 
6572 	/*
6573 	 * If it's non-zero, we should calculate the minimum of
6574 	 * it and the DWELL_BASE.
6575 	 *
6576 	 * XXX Yes, the math should take into account that bintval
6577 	 * is 1.024mS, not 1mS..
6578 	 */
6579 	if (bintval > 0) {
6580 		DPRINTF(sc, IWN_DEBUG_SCAN,
6581 		    "%s: bintval=%d\n",
6582 		    __func__,
6583 		    bintval);
6584 		return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)));
6585 	}
6586 
6587 	/* No association context? Default */
6588 	return (IWN_PASSIVE_DWELL_BASE);
6589 }
6590 
6591 static uint16_t
6592 iwn_get_passive_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c)
6593 {
6594 	uint16_t passive;
6595 
6596 	if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
6597 		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ;
6598 	} else {
6599 		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ;
6600 	}
6601 
6602 	/* Clamp to the beacon interval if we're associated */
6603 	return (iwn_limit_dwell(sc, passive));
6604 }
6605 
6606 static int
6607 iwn_scan(struct iwn_softc *sc, struct ieee80211vap *vap,
6608     struct ieee80211_scan_state *ss, struct ieee80211_channel *c)
6609 {
6610 	struct ifnet *ifp = sc->sc_ifp;
6611 	struct ieee80211com *ic = ifp->if_l2com;
6612 	struct ieee80211_node *ni = vap->iv_bss;
6613 	struct iwn_scan_hdr *hdr;
6614 	struct iwn_cmd_data *tx;
6615 	struct iwn_scan_essid *essid;
6616 	struct iwn_scan_chan *chan;
6617 	struct ieee80211_frame *wh;
6618 	struct ieee80211_rateset *rs;
6619 	uint8_t *buf, *frm;
6620 	uint16_t rxchain;
6621 	uint8_t txant;
6622 	int buflen, error;
6623 	int is_active;
6624 	uint16_t dwell_active, dwell_passive;
6625 	uint32_t extra, scan_service_time;
6626 
6627 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
6628 
6629 	/*
6630 	 * We are absolutely not allowed to send a scan command when another
6631 	 * scan command is pending.
6632 	 */
6633 	if (sc->sc_is_scanning) {
6634 		device_printf(sc->sc_dev, "%s: called whilst scanning!\n",
6635 		    __func__);
6636 		return (EAGAIN);
6637 	}
6638 
6639 	/* Assign the scan channel */
6640 	c = ic->ic_curchan;
6641 
6642 	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
6643 	buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
6644 	if (buf == NULL) {
6645 		device_printf(sc->sc_dev,
6646 		    "%s: could not allocate buffer for scan command\n",
6647 		    __func__);
6648 		return ENOMEM;
6649 	}
6650 	hdr = (struct iwn_scan_hdr *)buf;
6651 	/*
6652 	 * Move to the next channel if no frames are received within 10ms
6653 	 * after sending the probe request.
6654 	 */
6655 	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
6656 	hdr->quiet_threshold = htole16(1);	/* min # of packets */
6657 	/*
6658 	 * Max needs to be greater than active and passive and quiet!
6659 	 * It's also in microseconds!
6660 	 */
6661 	hdr->max_svc = htole32(250 * 1024);
6662 
6663 	/*
6664 	 * Reset scan: interval=100
6665 	 * Normal scan: interval=becaon interval
6666 	 * suspend_time: 100 (TU)
6667 	 *
6668 	 */
6669 	extra = (100 /* suspend_time */ / 100 /* beacon interval */) << 22;
6670 	//scan_service_time = extra | ((100 /* susp */ % 100 /* int */) * 1024);
6671 	scan_service_time = (4 << 22) | (100 * 1024);	/* Hardcode for now! */
6672 	hdr->pause_svc = htole32(scan_service_time);
6673 
6674 	/* Select antennas for scanning. */
6675 	rxchain =
6676 	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
6677 	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
6678 	    IWN_RXCHAIN_DRIVER_FORCE;
6679 	if (IEEE80211_IS_CHAN_A(c) &&
6680 	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
6681 		/* Ant A must be avoided in 5GHz because of an HW bug. */
6682 		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
6683 	} else	/* Use all available RX antennas. */
6684 		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
6685 	hdr->rxchain = htole16(rxchain);
6686 	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
6687 
6688 	tx = (struct iwn_cmd_data *)(hdr + 1);
6689 	tx->flags = htole32(IWN_TX_AUTO_SEQ);
6690 	tx->id = sc->broadcast_id;
6691 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
6692 
6693 	if (IEEE80211_IS_CHAN_5GHZ(c)) {
6694 		/* Send probe requests at 6Mbps. */
6695 		tx->rate = htole32(0xd);
6696 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6697 	} else {
6698 		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
6699 		if (sc->hw_type == IWN_HW_REV_TYPE_4965 &&
6700 		    sc->rxon->associd && sc->rxon->chan > 14)
6701 			tx->rate = htole32(0xd);
6702 		else {
6703 			/* Send probe requests at 1Mbps. */
6704 			tx->rate = htole32(10 | IWN_RFLAG_CCK);
6705 		}
6706 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6707 	}
6708 	/* Use the first valid TX antenna. */
6709 	txant = IWN_LSB(sc->txchainmask);
6710 	tx->rate |= htole32(IWN_RFLAG_ANT(txant));
6711 
6712 	/*
6713 	 * Only do active scanning if we're announcing a probe request
6714 	 * for a given SSID (or more, if we ever add it to the driver.)
6715 	 */
6716 	is_active = 0;
6717 
6718 	/*
6719 	 * If we're scanning for a specific SSID, add it to the command.
6720 	 *
6721 	 * XXX maybe look at adding support for scanning multiple SSIDs?
6722 	 */
6723 	essid = (struct iwn_scan_essid *)(tx + 1);
6724 	if (ss != NULL) {
6725 		if (ss->ss_ssid[0].len != 0) {
6726 			essid[0].id = IEEE80211_ELEMID_SSID;
6727 			essid[0].len = ss->ss_ssid[0].len;
6728 			memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
6729 		}
6730 
6731 		DPRINTF(sc, IWN_DEBUG_SCAN, "%s: ssid_len=%d, ssid=%*s\n",
6732 		    __func__,
6733 		    ss->ss_ssid[0].len,
6734 		    ss->ss_ssid[0].len,
6735 		    ss->ss_ssid[0].ssid);
6736 
6737 		if (ss->ss_nssid > 0)
6738 			is_active = 1;
6739 	}
6740 
6741 	/*
6742 	 * Build a probe request frame.  Most of the following code is a
6743 	 * copy & paste of what is done in net80211.
6744 	 */
6745 	wh = (struct ieee80211_frame *)(essid + 20);
6746 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6747 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6748 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6749 	IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
6750 	IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
6751 	IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
6752 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
6753 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
6754 
6755 	frm = (uint8_t *)(wh + 1);
6756 	frm = ieee80211_add_ssid(frm, NULL, 0);
6757 	frm = ieee80211_add_rates(frm, rs);
6758 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6759 		frm = ieee80211_add_xrates(frm, rs);
6760 	if (ic->ic_htcaps & IEEE80211_HTC_HT)
6761 		frm = ieee80211_add_htcap(frm, ni);
6762 
6763 	/* Set length of probe request. */
6764 	tx->len = htole16(frm - (uint8_t *)wh);
6765 
6766 	/*
6767 	 * If active scanning is requested but a certain channel is
6768 	 * marked passive, we can do active scanning if we detect
6769 	 * transmissions.
6770 	 *
6771 	 * There is an issue with some firmware versions that triggers
6772 	 * a sysassert on a "good CRC threshold" of zero (== disabled),
6773 	 * on a radar channel even though this means that we should NOT
6774 	 * send probes.
6775 	 *
6776 	 * The "good CRC threshold" is the number of frames that we
6777 	 * need to receive during our dwell time on a channel before
6778 	 * sending out probes -- setting this to a huge value will
6779 	 * mean we never reach it, but at the same time work around
6780 	 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
6781 	 * here instead of IWL_GOOD_CRC_TH_DISABLED.
6782 	 *
6783 	 * This was fixed in later versions along with some other
6784 	 * scan changes, and the threshold behaves as a flag in those
6785 	 * versions.
6786 	 */
6787 
6788 	/*
6789 	 * If we're doing active scanning, set the crc_threshold
6790 	 * to a suitable value.  This is different to active veruss
6791 	 * passive scanning depending upon the channel flags; the
6792 	 * firmware will obey that particular check for us.
6793 	 */
6794 	if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN)
6795 		hdr->crc_threshold = is_active ?
6796 		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED;
6797 	else
6798 		hdr->crc_threshold = is_active ?
6799 		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER;
6800 
6801 	chan = (struct iwn_scan_chan *)frm;
6802 	chan->chan = htole16(ieee80211_chan2ieee(ic, c));
6803 	chan->flags = 0;
6804 	if (ss->ss_nssid > 0)
6805 		chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
6806 	chan->dsp_gain = 0x6e;
6807 
6808 	/*
6809 	 * Set the passive/active flag depending upon the channel mode.
6810 	 * XXX TODO: take the is_active flag into account as well?
6811 	 */
6812 	if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
6813 		chan->flags |= htole32(IWN_CHAN_PASSIVE);
6814 	else
6815 		chan->flags |= htole32(IWN_CHAN_ACTIVE);
6816 
6817 	/*
6818 	 * Calculate the active/passive dwell times.
6819 	 */
6820 
6821 	dwell_active = iwn_get_active_dwell_time(sc, c, ss->ss_nssid);
6822 	dwell_passive = iwn_get_passive_dwell_time(sc, c);
6823 
6824 	/* Make sure they're valid */
6825 	if (dwell_passive <= dwell_active)
6826 		dwell_passive = dwell_active + 1;
6827 
6828 	chan->active = htole16(dwell_active);
6829 	chan->passive = htole16(dwell_passive);
6830 
6831 	if (IEEE80211_IS_CHAN_5GHZ(c))
6832 		chan->rf_gain = 0x3b;
6833 	else
6834 		chan->rf_gain = 0x28;
6835 
6836 	DPRINTF(sc, IWN_DEBUG_STATE,
6837 	    "%s: chan %u flags 0x%x rf_gain 0x%x "
6838 	    "dsp_gain 0x%x active %d passive %d scan_svc_time %d crc 0x%x "
6839 	    "isactive=%d numssid=%d\n", __func__,
6840 	    chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
6841 	    dwell_active, dwell_passive, scan_service_time,
6842 	    hdr->crc_threshold, is_active, ss->ss_nssid);
6843 
6844 	hdr->nchan++;
6845 	chan++;
6846 	buflen = (uint8_t *)chan - buf;
6847 	hdr->len = htole16(buflen);
6848 
6849 	if (sc->sc_is_scanning) {
6850 		device_printf(sc->sc_dev,
6851 		    "%s: called with is_scanning set!\n",
6852 		    __func__);
6853 	}
6854 	sc->sc_is_scanning = 1;
6855 
6856 	DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
6857 	    hdr->nchan);
6858 	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
6859 	free(buf, M_DEVBUF);
6860 
6861 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
6862 
6863 	return error;
6864 }
6865 
6866 static int
6867 iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
6868 {
6869 	struct iwn_ops *ops = &sc->ops;
6870 	struct ifnet *ifp = sc->sc_ifp;
6871 	struct ieee80211com *ic = ifp->if_l2com;
6872 	struct ieee80211_node *ni = vap->iv_bss;
6873 	int error;
6874 
6875 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
6876 
6877 	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
6878 	/* Update adapter configuration. */
6879 	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
6880 	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
6881 	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
6882 	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
6883 		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
6884 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
6885 		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
6886 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6887 		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
6888 	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
6889 		sc->rxon->cck_mask  = 0;
6890 		sc->rxon->ofdm_mask = 0x15;
6891 	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
6892 		sc->rxon->cck_mask  = 0x03;
6893 		sc->rxon->ofdm_mask = 0;
6894 	} else {
6895 		/* Assume 802.11b/g. */
6896 		sc->rxon->cck_mask  = 0x03;
6897 		sc->rxon->ofdm_mask = 0x15;
6898 	}
6899 	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
6900 	    sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask,
6901 	    sc->rxon->ofdm_mask);
6902 	if (sc->sc_is_scanning)
6903 		device_printf(sc->sc_dev,
6904 		    "%s: is_scanning set, before RXON\n",
6905 		    __func__);
6906 	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
6907 	if (error != 0) {
6908 		device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
6909 		    __func__, error);
6910 		return error;
6911 	}
6912 
6913 	/* Configuration has changed, set TX power accordingly. */
6914 	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
6915 		device_printf(sc->sc_dev,
6916 		    "%s: could not set TX power, error %d\n", __func__, error);
6917 		return error;
6918 	}
6919 	/*
6920 	 * Reconfiguring RXON clears the firmware nodes table so we must
6921 	 * add the broadcast node again.
6922 	 */
6923 	if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
6924 		device_printf(sc->sc_dev,
6925 		    "%s: could not add broadcast node, error %d\n", __func__,
6926 		    error);
6927 		return error;
6928 	}
6929 
6930 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
6931 
6932 	return 0;
6933 }
6934 
6935 static int
6936 iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
6937 {
6938 	struct iwn_ops *ops = &sc->ops;
6939 	struct ifnet *ifp = sc->sc_ifp;
6940 	struct ieee80211com *ic = ifp->if_l2com;
6941 	struct ieee80211_node *ni = vap->iv_bss;
6942 	struct iwn_node_info node;
6943 	uint32_t htflags = 0;
6944 	int error;
6945 
6946 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
6947 
6948 	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
6949 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6950 		/* Link LED blinks while monitoring. */
6951 		iwn_set_led(sc, IWN_LED_LINK, 5, 5);
6952 		return 0;
6953 	}
6954 	if ((error = iwn_set_timing(sc, ni)) != 0) {
6955 		device_printf(sc->sc_dev,
6956 		    "%s: could not set timing, error %d\n", __func__, error);
6957 		return error;
6958 	}
6959 
6960 	/* Update adapter configuration. */
6961 	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
6962 	sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd));
6963 	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
6964 	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
6965 	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
6966 		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
6967 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
6968 		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
6969 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6970 		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
6971 	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
6972 		sc->rxon->cck_mask  = 0;
6973 		sc->rxon->ofdm_mask = 0x15;
6974 	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
6975 		sc->rxon->cck_mask  = 0x03;
6976 		sc->rxon->ofdm_mask = 0;
6977 	} else {
6978 		/* Assume 802.11b/g. */
6979 		sc->rxon->cck_mask  = 0x0f;
6980 		sc->rxon->ofdm_mask = 0x15;
6981 	}
6982 	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
6983 		htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);
6984 		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
6985 			switch (ic->ic_curhtprotmode) {
6986 			case IEEE80211_HTINFO_OPMODE_HT20PR:
6987 				htflags |= IWN_RXON_HT_MODEPURE40;
6988 				break;
6989 			default:
6990 				htflags |= IWN_RXON_HT_MODEMIXED;
6991 				break;
6992 			}
6993 		}
6994 		if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
6995 			htflags |= IWN_RXON_HT_HT40MINUS;
6996 	}
6997 	sc->rxon->flags |= htole32(htflags);
6998 	sc->rxon->filter |= htole32(IWN_FILTER_BSS);
6999 	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n",
7000 	    sc->rxon->chan, sc->rxon->flags);
7001 	if (sc->sc_is_scanning)
7002 		device_printf(sc->sc_dev,
7003 		    "%s: is_scanning set, before RXON\n",
7004 		    __func__);
7005 	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
7006 	if (error != 0) {
7007 		device_printf(sc->sc_dev,
7008 		    "%s: could not update configuration, error %d\n", __func__,
7009 		    error);
7010 		return error;
7011 	}
7012 
7013 	/* Configuration has changed, set TX power accordingly. */
7014 	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
7015 		device_printf(sc->sc_dev,
7016 		    "%s: could not set TX power, error %d\n", __func__, error);
7017 		return error;
7018 	}
7019 
7020 	/* Fake a join to initialize the TX rate. */
7021 	((struct iwn_node *)ni)->id = IWN_ID_BSS;
7022 	iwn_newassoc(ni, 1);
7023 
7024 	/* Add BSS node. */
7025 	memset(&node, 0, sizeof node);
7026 	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
7027 	node.id = IWN_ID_BSS;
7028 	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
7029 		switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
7030 		case IEEE80211_HTCAP_SMPS_ENA:
7031 			node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
7032 			break;
7033 		case IEEE80211_HTCAP_SMPS_DYNAMIC:
7034 			node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
7035 			break;
7036 		}
7037 		node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
7038 		    IWN_AMDPU_DENSITY(5));	/* 4us */
7039 		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
7040 			node.htflags |= htole32(IWN_NODE_HT40);
7041 	}
7042 	DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
7043 	error = ops->add_node(sc, &node, 1);
7044 	if (error != 0) {
7045 		device_printf(sc->sc_dev,
7046 		    "%s: could not add BSS node, error %d\n", __func__, error);
7047 		return error;
7048 	}
7049 	DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
7050 	    __func__, node.id);
7051 	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
7052 		device_printf(sc->sc_dev,
7053 		    "%s: could not setup link quality for node %d, error %d\n",
7054 		    __func__, node.id, error);
7055 		return error;
7056 	}
7057 
7058 	if ((error = iwn_init_sensitivity(sc)) != 0) {
7059 		device_printf(sc->sc_dev,
7060 		    "%s: could not set sensitivity, error %d\n", __func__,
7061 		    error);
7062 		return error;
7063 	}
7064 	/* Start periodic calibration timer. */
7065 	sc->calib.state = IWN_CALIB_STATE_ASSOC;
7066 	sc->calib_cnt = 0;
7067 	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
7068 	    sc);
7069 
7070 	/* Link LED always on while associated. */
7071 	iwn_set_led(sc, IWN_LED_LINK, 0, 1);
7072 
7073 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
7074 
7075 	return 0;
7076 }
7077 
7078 /*
7079  * This function is called by upper layer when an ADDBA request is received
7080  * from another STA and before the ADDBA response is sent.
7081  */
7082 static int
7083 iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
7084     int baparamset, int batimeout, int baseqctl)
7085 {
7086 #define MS(_v, _f)	(((_v) & _f) >> _f##_S)
7087 	struct iwn_softc *sc = ni->ni_ic->ic_softc;
7088 	struct iwn_ops *ops = &sc->ops;
7089 	struct iwn_node *wn = (void *)ni;
7090 	struct iwn_node_info node;
7091 	uint16_t ssn;
7092 	uint8_t tid;
7093 	int error;
7094 
7095 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7096 
7097 	tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID);
7098 	ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START);
7099 
7100 	memset(&node, 0, sizeof node);
7101 	node.id = wn->id;
7102 	node.control = IWN_NODE_UPDATE;
7103 	node.flags = IWN_FLAG_SET_ADDBA;
7104 	node.addba_tid = tid;
7105 	node.addba_ssn = htole16(ssn);
7106 	DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
7107 	    wn->id, tid, ssn);
7108 	error = ops->add_node(sc, &node, 1);
7109 	if (error != 0)
7110 		return error;
7111 	return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
7112 #undef MS
7113 }
7114 
7115 /*
7116  * This function is called by upper layer on teardown of an HT-immediate
7117  * Block Ack agreement (eg. uppon receipt of a DELBA frame).
7118  */
7119 static void
7120 iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
7121 {
7122 	struct ieee80211com *ic = ni->ni_ic;
7123 	struct iwn_softc *sc = ic->ic_softc;
7124 	struct iwn_ops *ops = &sc->ops;
7125 	struct iwn_node *wn = (void *)ni;
7126 	struct iwn_node_info node;
7127 	uint8_t tid;
7128 
7129 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7130 
7131 	/* XXX: tid as an argument */
7132 	for (tid = 0; tid < WME_NUM_TID; tid++) {
7133 		if (&ni->ni_rx_ampdu[tid] == rap)
7134 			break;
7135 	}
7136 
7137 	memset(&node, 0, sizeof node);
7138 	node.id = wn->id;
7139 	node.control = IWN_NODE_UPDATE;
7140 	node.flags = IWN_FLAG_SET_DELBA;
7141 	node.delba_tid = tid;
7142 	DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
7143 	(void)ops->add_node(sc, &node, 1);
7144 	sc->sc_ampdu_rx_stop(ni, rap);
7145 }
7146 
7147 static int
7148 iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
7149     int dialogtoken, int baparamset, int batimeout)
7150 {
7151 	struct iwn_softc *sc = ni->ni_ic->ic_softc;
7152 	int qid;
7153 
7154 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7155 
7156 	for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) {
7157 		if (sc->qid2tap[qid] == NULL)
7158 			break;
7159 	}
7160 	if (qid == sc->ntxqs) {
7161 		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n",
7162 		    __func__);
7163 		return 0;
7164 	}
7165 	tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
7166 	if (tap->txa_private == NULL) {
7167 		device_printf(sc->sc_dev,
7168 		    "%s: failed to alloc TX aggregation structure\n", __func__);
7169 		return 0;
7170 	}
7171 	sc->qid2tap[qid] = tap;
7172 	*(int *)tap->txa_private = qid;
7173 	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
7174 	    batimeout);
7175 }
7176 
7177 static int
7178 iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
7179     int code, int baparamset, int batimeout)
7180 {
7181 	struct iwn_softc *sc = ni->ni_ic->ic_softc;
7182 	int qid = *(int *)tap->txa_private;
7183 	uint8_t tid = tap->txa_tid;
7184 	int ret;
7185 
7186 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7187 
7188 	if (code == IEEE80211_STATUS_SUCCESS) {
7189 		ni->ni_txseqs[tid] = tap->txa_start & 0xfff;
7190 		ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid);
7191 		if (ret != 1)
7192 			return ret;
7193 	} else {
7194 		sc->qid2tap[qid] = NULL;
7195 		free(tap->txa_private, M_DEVBUF);
7196 		tap->txa_private = NULL;
7197 	}
7198 	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
7199 }
7200 
7201 /*
7202  * This function is called by upper layer when an ADDBA response is received
7203  * from another STA.
7204  */
7205 static int
7206 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
7207     uint8_t tid)
7208 {
7209 	struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
7210 	struct iwn_softc *sc = ni->ni_ic->ic_softc;
7211 	struct iwn_ops *ops = &sc->ops;
7212 	struct iwn_node *wn = (void *)ni;
7213 	struct iwn_node_info node;
7214 	int error, qid;
7215 
7216 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7217 
7218 	/* Enable TX for the specified RA/TID. */
7219 	wn->disable_tid &= ~(1 << tid);
7220 	memset(&node, 0, sizeof node);
7221 	node.id = wn->id;
7222 	node.control = IWN_NODE_UPDATE;
7223 	node.flags = IWN_FLAG_SET_DISABLE_TID;
7224 	node.disable_tid = htole16(wn->disable_tid);
7225 	error = ops->add_node(sc, &node, 1);
7226 	if (error != 0)
7227 		return 0;
7228 
7229 	if ((error = iwn_nic_lock(sc)) != 0)
7230 		return 0;
7231 	qid = *(int *)tap->txa_private;
7232 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n",
7233 	    __func__, wn->id, tid, tap->txa_start, qid);
7234 	ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff);
7235 	iwn_nic_unlock(sc);
7236 
7237 	iwn_set_link_quality(sc, ni);
7238 	return 1;
7239 }
7240 
7241 static void
7242 iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
7243 {
7244 	struct iwn_softc *sc = ni->ni_ic->ic_softc;
7245 	struct iwn_ops *ops = &sc->ops;
7246 	uint8_t tid = tap->txa_tid;
7247 	int qid;
7248 
7249 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7250 
7251 	sc->sc_addba_stop(ni, tap);
7252 
7253 	if (tap->txa_private == NULL)
7254 		return;
7255 
7256 	qid = *(int *)tap->txa_private;
7257 	if (sc->txq[qid].queued != 0)
7258 		return;
7259 	if (iwn_nic_lock(sc) != 0)
7260 		return;
7261 	ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff);
7262 	iwn_nic_unlock(sc);
7263 	sc->qid2tap[qid] = NULL;
7264 	free(tap->txa_private, M_DEVBUF);
7265 	tap->txa_private = NULL;
7266 }
7267 
7268 static void
7269 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
7270     int qid, uint8_t tid, uint16_t ssn)
7271 {
7272 	struct iwn_node *wn = (void *)ni;
7273 
7274 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7275 
7276 	/* Stop TX scheduler while we're changing its configuration. */
7277 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
7278 	    IWN4965_TXQ_STATUS_CHGACT);
7279 
7280 	/* Assign RA/TID translation to the queue. */
7281 	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
7282 	    wn->id << 4 | tid);
7283 
7284 	/* Enable chain-building mode for the queue. */
7285 	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
7286 
7287 	/* Set starting sequence number from the ADDBA request. */
7288 	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
7289 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
7290 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
7291 
7292 	/* Set scheduler window size. */
7293 	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
7294 	    IWN_SCHED_WINSZ);
7295 	/* Set scheduler frame limit. */
7296 	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
7297 	    IWN_SCHED_LIMIT << 16);
7298 
7299 	/* Enable interrupts for the queue. */
7300 	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
7301 
7302 	/* Mark the queue as active. */
7303 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
7304 	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
7305 	    iwn_tid2fifo[tid] << 1);
7306 }
7307 
7308 static void
7309 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
7310 {
7311 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7312 
7313 	/* Stop TX scheduler while we're changing its configuration. */
7314 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
7315 	    IWN4965_TXQ_STATUS_CHGACT);
7316 
7317 	/* Set starting sequence number from the ADDBA request. */
7318 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
7319 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
7320 
7321 	/* Disable interrupts for the queue. */
7322 	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
7323 
7324 	/* Mark the queue as inactive. */
7325 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
7326 	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
7327 }
7328 
7329 static void
7330 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
7331     int qid, uint8_t tid, uint16_t ssn)
7332 {
7333 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7334 
7335 	struct iwn_node *wn = (void *)ni;
7336 
7337 	/* Stop TX scheduler while we're changing its configuration. */
7338 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7339 	    IWN5000_TXQ_STATUS_CHGACT);
7340 
7341 	/* Assign RA/TID translation to the queue. */
7342 	iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
7343 	    wn->id << 4 | tid);
7344 
7345 	/* Enable chain-building mode for the queue. */
7346 	iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
7347 
7348 	/* Enable aggregation for the queue. */
7349 	iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
7350 
7351 	/* Set starting sequence number from the ADDBA request. */
7352 	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
7353 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
7354 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
7355 
7356 	/* Set scheduler window size and frame limit. */
7357 	iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
7358 	    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
7359 
7360 	/* Enable interrupts for the queue. */
7361 	iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
7362 
7363 	/* Mark the queue as active. */
7364 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7365 	    IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
7366 }
7367 
7368 static void
7369 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
7370 {
7371 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7372 
7373 	/* Stop TX scheduler while we're changing its configuration. */
7374 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7375 	    IWN5000_TXQ_STATUS_CHGACT);
7376 
7377 	/* Disable aggregation for the queue. */
7378 	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
7379 
7380 	/* Set starting sequence number from the ADDBA request. */
7381 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
7382 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
7383 
7384 	/* Disable interrupts for the queue. */
7385 	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
7386 
7387 	/* Mark the queue as inactive. */
7388 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7389 	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
7390 }
7391 
7392 /*
7393  * Query calibration tables from the initialization firmware.  We do this
7394  * only once at first boot.  Called from a process context.
7395  */
7396 static int
7397 iwn5000_query_calibration(struct iwn_softc *sc)
7398 {
7399 	struct iwn5000_calib_config cmd;
7400 	int error;
7401 
7402 	memset(&cmd, 0, sizeof cmd);
7403 	cmd.ucode.once.enable = htole32(0xffffffff);
7404 	cmd.ucode.once.start  = htole32(0xffffffff);
7405 	cmd.ucode.once.send   = htole32(0xffffffff);
7406 	cmd.ucode.flags       = htole32(0xffffffff);
7407 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
7408 	    __func__);
7409 	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
7410 	if (error != 0)
7411 		return error;
7412 
7413 	/* Wait at most two seconds for calibration to complete. */
7414 	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
7415 		error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz);
7416 	return error;
7417 }
7418 
7419 /*
7420  * Send calibration results to the runtime firmware.  These results were
7421  * obtained on first boot from the initialization firmware.
7422  */
7423 static int
7424 iwn5000_send_calibration(struct iwn_softc *sc)
7425 {
7426 	int idx, error;
7427 
7428 	for (idx = 0; idx < IWN5000_PHY_CALIB_MAX_RESULT; idx++) {
7429 		if (!(sc->base_params->calib_need & (1<<idx))) {
7430 			DPRINTF(sc, IWN_DEBUG_CALIBRATE,
7431 			    "No need of calib %d\n",
7432 			    idx);
7433 			continue; /* no need for this calib */
7434 		}
7435 		if (sc->calibcmd[idx].buf == NULL) {
7436 			DPRINTF(sc, IWN_DEBUG_CALIBRATE,
7437 			    "Need calib idx : %d but no available data\n",
7438 			    idx);
7439 			continue;
7440 		}
7441 
7442 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
7443 		    "send calibration result idx=%d len=%d\n", idx,
7444 		    sc->calibcmd[idx].len);
7445 		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
7446 		    sc->calibcmd[idx].len, 0);
7447 		if (error != 0) {
7448 			device_printf(sc->sc_dev,
7449 			    "%s: could not send calibration result, error %d\n",
7450 			    __func__, error);
7451 			return error;
7452 		}
7453 	}
7454 	return 0;
7455 }
7456 
7457 static int
7458 iwn5000_send_wimax_coex(struct iwn_softc *sc)
7459 {
7460 	struct iwn5000_wimax_coex wimax;
7461 
7462 #if 0
7463 	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
7464 		/* Enable WiMAX coexistence for combo adapters. */
7465 		wimax.flags =
7466 		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
7467 		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
7468 		    IWN_WIMAX_COEX_STA_TABLE_VALID |
7469 		    IWN_WIMAX_COEX_ENABLE;
7470 		memcpy(wimax.events, iwn6050_wimax_events,
7471 		    sizeof iwn6050_wimax_events);
7472 	} else
7473 #endif
7474 	{
7475 		/* Disable WiMAX coexistence. */
7476 		wimax.flags = 0;
7477 		memset(wimax.events, 0, sizeof wimax.events);
7478 	}
7479 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
7480 	    __func__);
7481 	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
7482 }
7483 
7484 static int
7485 iwn5000_crystal_calib(struct iwn_softc *sc)
7486 {
7487 	struct iwn5000_phy_calib_crystal cmd;
7488 
7489 	memset(&cmd, 0, sizeof cmd);
7490 	cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
7491 	cmd.ngroups = 1;
7492 	cmd.isvalid = 1;
7493 	cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
7494 	cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
7495 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n",
7496 	    cmd.cap_pin[0], cmd.cap_pin[1]);
7497 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
7498 }
7499 
7500 static int
7501 iwn5000_temp_offset_calib(struct iwn_softc *sc)
7502 {
7503 	struct iwn5000_phy_calib_temp_offset cmd;
7504 
7505 	memset(&cmd, 0, sizeof cmd);
7506 	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
7507 	cmd.ngroups = 1;
7508 	cmd.isvalid = 1;
7509 	if (sc->eeprom_temp != 0)
7510 		cmd.offset = htole16(sc->eeprom_temp);
7511 	else
7512 		cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
7513 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
7514 	    le16toh(cmd.offset));
7515 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
7516 }
7517 
7518 static int
7519 iwn5000_temp_offset_calibv2(struct iwn_softc *sc)
7520 {
7521 	struct iwn5000_phy_calib_temp_offsetv2 cmd;
7522 
7523 	memset(&cmd, 0, sizeof cmd);
7524 	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
7525 	cmd.ngroups = 1;
7526 	cmd.isvalid = 1;
7527 	if (sc->eeprom_temp != 0) {
7528 		cmd.offset_low = htole16(sc->eeprom_temp);
7529 		cmd.offset_high = htole16(sc->eeprom_temp_high);
7530 	} else {
7531 		cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET);
7532 		cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET);
7533 	}
7534 	cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage);
7535 
7536 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
7537 	    "setting radio sensor low offset to %d, high offset to %d, voltage to %d\n",
7538 	    le16toh(cmd.offset_low),
7539 	    le16toh(cmd.offset_high),
7540 	    le16toh(cmd.burnt_voltage_ref));
7541 
7542 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
7543 }
7544 
7545 /*
7546  * This function is called after the runtime firmware notifies us of its
7547  * readiness (called in a process context).
7548  */
7549 static int
7550 iwn4965_post_alive(struct iwn_softc *sc)
7551 {
7552 	int error, qid;
7553 
7554 	if ((error = iwn_nic_lock(sc)) != 0)
7555 		return error;
7556 
7557 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7558 
7559 	/* Clear TX scheduler state in SRAM. */
7560 	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
7561 	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
7562 	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
7563 
7564 	/* Set physical address of TX scheduler rings (1KB aligned). */
7565 	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
7566 
7567 	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
7568 
7569 	/* Disable chain mode for all our 16 queues. */
7570 	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
7571 
7572 	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
7573 		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
7574 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
7575 
7576 		/* Set scheduler window size. */
7577 		iwn_mem_write(sc, sc->sched_base +
7578 		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
7579 		/* Set scheduler frame limit. */
7580 		iwn_mem_write(sc, sc->sched_base +
7581 		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
7582 		    IWN_SCHED_LIMIT << 16);
7583 	}
7584 
7585 	/* Enable interrupts for all our 16 queues. */
7586 	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
7587 	/* Identify TX FIFO rings (0-7). */
7588 	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
7589 
7590 	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
7591 	for (qid = 0; qid < 7; qid++) {
7592 		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
7593 		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
7594 		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
7595 	}
7596 	iwn_nic_unlock(sc);
7597 	return 0;
7598 }
7599 
7600 /*
7601  * This function is called after the initialization or runtime firmware
7602  * notifies us of its readiness (called in a process context).
7603  */
7604 static int
7605 iwn5000_post_alive(struct iwn_softc *sc)
7606 {
7607 	int error, qid;
7608 
7609 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
7610 
7611 	/* Switch to using ICT interrupt mode. */
7612 	iwn5000_ict_reset(sc);
7613 
7614 	if ((error = iwn_nic_lock(sc)) != 0){
7615 		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
7616 		return error;
7617 	}
7618 
7619 	/* Clear TX scheduler state in SRAM. */
7620 	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
7621 	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
7622 	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
7623 
7624 	/* Set physical address of TX scheduler rings (1KB aligned). */
7625 	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
7626 
7627 	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
7628 
7629 	/* Enable chain mode for all queues, except command queue. */
7630 	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
7631 		iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffdf);
7632 	else
7633 		iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
7634 	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
7635 
7636 	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
7637 		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
7638 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
7639 
7640 		iwn_mem_write(sc, sc->sched_base +
7641 		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
7642 		/* Set scheduler window size and frame limit. */
7643 		iwn_mem_write(sc, sc->sched_base +
7644 		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
7645 		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
7646 	}
7647 
7648 	/* Enable interrupts for all our 20 queues. */
7649 	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
7650 	/* Identify TX FIFO rings (0-7). */
7651 	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
7652 
7653 	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
7654 	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) {
7655 		/* Mark TX rings as active. */
7656 		for (qid = 0; qid < 11; qid++) {
7657 			static uint8_t qid2fifo[] = { 3, 2, 1, 0, 0, 4, 2, 5, 4, 7, 5 };
7658 			iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7659 			    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
7660 		}
7661 	} else {
7662 		/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
7663 		for (qid = 0; qid < 7; qid++) {
7664 			static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
7665 			iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7666 			    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
7667 		}
7668 	}
7669 	iwn_nic_unlock(sc);
7670 
7671 	/* Configure WiMAX coexistence for combo adapters. */
7672 	error = iwn5000_send_wimax_coex(sc);
7673 	if (error != 0) {
7674 		device_printf(sc->sc_dev,
7675 		    "%s: could not configure WiMAX coexistence, error %d\n",
7676 		    __func__, error);
7677 		return error;
7678 	}
7679 	if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
7680 		/* Perform crystal calibration. */
7681 		error = iwn5000_crystal_calib(sc);
7682 		if (error != 0) {
7683 			device_printf(sc->sc_dev,
7684 			    "%s: crystal calibration failed, error %d\n",
7685 			    __func__, error);
7686 			return error;
7687 		}
7688 	}
7689 	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
7690 		/* Query calibration from the initialization firmware. */
7691 		if ((error = iwn5000_query_calibration(sc)) != 0) {
7692 			device_printf(sc->sc_dev,
7693 			    "%s: could not query calibration, error %d\n",
7694 			    __func__, error);
7695 			return error;
7696 		}
7697 		/*
7698 		 * We have the calibration results now, reboot with the
7699 		 * runtime firmware (call ourselves recursively!)
7700 		 */
7701 		iwn_hw_stop(sc);
7702 		error = iwn_hw_init(sc);
7703 	} else {
7704 		/* Send calibration results to runtime firmware. */
7705 		error = iwn5000_send_calibration(sc);
7706 	}
7707 
7708 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
7709 
7710 	return error;
7711 }
7712 
7713 /*
7714  * The firmware boot code is small and is intended to be copied directly into
7715  * the NIC internal memory (no DMA transfer).
7716  */
7717 static int
7718 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
7719 {
7720 	int error, ntries;
7721 
7722 	size /= sizeof (uint32_t);
7723 
7724 	if ((error = iwn_nic_lock(sc)) != 0)
7725 		return error;
7726 
7727 	/* Copy microcode image into NIC memory. */
7728 	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
7729 	    (const uint32_t *)ucode, size);
7730 
7731 	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
7732 	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
7733 	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
7734 
7735 	/* Start boot load now. */
7736 	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
7737 
7738 	/* Wait for transfer to complete. */
7739 	for (ntries = 0; ntries < 1000; ntries++) {
7740 		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
7741 		    IWN_BSM_WR_CTRL_START))
7742 			break;
7743 		DELAY(10);
7744 	}
7745 	if (ntries == 1000) {
7746 		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
7747 		    __func__);
7748 		iwn_nic_unlock(sc);
7749 		return ETIMEDOUT;
7750 	}
7751 
7752 	/* Enable boot after power up. */
7753 	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
7754 
7755 	iwn_nic_unlock(sc);
7756 	return 0;
7757 }
7758 
7759 static int
7760 iwn4965_load_firmware(struct iwn_softc *sc)
7761 {
7762 	struct iwn_fw_info *fw = &sc->fw;
7763 	struct iwn_dma_info *dma = &sc->fw_dma;
7764 	int error;
7765 
7766 	/* Copy initialization sections into pre-allocated DMA-safe memory. */
7767 	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
7768 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
7769 	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
7770 	    fw->init.text, fw->init.textsz);
7771 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
7772 
7773 	/* Tell adapter where to find initialization sections. */
7774 	if ((error = iwn_nic_lock(sc)) != 0)
7775 		return error;
7776 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
7777 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
7778 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
7779 	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
7780 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
7781 	iwn_nic_unlock(sc);
7782 
7783 	/* Load firmware boot code. */
7784 	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
7785 	if (error != 0) {
7786 		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
7787 		    __func__);
7788 		return error;
7789 	}
7790 	/* Now press "execute". */
7791 	IWN_WRITE(sc, IWN_RESET, 0);
7792 
7793 	/* Wait at most one second for first alive notification. */
7794 	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
7795 		device_printf(sc->sc_dev,
7796 		    "%s: timeout waiting for adapter to initialize, error %d\n",
7797 		    __func__, error);
7798 		return error;
7799 	}
7800 
7801 	/* Retrieve current temperature for initial TX power calibration. */
7802 	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
7803 	sc->temp = iwn4965_get_temperature(sc);
7804 
7805 	/* Copy runtime sections into pre-allocated DMA-safe memory. */
7806 	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
7807 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
7808 	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
7809 	    fw->main.text, fw->main.textsz);
7810 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
7811 
7812 	/* Tell adapter where to find runtime sections. */
7813 	if ((error = iwn_nic_lock(sc)) != 0)
7814 		return error;
7815 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
7816 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
7817 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
7818 	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
7819 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
7820 	    IWN_FW_UPDATED | fw->main.textsz);
7821 	iwn_nic_unlock(sc);
7822 
7823 	return 0;
7824 }
7825 
7826 static int
7827 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
7828     const uint8_t *section, int size)
7829 {
7830 	struct iwn_dma_info *dma = &sc->fw_dma;
7831 	int error;
7832 
7833 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7834 
7835 	/* Copy firmware section into pre-allocated DMA-safe memory. */
7836 	memcpy(dma->vaddr, section, size);
7837 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
7838 
7839 	if ((error = iwn_nic_lock(sc)) != 0)
7840 		return error;
7841 
7842 	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
7843 	    IWN_FH_TX_CONFIG_DMA_PAUSE);
7844 
7845 	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
7846 	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
7847 	    IWN_LOADDR(dma->paddr));
7848 	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
7849 	    IWN_HIADDR(dma->paddr) << 28 | size);
7850 	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
7851 	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
7852 	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
7853 	    IWN_FH_TXBUF_STATUS_TFBD_VALID);
7854 
7855 	/* Kick Flow Handler to start DMA transfer. */
7856 	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
7857 	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
7858 
7859 	iwn_nic_unlock(sc);
7860 
7861 	/* Wait at most five seconds for FH DMA transfer to complete. */
7862 	return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
7863 }
7864 
7865 static int
7866 iwn5000_load_firmware(struct iwn_softc *sc)
7867 {
7868 	struct iwn_fw_part *fw;
7869 	int error;
7870 
7871 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7872 
7873 	/* Load the initialization firmware on first boot only. */
7874 	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
7875 	    &sc->fw.main : &sc->fw.init;
7876 
7877 	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
7878 	    fw->text, fw->textsz);
7879 	if (error != 0) {
7880 		device_printf(sc->sc_dev,
7881 		    "%s: could not load firmware %s section, error %d\n",
7882 		    __func__, ".text", error);
7883 		return error;
7884 	}
7885 	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
7886 	    fw->data, fw->datasz);
7887 	if (error != 0) {
7888 		device_printf(sc->sc_dev,
7889 		    "%s: could not load firmware %s section, error %d\n",
7890 		    __func__, ".data", error);
7891 		return error;
7892 	}
7893 
7894 	/* Now press "execute". */
7895 	IWN_WRITE(sc, IWN_RESET, 0);
7896 	return 0;
7897 }
7898 
7899 /*
7900  * Extract text and data sections from a legacy firmware image.
7901  */
7902 static int
7903 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
7904 {
7905 	const uint32_t *ptr;
7906 	size_t hdrlen = 24;
7907 	uint32_t rev;
7908 
7909 	ptr = (const uint32_t *)fw->data;
7910 	rev = le32toh(*ptr++);
7911 
7912 	sc->ucode_rev = rev;
7913 
7914 	/* Check firmware API version. */
7915 	if (IWN_FW_API(rev) <= 1) {
7916 		device_printf(sc->sc_dev,
7917 		    "%s: bad firmware, need API version >=2\n", __func__);
7918 		return EINVAL;
7919 	}
7920 	if (IWN_FW_API(rev) >= 3) {
7921 		/* Skip build number (version 2 header). */
7922 		hdrlen += 4;
7923 		ptr++;
7924 	}
7925 	if (fw->size < hdrlen) {
7926 		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
7927 		    __func__, fw->size);
7928 		return EINVAL;
7929 	}
7930 	fw->main.textsz = le32toh(*ptr++);
7931 	fw->main.datasz = le32toh(*ptr++);
7932 	fw->init.textsz = le32toh(*ptr++);
7933 	fw->init.datasz = le32toh(*ptr++);
7934 	fw->boot.textsz = le32toh(*ptr++);
7935 
7936 	/* Check that all firmware sections fit. */
7937 	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
7938 	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
7939 		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
7940 		    __func__, fw->size);
7941 		return EINVAL;
7942 	}
7943 
7944 	/* Get pointers to firmware sections. */
7945 	fw->main.text = (const uint8_t *)ptr;
7946 	fw->main.data = fw->main.text + fw->main.textsz;
7947 	fw->init.text = fw->main.data + fw->main.datasz;
7948 	fw->init.data = fw->init.text + fw->init.textsz;
7949 	fw->boot.text = fw->init.data + fw->init.datasz;
7950 	return 0;
7951 }
7952 
7953 /*
7954  * Extract text and data sections from a TLV firmware image.
7955  */
7956 static int
7957 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
7958     uint16_t alt)
7959 {
7960 	const struct iwn_fw_tlv_hdr *hdr;
7961 	const struct iwn_fw_tlv *tlv;
7962 	const uint8_t *ptr, *end;
7963 	uint64_t altmask;
7964 	uint32_t len, tmp;
7965 
7966 	if (fw->size < sizeof (*hdr)) {
7967 		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
7968 		    __func__, fw->size);
7969 		return EINVAL;
7970 	}
7971 	hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
7972 	if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
7973 		device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n",
7974 		    __func__, le32toh(hdr->signature));
7975 		return EINVAL;
7976 	}
7977 	DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr,
7978 	    le32toh(hdr->build));
7979 	sc->ucode_rev = le32toh(hdr->rev);
7980 
7981 	/*
7982 	 * Select the closest supported alternative that is less than
7983 	 * or equal to the specified one.
7984 	 */
7985 	altmask = le64toh(hdr->altmask);
7986 	while (alt > 0 && !(altmask & (1ULL << alt)))
7987 		alt--;	/* Downgrade. */
7988 	DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt);
7989 
7990 	ptr = (const uint8_t *)(hdr + 1);
7991 	end = (const uint8_t *)(fw->data + fw->size);
7992 
7993 	/* Parse type-length-value fields. */
7994 	while (ptr + sizeof (*tlv) <= end) {
7995 		tlv = (const struct iwn_fw_tlv *)ptr;
7996 		len = le32toh(tlv->len);
7997 
7998 		ptr += sizeof (*tlv);
7999 		if (ptr + len > end) {
8000 			device_printf(sc->sc_dev,
8001 			    "%s: firmware too short: %zu bytes\n", __func__,
8002 			    fw->size);
8003 			return EINVAL;
8004 		}
8005 		/* Skip other alternatives. */
8006 		if (tlv->alt != 0 && tlv->alt != htole16(alt))
8007 			goto next;
8008 
8009 		switch (le16toh(tlv->type)) {
8010 		case IWN_FW_TLV_MAIN_TEXT:
8011 			fw->main.text = ptr;
8012 			fw->main.textsz = len;
8013 			break;
8014 		case IWN_FW_TLV_MAIN_DATA:
8015 			fw->main.data = ptr;
8016 			fw->main.datasz = len;
8017 			break;
8018 		case IWN_FW_TLV_INIT_TEXT:
8019 			fw->init.text = ptr;
8020 			fw->init.textsz = len;
8021 			break;
8022 		case IWN_FW_TLV_INIT_DATA:
8023 			fw->init.data = ptr;
8024 			fw->init.datasz = len;
8025 			break;
8026 		case IWN_FW_TLV_BOOT_TEXT:
8027 			fw->boot.text = ptr;
8028 			fw->boot.textsz = len;
8029 			break;
8030 		case IWN_FW_TLV_ENH_SENS:
8031 			if (!len)
8032 				sc->sc_flags |= IWN_FLAG_ENH_SENS;
8033 			break;
8034 		case IWN_FW_TLV_PHY_CALIB:
8035 			tmp = le32toh(*ptr);
8036 			if (tmp < 253) {
8037 				sc->reset_noise_gain = tmp;
8038 				sc->noise_gain = tmp + 1;
8039 			}
8040 			break;
8041 		case IWN_FW_TLV_PAN:
8042 			sc->sc_flags |= IWN_FLAG_PAN_SUPPORT;
8043 			DPRINTF(sc, IWN_DEBUG_RESET,
8044 			    "PAN Support found: %d\n", 1);
8045 			break;
8046 		case IWN_FW_TLV_FLAGS:
8047 			if (len < sizeof(uint32_t))
8048 				break;
8049 			if (len % sizeof(uint32_t))
8050 				break;
8051 			sc->tlv_feature_flags = le32toh(*ptr);
8052 			DPRINTF(sc, IWN_DEBUG_RESET,
8053 			    "%s: feature: 0x%08x\n",
8054 			    __func__,
8055 			    sc->tlv_feature_flags);
8056 			break;
8057 		case IWN_FW_TLV_PBREQ_MAXLEN:
8058 		case IWN_FW_TLV_RUNT_EVTLOG_PTR:
8059 		case IWN_FW_TLV_RUNT_EVTLOG_SIZE:
8060 		case IWN_FW_TLV_RUNT_ERRLOG_PTR:
8061 		case IWN_FW_TLV_INIT_EVTLOG_PTR:
8062 		case IWN_FW_TLV_INIT_EVTLOG_SIZE:
8063 		case IWN_FW_TLV_INIT_ERRLOG_PTR:
8064 		case IWN_FW_TLV_WOWLAN_INST:
8065 		case IWN_FW_TLV_WOWLAN_DATA:
8066 			DPRINTF(sc, IWN_DEBUG_RESET,
8067 			    "TLV type %d recognized but not handled\n",
8068 			    le16toh(tlv->type));
8069 			break;
8070 		default:
8071 			DPRINTF(sc, IWN_DEBUG_RESET,
8072 			    "TLV type %d not handled\n", le16toh(tlv->type));
8073 			break;
8074 		}
8075  next:		/* TLV fields are 32-bit aligned. */
8076 		ptr += (len + 3) & ~3;
8077 	}
8078 	return 0;
8079 }
8080 
8081 static int
8082 iwn_read_firmware(struct iwn_softc *sc)
8083 {
8084 	struct iwn_fw_info *fw = &sc->fw;
8085 	int error;
8086 
8087 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8088 
8089 	IWN_UNLOCK(sc);
8090 
8091 	memset(fw, 0, sizeof (*fw));
8092 
8093 	/* Read firmware image from filesystem. */
8094 	sc->fw_fp = firmware_get(sc->fwname);
8095 	if (sc->fw_fp == NULL) {
8096 		device_printf(sc->sc_dev, "%s: could not read firmware %s\n",
8097 		    __func__, sc->fwname);
8098 		IWN_LOCK(sc);
8099 		return EINVAL;
8100 	}
8101 	IWN_LOCK(sc);
8102 
8103 	fw->size = sc->fw_fp->datasize;
8104 	fw->data = (const uint8_t *)sc->fw_fp->data;
8105 	if (fw->size < sizeof (uint32_t)) {
8106 		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
8107 		    __func__, fw->size);
8108 		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
8109 		sc->fw_fp = NULL;
8110 		return EINVAL;
8111 	}
8112 
8113 	/* Retrieve text and data sections. */
8114 	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
8115 		error = iwn_read_firmware_leg(sc, fw);
8116 	else
8117 		error = iwn_read_firmware_tlv(sc, fw, 1);
8118 	if (error != 0) {
8119 		device_printf(sc->sc_dev,
8120 		    "%s: could not read firmware sections, error %d\n",
8121 		    __func__, error);
8122 		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
8123 		sc->fw_fp = NULL;
8124 		return error;
8125 	}
8126 
8127 	device_printf(sc->sc_dev, "%s: ucode rev=0x%08x\n", __func__, sc->ucode_rev);
8128 
8129 	/* Make sure text and data sections fit in hardware memory. */
8130 	if (fw->main.textsz > sc->fw_text_maxsz ||
8131 	    fw->main.datasz > sc->fw_data_maxsz ||
8132 	    fw->init.textsz > sc->fw_text_maxsz ||
8133 	    fw->init.datasz > sc->fw_data_maxsz ||
8134 	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
8135 	    (fw->boot.textsz & 3) != 0) {
8136 		device_printf(sc->sc_dev, "%s: firmware sections too large\n",
8137 		    __func__);
8138 		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
8139 		sc->fw_fp = NULL;
8140 		return EINVAL;
8141 	}
8142 
8143 	/* We can proceed with loading the firmware. */
8144 	return 0;
8145 }
8146 
8147 static int
8148 iwn_clock_wait(struct iwn_softc *sc)
8149 {
8150 	int ntries;
8151 
8152 	/* Set "initialization complete" bit. */
8153 	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
8154 
8155 	/* Wait for clock stabilization. */
8156 	for (ntries = 0; ntries < 2500; ntries++) {
8157 		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
8158 			return 0;
8159 		DELAY(10);
8160 	}
8161 	device_printf(sc->sc_dev,
8162 	    "%s: timeout waiting for clock stabilization\n", __func__);
8163 	return ETIMEDOUT;
8164 }
8165 
8166 static int
8167 iwn_apm_init(struct iwn_softc *sc)
8168 {
8169 	uint32_t reg;
8170 	int error;
8171 
8172 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8173 
8174 	/* Disable L0s exit timer (NMI bug workaround). */
8175 	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
8176 	/* Don't wait for ICH L0s (ICH bug workaround). */
8177 	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
8178 
8179 	/* Set FH wait threshold to max (HW bug under stress workaround). */
8180 	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
8181 
8182 	/* Enable HAP INTA to move adapter from L1a to L0s. */
8183 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
8184 
8185 	/* Retrieve PCIe Active State Power Management (ASPM). */
8186 	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
8187 	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
8188 	if (reg & 0x02)	/* L1 Entry enabled. */
8189 		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
8190 	else
8191 		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
8192 
8193 	if (sc->base_params->pll_cfg_val)
8194 		IWN_SETBITS(sc, IWN_ANA_PLL, sc->base_params->pll_cfg_val);
8195 
8196 	/* Wait for clock stabilization before accessing prph. */
8197 	if ((error = iwn_clock_wait(sc)) != 0)
8198 		return error;
8199 
8200 	if ((error = iwn_nic_lock(sc)) != 0)
8201 		return error;
8202 	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
8203 		/* Enable DMA and BSM (Bootstrap State Machine). */
8204 		iwn_prph_write(sc, IWN_APMG_CLK_EN,
8205 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
8206 		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
8207 	} else {
8208 		/* Enable DMA. */
8209 		iwn_prph_write(sc, IWN_APMG_CLK_EN,
8210 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
8211 	}
8212 	DELAY(20);
8213 	/* Disable L1-Active. */
8214 	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
8215 	iwn_nic_unlock(sc);
8216 
8217 	return 0;
8218 }
8219 
8220 static void
8221 iwn_apm_stop_master(struct iwn_softc *sc)
8222 {
8223 	int ntries;
8224 
8225 	/* Stop busmaster DMA activity. */
8226 	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
8227 	for (ntries = 0; ntries < 100; ntries++) {
8228 		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
8229 			return;
8230 		DELAY(10);
8231 	}
8232 	device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
8233 }
8234 
8235 static void
8236 iwn_apm_stop(struct iwn_softc *sc)
8237 {
8238 	iwn_apm_stop_master(sc);
8239 
8240 	/* Reset the entire device. */
8241 	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
8242 	DELAY(10);
8243 	/* Clear "initialization complete" bit. */
8244 	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
8245 }
8246 
8247 static int
8248 iwn4965_nic_config(struct iwn_softc *sc)
8249 {
8250 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8251 
8252 	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
8253 		/*
8254 		 * I don't believe this to be correct but this is what the
8255 		 * vendor driver is doing. Probably the bits should not be
8256 		 * shifted in IWN_RFCFG_*.
8257 		 */
8258 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
8259 		    IWN_RFCFG_TYPE(sc->rfcfg) |
8260 		    IWN_RFCFG_STEP(sc->rfcfg) |
8261 		    IWN_RFCFG_DASH(sc->rfcfg));
8262 	}
8263 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
8264 	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
8265 	return 0;
8266 }
8267 
8268 static int
8269 iwn5000_nic_config(struct iwn_softc *sc)
8270 {
8271 	uint32_t tmp;
8272 	int error;
8273 
8274 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8275 
8276 	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
8277 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
8278 		    IWN_RFCFG_TYPE(sc->rfcfg) |
8279 		    IWN_RFCFG_STEP(sc->rfcfg) |
8280 		    IWN_RFCFG_DASH(sc->rfcfg));
8281 	}
8282 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
8283 	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
8284 
8285 	if ((error = iwn_nic_lock(sc)) != 0)
8286 		return error;
8287 	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
8288 
8289 	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
8290 		/*
8291 		 * Select first Switching Voltage Regulator (1.32V) to
8292 		 * solve a stability issue related to noisy DC2DC line
8293 		 * in the silicon of 1000 Series.
8294 		 */
8295 		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
8296 		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
8297 		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
8298 		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
8299 	}
8300 	iwn_nic_unlock(sc);
8301 
8302 	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
8303 		/* Use internal power amplifier only. */
8304 		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
8305 	}
8306 	if (sc->base_params->additional_nic_config && sc->calib_ver >= 6) {
8307 		/* Indicate that ROM calibration version is >=6. */
8308 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
8309 	}
8310 	if (sc->base_params->additional_gp_drv_bit)
8311 		IWN_SETBITS(sc, IWN_GP_DRIVER,
8312 		    sc->base_params->additional_gp_drv_bit);
8313 	return 0;
8314 }
8315 
8316 /*
8317  * Take NIC ownership over Intel Active Management Technology (AMT).
8318  */
8319 static int
8320 iwn_hw_prepare(struct iwn_softc *sc)
8321 {
8322 	int ntries;
8323 
8324 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8325 
8326 	/* Check if hardware is ready. */
8327 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
8328 	for (ntries = 0; ntries < 5; ntries++) {
8329 		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
8330 		    IWN_HW_IF_CONFIG_NIC_READY)
8331 			return 0;
8332 		DELAY(10);
8333 	}
8334 
8335 	/* Hardware not ready, force into ready state. */
8336 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
8337 	for (ntries = 0; ntries < 15000; ntries++) {
8338 		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
8339 		    IWN_HW_IF_CONFIG_PREPARE_DONE))
8340 			break;
8341 		DELAY(10);
8342 	}
8343 	if (ntries == 15000)
8344 		return ETIMEDOUT;
8345 
8346 	/* Hardware should be ready now. */
8347 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
8348 	for (ntries = 0; ntries < 5; ntries++) {
8349 		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
8350 		    IWN_HW_IF_CONFIG_NIC_READY)
8351 			return 0;
8352 		DELAY(10);
8353 	}
8354 	return ETIMEDOUT;
8355 }
8356 
8357 static int
8358 iwn_hw_init(struct iwn_softc *sc)
8359 {
8360 	struct iwn_ops *ops = &sc->ops;
8361 	int error, chnl, qid;
8362 
8363 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
8364 
8365 	/* Clear pending interrupts. */
8366 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
8367 
8368 	if ((error = iwn_apm_init(sc)) != 0) {
8369 		device_printf(sc->sc_dev,
8370 		    "%s: could not power ON adapter, error %d\n", __func__,
8371 		    error);
8372 		return error;
8373 	}
8374 
8375 	/* Select VMAIN power source. */
8376 	if ((error = iwn_nic_lock(sc)) != 0)
8377 		return error;
8378 	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
8379 	iwn_nic_unlock(sc);
8380 
8381 	/* Perform adapter-specific initialization. */
8382 	if ((error = ops->nic_config(sc)) != 0)
8383 		return error;
8384 
8385 	/* Initialize RX ring. */
8386 	if ((error = iwn_nic_lock(sc)) != 0)
8387 		return error;
8388 	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
8389 	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
8390 	/* Set physical address of RX ring (256-byte aligned). */
8391 	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
8392 	/* Set physical address of RX status (16-byte aligned). */
8393 	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
8394 	/* Enable RX. */
8395 	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
8396 	    IWN_FH_RX_CONFIG_ENA           |
8397 	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
8398 	    IWN_FH_RX_CONFIG_IRQ_DST_HOST  |
8399 	    IWN_FH_RX_CONFIG_SINGLE_FRAME  |
8400 	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
8401 	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
8402 	iwn_nic_unlock(sc);
8403 	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
8404 
8405 	if ((error = iwn_nic_lock(sc)) != 0)
8406 		return error;
8407 
8408 	/* Initialize TX scheduler. */
8409 	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
8410 
8411 	/* Set physical address of "keep warm" page (16-byte aligned). */
8412 	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
8413 
8414 	/* Initialize TX rings. */
8415 	for (qid = 0; qid < sc->ntxqs; qid++) {
8416 		struct iwn_tx_ring *txq = &sc->txq[qid];
8417 
8418 		/* Set physical address of TX ring (256-byte aligned). */
8419 		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
8420 		    txq->desc_dma.paddr >> 8);
8421 	}
8422 	iwn_nic_unlock(sc);
8423 
8424 	/* Enable DMA channels. */
8425 	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
8426 		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
8427 		    IWN_FH_TX_CONFIG_DMA_ENA |
8428 		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
8429 	}
8430 
8431 	/* Clear "radio off" and "commands blocked" bits. */
8432 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
8433 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
8434 
8435 	/* Clear pending interrupts. */
8436 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
8437 	/* Enable interrupt coalescing. */
8438 	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
8439 	/* Enable interrupts. */
8440 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
8441 
8442 	/* _Really_ make sure "radio off" bit is cleared! */
8443 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
8444 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
8445 
8446 	/* Enable shadow registers. */
8447 	if (sc->base_params->shadow_reg_enable)
8448 		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
8449 
8450 	if ((error = ops->load_firmware(sc)) != 0) {
8451 		device_printf(sc->sc_dev,
8452 		    "%s: could not load firmware, error %d\n", __func__,
8453 		    error);
8454 		return error;
8455 	}
8456 	/* Wait at most one second for firmware alive notification. */
8457 	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
8458 		device_printf(sc->sc_dev,
8459 		    "%s: timeout waiting for adapter to initialize, error %d\n",
8460 		    __func__, error);
8461 		return error;
8462 	}
8463 	/* Do post-firmware initialization. */
8464 
8465 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
8466 
8467 	return ops->post_alive(sc);
8468 }
8469 
8470 static void
8471 iwn_hw_stop(struct iwn_softc *sc)
8472 {
8473 	int chnl, qid, ntries;
8474 
8475 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8476 
8477 	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
8478 
8479 	/* Disable interrupts. */
8480 	IWN_WRITE(sc, IWN_INT_MASK, 0);
8481 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
8482 	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
8483 	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
8484 
8485 	/* Make sure we no longer hold the NIC lock. */
8486 	iwn_nic_unlock(sc);
8487 
8488 	/* Stop TX scheduler. */
8489 	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
8490 
8491 	/* Stop all DMA channels. */
8492 	if (iwn_nic_lock(sc) == 0) {
8493 		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
8494 			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
8495 			for (ntries = 0; ntries < 200; ntries++) {
8496 				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
8497 				    IWN_FH_TX_STATUS_IDLE(chnl))
8498 					break;
8499 				DELAY(10);
8500 			}
8501 		}
8502 		iwn_nic_unlock(sc);
8503 	}
8504 
8505 	/* Stop RX ring. */
8506 	iwn_reset_rx_ring(sc, &sc->rxq);
8507 
8508 	/* Reset all TX rings. */
8509 	for (qid = 0; qid < sc->ntxqs; qid++)
8510 		iwn_reset_tx_ring(sc, &sc->txq[qid]);
8511 
8512 	if (iwn_nic_lock(sc) == 0) {
8513 		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
8514 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
8515 		iwn_nic_unlock(sc);
8516 	}
8517 	DELAY(5);
8518 	/* Power OFF adapter. */
8519 	iwn_apm_stop(sc);
8520 }
8521 
8522 static void
8523 iwn_radio_on(void *arg0, int pending)
8524 {
8525 	struct iwn_softc *sc = arg0;
8526 	struct ifnet *ifp = sc->sc_ifp;
8527 	struct ieee80211com *ic = ifp->if_l2com;
8528 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8529 
8530 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8531 
8532 	if (vap != NULL) {
8533 		iwn_init(sc);
8534 		ieee80211_init(vap);
8535 	}
8536 }
8537 
8538 static void
8539 iwn_radio_off(void *arg0, int pending)
8540 {
8541 	struct iwn_softc *sc = arg0;
8542 	struct ifnet *ifp = sc->sc_ifp;
8543 	struct ieee80211com *ic = ifp->if_l2com;
8544 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8545 
8546 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8547 
8548 	iwn_stop(sc);
8549 	if (vap != NULL)
8550 		ieee80211_stop(vap);
8551 
8552 	/* Enable interrupts to get RF toggle notification. */
8553 	IWN_LOCK(sc);
8554 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
8555 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
8556 	IWN_UNLOCK(sc);
8557 }
8558 
8559 static void
8560 iwn_panicked(void *arg0, int pending)
8561 {
8562 	struct iwn_softc *sc = arg0;
8563 	struct ifnet *ifp = sc->sc_ifp;
8564 	struct ieee80211com *ic = ifp->if_l2com;
8565 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8566 	int error;
8567 
8568 	if (vap == NULL) {
8569 		printf("%s: null vap\n", __func__);
8570 		return;
8571 	}
8572 
8573 	device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
8574 	    "resetting...\n", __func__, vap->iv_state);
8575 
8576 	IWN_LOCK(sc);
8577 
8578 	iwn_stop_locked(sc);
8579 	iwn_init_locked(sc);
8580 	if (vap->iv_state >= IEEE80211_S_AUTH &&
8581 	    (error = iwn_auth(sc, vap)) != 0) {
8582 		device_printf(sc->sc_dev,
8583 		    "%s: could not move to auth state\n", __func__);
8584 	}
8585 	if (vap->iv_state >= IEEE80211_S_RUN &&
8586 	    (error = iwn_run(sc, vap)) != 0) {
8587 		device_printf(sc->sc_dev,
8588 		    "%s: could not move to run state\n", __func__);
8589 	}
8590 
8591 	/* Only run start once the NIC is in a useful state, like associated */
8592 	iwn_start_locked(sc->sc_ifp);
8593 
8594 	IWN_UNLOCK(sc);
8595 }
8596 
8597 static void
8598 iwn_init_locked(struct iwn_softc *sc)
8599 {
8600 	struct ifnet *ifp = sc->sc_ifp;
8601 	int error;
8602 
8603 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
8604 
8605 	IWN_LOCK_ASSERT(sc);
8606 
8607 	if ((error = iwn_hw_prepare(sc)) != 0) {
8608 		device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
8609 		    __func__, error);
8610 		goto fail;
8611 	}
8612 
8613 	/* Initialize interrupt mask to default value. */
8614 	sc->int_mask = IWN_INT_MASK_DEF;
8615 	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
8616 
8617 	/* Check that the radio is not disabled by hardware switch. */
8618 	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
8619 		device_printf(sc->sc_dev,
8620 		    "radio is disabled by hardware switch\n");
8621 		/* Enable interrupts to get RF toggle notifications. */
8622 		IWN_WRITE(sc, IWN_INT, 0xffffffff);
8623 		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
8624 		return;
8625 	}
8626 
8627 	/* Read firmware images from the filesystem. */
8628 	if ((error = iwn_read_firmware(sc)) != 0) {
8629 		device_printf(sc->sc_dev,
8630 		    "%s: could not read firmware, error %d\n", __func__,
8631 		    error);
8632 		goto fail;
8633 	}
8634 
8635 	/* Initialize hardware and upload firmware. */
8636 	error = iwn_hw_init(sc);
8637 	firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
8638 	sc->fw_fp = NULL;
8639 	if (error != 0) {
8640 		device_printf(sc->sc_dev,
8641 		    "%s: could not initialize hardware, error %d\n", __func__,
8642 		    error);
8643 		goto fail;
8644 	}
8645 
8646 	/* Configure adapter now that it is ready. */
8647 	if ((error = iwn_config(sc)) != 0) {
8648 		device_printf(sc->sc_dev,
8649 		    "%s: could not configure device, error %d\n", __func__,
8650 		    error);
8651 		goto fail;
8652 	}
8653 
8654 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
8655 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
8656 
8657 	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
8658 
8659 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
8660 
8661 	return;
8662 
8663 fail:	iwn_stop_locked(sc);
8664 	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
8665 }
8666 
8667 static void
8668 iwn_init(void *arg)
8669 {
8670 	struct iwn_softc *sc = arg;
8671 	struct ifnet *ifp = sc->sc_ifp;
8672 	struct ieee80211com *ic = ifp->if_l2com;
8673 
8674 	IWN_LOCK(sc);
8675 	iwn_init_locked(sc);
8676 	IWN_UNLOCK(sc);
8677 
8678 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
8679 		ieee80211_start_all(ic);
8680 }
8681 
8682 static void
8683 iwn_stop_locked(struct iwn_softc *sc)
8684 {
8685 	struct ifnet *ifp = sc->sc_ifp;
8686 
8687 	IWN_LOCK_ASSERT(sc);
8688 
8689 	sc->sc_is_scanning = 0;
8690 	sc->sc_tx_timer = 0;
8691 	callout_stop(&sc->watchdog_to);
8692 	callout_stop(&sc->calib_to);
8693 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
8694 
8695 	/* Power OFF hardware. */
8696 	iwn_hw_stop(sc);
8697 }
8698 
8699 static void
8700 iwn_stop(struct iwn_softc *sc)
8701 {
8702 	IWN_LOCK(sc);
8703 	iwn_stop_locked(sc);
8704 	IWN_UNLOCK(sc);
8705 }
8706 
8707 /*
8708  * Callback from net80211 to start a scan.
8709  */
8710 static void
8711 iwn_scan_start(struct ieee80211com *ic)
8712 {
8713 	struct ifnet *ifp = ic->ic_ifp;
8714 	struct iwn_softc *sc = ifp->if_softc;
8715 
8716 	IWN_LOCK(sc);
8717 	/* make the link LED blink while we're scanning */
8718 	iwn_set_led(sc, IWN_LED_LINK, 20, 2);
8719 	IWN_UNLOCK(sc);
8720 }
8721 
8722 /*
8723  * Callback from net80211 to terminate a scan.
8724  */
8725 static void
8726 iwn_scan_end(struct ieee80211com *ic)
8727 {
8728 	struct ifnet *ifp = ic->ic_ifp;
8729 	struct iwn_softc *sc = ifp->if_softc;
8730 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8731 
8732 	IWN_LOCK(sc);
8733 	if (vap->iv_state == IEEE80211_S_RUN) {
8734 		/* Set link LED to ON status if we are associated */
8735 		iwn_set_led(sc, IWN_LED_LINK, 0, 1);
8736 	}
8737 	IWN_UNLOCK(sc);
8738 }
8739 
8740 /*
8741  * Callback from net80211 to force a channel change.
8742  */
8743 static void
8744 iwn_set_channel(struct ieee80211com *ic)
8745 {
8746 	const struct ieee80211_channel *c = ic->ic_curchan;
8747 	struct ifnet *ifp = ic->ic_ifp;
8748 	struct iwn_softc *sc = ifp->if_softc;
8749 	int error;
8750 
8751 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8752 
8753 	IWN_LOCK(sc);
8754 	sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
8755 	sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
8756 	sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
8757 	sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
8758 
8759 	/*
8760 	 * Only need to set the channel in Monitor mode. AP scanning and auth
8761 	 * are already taken care of by their respective firmware commands.
8762 	 */
8763 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8764 		error = iwn_config(sc);
8765 		if (error != 0)
8766 		device_printf(sc->sc_dev,
8767 		    "%s: error %d settting channel\n", __func__, error);
8768 	}
8769 	IWN_UNLOCK(sc);
8770 }
8771 
8772 /*
8773  * Callback from net80211 to start scanning of the current channel.
8774  */
8775 static void
8776 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
8777 {
8778 	struct ieee80211vap *vap = ss->ss_vap;
8779 	struct ieee80211com *ic = vap->iv_ic;
8780 	struct iwn_softc *sc = ic->ic_softc;
8781 	int error;
8782 
8783 	IWN_LOCK(sc);
8784 	error = iwn_scan(sc, vap, ss, ic->ic_curchan);
8785 	IWN_UNLOCK(sc);
8786 	if (error != 0)
8787 		ieee80211_cancel_scan(vap);
8788 }
8789 
8790 /*
8791  * Callback from net80211 to handle the minimum dwell time being met.
8792  * The intent is to terminate the scan but we just let the firmware
8793  * notify us when it's finished as we have no safe way to abort it.
8794  */
8795 static void
8796 iwn_scan_mindwell(struct ieee80211_scan_state *ss)
8797 {
8798 	/* NB: don't try to abort scan; wait for firmware to finish */
8799 }
8800 
8801 static void
8802 iwn_hw_reset(void *arg0, int pending)
8803 {
8804 	struct iwn_softc *sc = arg0;
8805 	struct ifnet *ifp = sc->sc_ifp;
8806 	struct ieee80211com *ic = ifp->if_l2com;
8807 
8808 	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8809 
8810 	iwn_stop(sc);
8811 	iwn_init(sc);
8812 	ieee80211_notify_radio(ic, 1);
8813 }
8814 #ifdef	IWN_DEBUG
8815 #define	IWN_DESC(x) case x:	return #x
8816 
8817 /*
8818  * Translate CSR code to string
8819  */
8820 static char *iwn_get_csr_string(int csr)
8821 {
8822 	switch (csr) {
8823 		IWN_DESC(IWN_HW_IF_CONFIG);
8824 		IWN_DESC(IWN_INT_COALESCING);
8825 		IWN_DESC(IWN_INT);
8826 		IWN_DESC(IWN_INT_MASK);
8827 		IWN_DESC(IWN_FH_INT);
8828 		IWN_DESC(IWN_GPIO_IN);
8829 		IWN_DESC(IWN_RESET);
8830 		IWN_DESC(IWN_GP_CNTRL);
8831 		IWN_DESC(IWN_HW_REV);
8832 		IWN_DESC(IWN_EEPROM);
8833 		IWN_DESC(IWN_EEPROM_GP);
8834 		IWN_DESC(IWN_OTP_GP);
8835 		IWN_DESC(IWN_GIO);
8836 		IWN_DESC(IWN_GP_UCODE);
8837 		IWN_DESC(IWN_GP_DRIVER);
8838 		IWN_DESC(IWN_UCODE_GP1);
8839 		IWN_DESC(IWN_UCODE_GP2);
8840 		IWN_DESC(IWN_LED);
8841 		IWN_DESC(IWN_DRAM_INT_TBL);
8842 		IWN_DESC(IWN_GIO_CHICKEN);
8843 		IWN_DESC(IWN_ANA_PLL);
8844 		IWN_DESC(IWN_HW_REV_WA);
8845 		IWN_DESC(IWN_DBG_HPET_MEM);
8846 	default:
8847 		return "UNKNOWN CSR";
8848 	}
8849 }
8850 
8851 /*
8852  * This function print firmware register
8853  */
8854 static void
8855 iwn_debug_register(struct iwn_softc *sc)
8856 {
8857 	int i;
8858 	static const uint32_t csr_tbl[] = {
8859 		IWN_HW_IF_CONFIG,
8860 		IWN_INT_COALESCING,
8861 		IWN_INT,
8862 		IWN_INT_MASK,
8863 		IWN_FH_INT,
8864 		IWN_GPIO_IN,
8865 		IWN_RESET,
8866 		IWN_GP_CNTRL,
8867 		IWN_HW_REV,
8868 		IWN_EEPROM,
8869 		IWN_EEPROM_GP,
8870 		IWN_OTP_GP,
8871 		IWN_GIO,
8872 		IWN_GP_UCODE,
8873 		IWN_GP_DRIVER,
8874 		IWN_UCODE_GP1,
8875 		IWN_UCODE_GP2,
8876 		IWN_LED,
8877 		IWN_DRAM_INT_TBL,
8878 		IWN_GIO_CHICKEN,
8879 		IWN_ANA_PLL,
8880 		IWN_HW_REV_WA,
8881 		IWN_DBG_HPET_MEM,
8882 	};
8883 	DPRINTF(sc, IWN_DEBUG_REGISTER,
8884 	    "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s",
8885 	    "\n");
8886 	for (i = 0; i <  nitems(csr_tbl); i++){
8887 		DPRINTF(sc, IWN_DEBUG_REGISTER,"  %10s: 0x%08x ",
8888 			iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i]));
8889 		if ((i+1) % 3 == 0)
8890 			DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n");
8891 	}
8892 	DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n");
8893 }
8894 #endif
8895