xref: /freebsd/sys/dev/iwn/if_iwn.c (revision b2db760808f74bb53c232900091c9da801ebbfcc)
1 /*-
2  * Copyright (c) 2007-2009
3  *	Damien Bergamini <damien.bergamini@free.fr>
4  * Copyright (c) 2008
5  *	Benjamin Close <benjsc@FreeBSD.org>
6  * Copyright (c) 2008 Sam Leffler, Errno Consulting
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
23  * adapters.
24  */
25 
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
28 
29 #include <sys/param.h>
30 #include <sys/sockio.h>
31 #include <sys/sysctl.h>
32 #include <sys/mbuf.h>
33 #include <sys/kernel.h>
34 #include <sys/socket.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/bus.h>
38 #include <sys/rman.h>
39 #include <sys/endian.h>
40 #include <sys/firmware.h>
41 #include <sys/limits.h>
42 #include <sys/module.h>
43 #include <sys/queue.h>
44 #include <sys/taskqueue.h>
45 
46 #include <machine/bus.h>
47 #include <machine/resource.h>
48 #include <machine/clock.h>
49 
50 #include <dev/pci/pcireg.h>
51 #include <dev/pci/pcivar.h>
52 
53 #include <net/bpf.h>
54 #include <net/if.h>
55 #include <net/if_arp.h>
56 #include <net/ethernet.h>
57 #include <net/if_dl.h>
58 #include <net/if_media.h>
59 #include <net/if_types.h>
60 
61 #include <netinet/in.h>
62 #include <netinet/in_systm.h>
63 #include <netinet/in_var.h>
64 #include <netinet/if_ether.h>
65 #include <netinet/ip.h>
66 
67 #include <net80211/ieee80211_var.h>
68 #include <net80211/ieee80211_radiotap.h>
69 #include <net80211/ieee80211_regdomain.h>
70 #include <net80211/ieee80211_ratectl.h>
71 
72 #include <dev/iwn/if_iwnreg.h>
73 #include <dev/iwn/if_iwnvar.h>
74 
75 static int	iwn_probe(device_t);
76 static int	iwn_attach(device_t);
77 static const struct iwn_hal *iwn_hal_attach(struct iwn_softc *);
78 static void	iwn_radiotap_attach(struct iwn_softc *);
79 static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
80 		    const char name[IFNAMSIZ], int unit, int opmode,
81 		    int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
82 		    const uint8_t mac[IEEE80211_ADDR_LEN]);
83 static void	iwn_vap_delete(struct ieee80211vap *);
84 static int	iwn_cleanup(device_t);
85 static int	iwn_detach(device_t);
86 static int	iwn_nic_lock(struct iwn_softc *);
87 static int	iwn_eeprom_lock(struct iwn_softc *);
88 static int	iwn_init_otprom(struct iwn_softc *);
89 static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
90 static void	iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
91 static int	iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
92 		    void **, bus_size_t, bus_size_t, int);
93 static void	iwn_dma_contig_free(struct iwn_dma_info *);
94 static int	iwn_alloc_sched(struct iwn_softc *);
95 static void	iwn_free_sched(struct iwn_softc *);
96 static int	iwn_alloc_kw(struct iwn_softc *);
97 static void	iwn_free_kw(struct iwn_softc *);
98 static int	iwn_alloc_ict(struct iwn_softc *);
99 static void	iwn_free_ict(struct iwn_softc *);
100 static int	iwn_alloc_fwmem(struct iwn_softc *);
101 static void	iwn_free_fwmem(struct iwn_softc *);
102 static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
103 static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
104 static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
105 static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
106 		    int);
107 static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
108 static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
109 static void	iwn5000_ict_reset(struct iwn_softc *);
110 static int	iwn_read_eeprom(struct iwn_softc *,
111 		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
112 static void	iwn4965_read_eeprom(struct iwn_softc *);
113 static void	iwn4965_print_power_group(struct iwn_softc *, int);
114 static void	iwn5000_read_eeprom(struct iwn_softc *);
115 static uint32_t	iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
116 static void	iwn_read_eeprom_band(struct iwn_softc *, int);
117 #if 0	/* HT */
118 static void	iwn_read_eeprom_ht40(struct iwn_softc *, int);
119 #endif
120 static void	iwn_read_eeprom_channels(struct iwn_softc *, int,
121 		    uint32_t);
122 static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
123 static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
124 		    const uint8_t mac[IEEE80211_ADDR_LEN]);
125 static void	iwn_newassoc(struct ieee80211_node *, int);
126 static int	iwn_media_change(struct ifnet *);
127 static int	iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
128 static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
129 		    struct iwn_rx_data *);
130 static void	iwn_timer_timeout(void *);
131 static void	iwn_calib_reset(struct iwn_softc *);
132 static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
133 		    struct iwn_rx_data *);
134 #if 0	/* HT */
135 static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
136 		    struct iwn_rx_data *);
137 #endif
138 static void	iwn5000_rx_calib_results(struct iwn_softc *,
139 		    struct iwn_rx_desc *, struct iwn_rx_data *);
140 static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
141 		    struct iwn_rx_data *);
142 static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
143 		    struct iwn_rx_data *);
144 static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
145 		    struct iwn_rx_data *);
146 static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
147 		    uint8_t);
148 static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
149 static void	iwn_notif_intr(struct iwn_softc *);
150 static void	iwn_wakeup_intr(struct iwn_softc *);
151 static void	iwn_rftoggle_intr(struct iwn_softc *);
152 static void	iwn_fatal_intr(struct iwn_softc *);
153 static void	iwn_intr(void *);
154 static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
155 		    uint16_t);
156 static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
157 		    uint16_t);
158 #ifdef notyet
159 static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
160 #endif
161 static uint8_t	iwn_plcp_signal(int);
162 static int	iwn_tx_data(struct iwn_softc *, struct mbuf *,
163 		    struct ieee80211_node *, struct iwn_tx_ring *);
164 static int	iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
165 		    const struct ieee80211_bpf_params *);
166 static void	iwn_start(struct ifnet *);
167 static void	iwn_start_locked(struct ifnet *);
168 static void	iwn_watchdog(struct iwn_softc *sc);
169 static int	iwn_ioctl(struct ifnet *, u_long, caddr_t);
170 static int	iwn_cmd(struct iwn_softc *, int, const void *, int, int);
171 static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
172 		    int);
173 static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
174 		    int);
175 static int	iwn_set_link_quality(struct iwn_softc *, uint8_t, int);
176 static int	iwn_add_broadcast_node(struct iwn_softc *, int);
177 static int	iwn_wme_update(struct ieee80211com *);
178 static void	iwn_update_mcast(struct ifnet *);
179 static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
180 static int	iwn_set_critical_temp(struct iwn_softc *);
181 static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
182 static void	iwn4965_power_calibration(struct iwn_softc *, int);
183 static int	iwn4965_set_txpower(struct iwn_softc *,
184 		    struct ieee80211_channel *, int);
185 static int	iwn5000_set_txpower(struct iwn_softc *,
186 		    struct ieee80211_channel *, int);
187 static int	iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
188 static int	iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
189 static int	iwn_get_noise(const struct iwn_rx_general_stats *);
190 static int	iwn4965_get_temperature(struct iwn_softc *);
191 static int	iwn5000_get_temperature(struct iwn_softc *);
192 static int	iwn_init_sensitivity(struct iwn_softc *);
193 static void	iwn_collect_noise(struct iwn_softc *,
194 		    const struct iwn_rx_general_stats *);
195 static int	iwn4965_init_gains(struct iwn_softc *);
196 static int	iwn5000_init_gains(struct iwn_softc *);
197 static int	iwn4965_set_gains(struct iwn_softc *);
198 static int	iwn5000_set_gains(struct iwn_softc *);
199 static void	iwn_tune_sensitivity(struct iwn_softc *,
200 		    const struct iwn_rx_stats *);
201 static int	iwn_send_sensitivity(struct iwn_softc *);
202 static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
203 static int	iwn_config(struct iwn_softc *);
204 static int	iwn_scan(struct iwn_softc *);
205 static int	iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
206 static int	iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
207 #if 0	/* HT */
208 static int	iwn_ampdu_rx_start(struct ieee80211com *,
209 		    struct ieee80211_node *, uint8_t);
210 static void	iwn_ampdu_rx_stop(struct ieee80211com *,
211 		    struct ieee80211_node *, uint8_t);
212 static int	iwn_ampdu_tx_start(struct ieee80211com *,
213 		    struct ieee80211_node *, uint8_t);
214 static void	iwn_ampdu_tx_stop(struct ieee80211com *,
215 		    struct ieee80211_node *, uint8_t);
216 static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
217 		    struct ieee80211_node *, uint8_t, uint16_t);
218 static void	iwn4965_ampdu_tx_stop(struct iwn_softc *, uint8_t, uint16_t);
219 static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
220 		    struct ieee80211_node *, uint8_t, uint16_t);
221 static void	iwn5000_ampdu_tx_stop(struct iwn_softc *, uint8_t, uint16_t);
222 #endif
223 static int	iwn5000_query_calibration(struct iwn_softc *);
224 static int	iwn5000_send_calibration(struct iwn_softc *);
225 static int	iwn5000_send_wimax_coex(struct iwn_softc *);
226 static int	iwn4965_post_alive(struct iwn_softc *);
227 static int	iwn5000_post_alive(struct iwn_softc *);
228 static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
229 		    int);
230 static int	iwn4965_load_firmware(struct iwn_softc *);
231 static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
232 		    const uint8_t *, int);
233 static int	iwn5000_load_firmware(struct iwn_softc *);
234 static int	iwn_read_firmware_leg(struct iwn_softc *,
235 		    struct iwn_fw_info *);
236 static int	iwn_read_firmware_tlv(struct iwn_softc *,
237 		    struct iwn_fw_info *, uint16_t);
238 static int	iwn_read_firmware(struct iwn_softc *);
239 static int	iwn_clock_wait(struct iwn_softc *);
240 static int	iwn_apm_init(struct iwn_softc *);
241 static void	iwn_apm_stop_master(struct iwn_softc *);
242 static void	iwn_apm_stop(struct iwn_softc *);
243 static int	iwn4965_nic_config(struct iwn_softc *);
244 static int	iwn5000_nic_config(struct iwn_softc *);
245 static int	iwn_hw_prepare(struct iwn_softc *);
246 static int	iwn_hw_init(struct iwn_softc *);
247 static void	iwn_hw_stop(struct iwn_softc *);
248 static void	iwn_init_locked(struct iwn_softc *);
249 static void	iwn_init(void *);
250 static void	iwn_stop_locked(struct iwn_softc *);
251 static void	iwn_stop(struct iwn_softc *);
252 static void 	iwn_scan_start(struct ieee80211com *);
253 static void 	iwn_scan_end(struct ieee80211com *);
254 static void 	iwn_set_channel(struct ieee80211com *);
255 static void 	iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
256 static void 	iwn_scan_mindwell(struct ieee80211_scan_state *);
257 static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
258 		    struct ieee80211_channel *);
259 static int	iwn_setregdomain(struct ieee80211com *,
260 		    struct ieee80211_regdomain *, int,
261 		    struct ieee80211_channel []);
262 static void	iwn_hw_reset(void *, int);
263 static void	iwn_radio_on(void *, int);
264 static void	iwn_radio_off(void *, int);
265 static void	iwn_sysctlattach(struct iwn_softc *);
266 static int	iwn_shutdown(device_t);
267 static int	iwn_suspend(device_t);
268 static int	iwn_resume(device_t);
269 
270 #define IWN_DEBUG
271 #ifdef IWN_DEBUG
272 enum {
273 	IWN_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
274 	IWN_DEBUG_RECV		= 0x00000002,	/* basic recv operation */
275 	IWN_DEBUG_STATE		= 0x00000004,	/* 802.11 state transitions */
276 	IWN_DEBUG_TXPOW		= 0x00000008,	/* tx power processing */
277 	IWN_DEBUG_RESET		= 0x00000010,	/* reset processing */
278 	IWN_DEBUG_OPS		= 0x00000020,	/* iwn_ops processing */
279 	IWN_DEBUG_BEACON 	= 0x00000040,	/* beacon handling */
280 	IWN_DEBUG_WATCHDOG 	= 0x00000080,	/* watchdog timeout */
281 	IWN_DEBUG_INTR		= 0x00000100,	/* ISR */
282 	IWN_DEBUG_CALIBRATE	= 0x00000200,	/* periodic calibration */
283 	IWN_DEBUG_NODE		= 0x00000400,	/* node management */
284 	IWN_DEBUG_LED		= 0x00000800,	/* led management */
285 	IWN_DEBUG_CMD		= 0x00001000,	/* cmd submission */
286 	IWN_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
287 	IWN_DEBUG_ANY		= 0xffffffff
288 };
289 
290 #define DPRINTF(sc, m, fmt, ...) do {			\
291 	if (sc->sc_debug & (m))				\
292 		printf(fmt, __VA_ARGS__);		\
293 } while (0)
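
/*
 * Usage sketch: DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n",
 * __func__) only prints when the IWN_DEBUG_RESET bit is set in sc->sc_debug
 * (the mask is normally configured through the driver's debug sysctl, see
 * iwn_sysctlattach()).
 */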
294 
295 static const char *iwn_intr_str(uint8_t);
296 #else
297 #define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
298 #endif
299 
300 struct iwn_ident {
301 	uint16_t	vendor;
302 	uint16_t	device;
303 	const char	*name;
304 };
305 
306 static const struct iwn_ident iwn_ident_table [] = {
307 	{ 0x8086, 0x4229, "Intel(R) PRO/Wireless 4965BGN" },
308 	{ 0x8086, 0x422D, "Intel(R) PRO/Wireless 4965BGN" },
309 	{ 0x8086, 0x4230, "Intel(R) PRO/Wireless 4965BGN" },
310 	{ 0x8086, 0x4233, "Intel(R) PRO/Wireless 4965BGN" },
311 	{ 0x8086, 0x4232, "Intel(R) PRO/Wireless 5100" },
312 	{ 0x8086, 0x4237, "Intel(R) PRO/Wireless 5100" },
313 	{ 0x8086, 0x423C, "Intel(R) PRO/Wireless 5150" },
314 	{ 0x8086, 0x423D, "Intel(R) PRO/Wireless 5150" },
315 	{ 0x8086, 0x4235, "Intel(R) PRO/Wireless 5300" },
316 	{ 0x8086, 0x4236, "Intel(R) PRO/Wireless 5300" },
317 	{ 0x8086, 0x423A, "Intel(R) PRO/Wireless 5350" },
318 	{ 0x8086, 0x423B, "Intel(R) PRO/Wireless 5350" },
319 	{ 0x8086, 0x0083, "Intel(R) PRO/Wireless 1000" },
320 	{ 0x8086, 0x0084, "Intel(R) PRO/Wireless 1000" },
321 	{ 0x8086, 0x008D, "Intel(R) PRO/Wireless 6000" },
322 	{ 0x8086, 0x008E, "Intel(R) PRO/Wireless 6000" },
323 	{ 0x8086, 0x4238, "Intel(R) PRO/Wireless 6000" },
324 	{ 0x8086, 0x4239, "Intel(R) PRO/Wireless 6000" },
325 	{ 0x8086, 0x422B, "Intel(R) PRO/Wireless 6000" },
326 	{ 0x8086, 0x422C, "Intel(R) PRO/Wireless 6000" },
327 	{ 0x8086, 0x0087, "Intel(R) PRO/Wireless 6250" },
328 	{ 0x8086, 0x0089, "Intel(R) PRO/Wireless 6250" },
329 	{ 0x8086, 0x0082, "Intel(R) PRO/Wireless 6205a" },
330 	{ 0x8086, 0x0085, "Intel(R) PRO/Wireless 6205a" },
331 #ifdef notyet
332 	{ 0x8086, 0x008a, "Intel(R) PRO/Wireless 6205b" },
333 	{ 0x8086, 0x008b, "Intel(R) PRO/Wireless 6205b" },
334 	{ 0x8086, 0x008f, "Intel(R) PRO/Wireless 6205b" },
335 	{ 0x8086, 0x0090, "Intel(R) PRO/Wireless 6205b" },
336 	{ 0x8086, 0x0091, "Intel(R) PRO/Wireless 6205b" },
337 #endif
338 	{ 0, 0, NULL }
339 };
340 
341 static const struct iwn_hal iwn4965_hal = {
342 	iwn4965_load_firmware,
343 	iwn4965_read_eeprom,
344 	iwn4965_post_alive,
345 	iwn4965_nic_config,
346 	iwn4965_update_sched,
347 	iwn4965_get_temperature,
348 	iwn4965_get_rssi,
349 	iwn4965_set_txpower,
350 	iwn4965_init_gains,
351 	iwn4965_set_gains,
352 	iwn4965_add_node,
353 	iwn4965_tx_done,
354 #if 0	/* HT */
355 	iwn4965_ampdu_tx_start,
356 	iwn4965_ampdu_tx_stop,
357 #endif
358 	IWN4965_NTXQUEUES,
359 	IWN4965_NDMACHNLS,
360 	IWN4965_ID_BROADCAST,
361 	IWN4965_RXONSZ,
362 	IWN4965_SCHEDSZ,
363 	IWN4965_FW_TEXT_MAXSZ,
364 	IWN4965_FW_DATA_MAXSZ,
365 	IWN4965_FWSZ,
366 	IWN4965_SCHED_TXFACT
367 };
368 
369 static const struct iwn_hal iwn5000_hal = {
370 	iwn5000_load_firmware,
371 	iwn5000_read_eeprom,
372 	iwn5000_post_alive,
373 	iwn5000_nic_config,
374 	iwn5000_update_sched,
375 	iwn5000_get_temperature,
376 	iwn5000_get_rssi,
377 	iwn5000_set_txpower,
378 	iwn5000_init_gains,
379 	iwn5000_set_gains,
380 	iwn5000_add_node,
381 	iwn5000_tx_done,
382 #if 0	/* HT */
383 	iwn5000_ampdu_tx_start,
384 	iwn5000_ampdu_tx_stop,
385 #endif
386 	IWN5000_NTXQUEUES,
387 	IWN5000_NDMACHNLS,
388 	IWN5000_ID_BROADCAST,
389 	IWN5000_RXONSZ,
390 	IWN5000_SCHEDSZ,
391 	IWN5000_FW_TEXT_MAXSZ,
392 	IWN5000_FW_DATA_MAXSZ,
393 	IWN5000_FWSZ,
394 	IWN5000_SCHED_TXFACT
395 };
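
/*
 * iwn_hal_attach() below selects one of the two tables above based on the
 * hardware revision and stores it in sc->sc_hal; chip-specific operations
 * are then dispatched indirectly, e.g. sc->sc_hal->load_firmware(sc) or
 * hal->read_eeprom(sc) (a sketch of the call pattern, not an exhaustive list).
 */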
396 
397 static int
398 iwn_probe(device_t dev)
399 {
400 	const struct iwn_ident *ident;
401 
402 	for (ident = iwn_ident_table; ident->name != NULL; ident++) {
403 		if (pci_get_vendor(dev) == ident->vendor &&
404 		    pci_get_device(dev) == ident->device) {
405 			device_set_desc(dev, ident->name);
406 			return 0;
407 		}
408 	}
409 	return ENXIO;
410 }
411 
412 static int
413 iwn_attach(device_t dev)
414 {
415 	struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
416 	struct ieee80211com *ic;
417 	struct ifnet *ifp;
418 	const struct iwn_hal *hal;
419 	uint32_t tmp;
420 	int i, error, result;
421 	uint8_t macaddr[IEEE80211_ADDR_LEN];
422 
423 	sc->sc_dev = dev;
424 
425 	/*
426 	 * Get the offset of the PCI Express Capability Structure in PCI
427 	 * Configuration Space.
428 	 */
429 	error = pci_find_extcap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
430 	if (error != 0) {
431 		device_printf(dev, "PCIe capability structure not found!\n");
432 		return error;
433 	}
434 
435 	/* Clear device-specific "PCI retry timeout" register (41h). */
436 	pci_write_config(dev, 0x41, 0, 1);
437 
438 	/* Hardware bug workaround. */
439 	tmp = pci_read_config(dev, PCIR_COMMAND, 1);
440 	if (tmp & PCIM_CMD_INTxDIS) {
441 		DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n",
442 		    __func__);
443 		tmp &= ~PCIM_CMD_INTxDIS;
444 		pci_write_config(dev, PCIR_COMMAND, tmp, 1);
445 	}
446 
447 	/* Enable bus-mastering. */
448 	pci_enable_busmaster(dev);
449 
450 	sc->mem_rid = PCIR_BAR(0);
451 	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
452 	    RF_ACTIVE);
453 	if (sc->mem == NULL) {
454 		device_printf(dev, "could not allocate memory resources\n");
455 		error = ENOMEM;
456 		return error;
457 	}
458 
459 	sc->sc_st = rman_get_bustag(sc->mem);
460 	sc->sc_sh = rman_get_bushandle(sc->mem);
461 	sc->irq_rid = 0;
462 	if ((result = pci_msi_count(dev)) == 1 &&
463 	    pci_alloc_msi(dev, &result) == 0)
464 		sc->irq_rid = 1;
465 	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
466 	    RF_ACTIVE | RF_SHAREABLE);
467 	if (sc->irq == NULL) {
468 		device_printf(dev, "could not allocate interrupt resource\n");
469 		error = ENOMEM;
470 		goto fail;
471 	}
472 
473 	IWN_LOCK_INIT(sc);
474 	callout_init_mtx(&sc->sc_timer_to, &sc->sc_mtx, 0);
475 	TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc);
476 	TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc);
477 	TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc);
478 
479 	/* Attach Hardware Abstraction Layer. */
480 	hal = iwn_hal_attach(sc);
481 	if (hal == NULL) {
482 		error = ENXIO;	/* XXX: Wrong error code? */
483 		goto fail;
484 	}
485 
486 	error = iwn_hw_prepare(sc);
487 	if (error != 0) {
488 		device_printf(dev, "hardware not ready, error %d\n", error);
489 		goto fail;
490 	}
491 
492 	/* Allocate DMA memory for firmware transfers. */
493 	error = iwn_alloc_fwmem(sc);
494 	if (error != 0) {
495 		device_printf(dev,
496 		    "could not allocate memory for firmware, error %d\n",
497 		    error);
498 		goto fail;
499 	}
500 
501 	/* Allocate "Keep Warm" page. */
502 	error = iwn_alloc_kw(sc);
503 	if (error != 0) {
504 		device_printf(dev,
505 		    "could not allocate \"Keep Warm\" page, error %d\n", error);
506 		goto fail;
507 	}
508 
509 	/* Allocate ICT table for 5000 Series. */
510 	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
511 	    (error = iwn_alloc_ict(sc)) != 0) {
512 		device_printf(dev,
513 		    "%s: could not allocate ICT table, error %d\n",
514 		    __func__, error);
515 		goto fail;
516 	}
517 
518 	/* Allocate TX scheduler "rings". */
519 	error = iwn_alloc_sched(sc);
520 	if (error != 0) {
521 		device_printf(dev,
522 		    "could not allocate TX scheduler rings, error %d\n",
523 		    error);
524 		goto fail;
525 	}
526 
527 	/* Allocate TX rings (16 on 4965AGN, 20 on 5000). */
528 	for (i = 0; i < hal->ntxqs; i++) {
529 		error = iwn_alloc_tx_ring(sc, &sc->txq[i], i);
530 		if (error != 0) {
531 			device_printf(dev,
532 			    "could not allocate Tx ring %d, error %d\n",
533 			    i, error);
534 			goto fail;
535 		}
536 	}
537 
538 	/* Allocate RX ring. */
539 	error = iwn_alloc_rx_ring(sc, &sc->rxq);
540 	if (error != 0) {
541 		device_printf(dev,
542 		    "could not allocate Rx ring, error %d\n", error);
543 		goto fail;
544 	}
545 
546 	/* Clear pending interrupts. */
547 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
548 
549 	/* Count the number of available chains. */
550 	sc->ntxchains =
551 	    ((sc->txchainmask >> 2) & 1) +
552 	    ((sc->txchainmask >> 1) & 1) +
553 	    ((sc->txchainmask >> 0) & 1);
554 	sc->nrxchains =
555 	    ((sc->rxchainmask >> 2) & 1) +
556 	    ((sc->rxchainmask >> 1) & 1) +
557 	    ((sc->rxchainmask >> 0) & 1);
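	/*
	 * Illustrative: with txchainmask = IWN_ANT_AB (bits 0 and 1 set) the
	 * three one-bit tests above sum to ntxchains = 2; IWN_ANT_ABC yields 3.
	 */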
558 
559 	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
560 	if (ifp == NULL) {
561 		device_printf(dev, "could not allocate ifnet structure\n");
		error = ENOMEM;
562 		goto fail;
563 	}
564 	ic = ifp->if_l2com;
565 
566 	ic->ic_ifp = ifp;
567 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
568 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
569 
570 	/* Set device capabilities. */
571 	ic->ic_caps =
572 		  IEEE80211_C_STA		/* station mode supported */
573 		| IEEE80211_C_MONITOR		/* monitor mode supported */
574 		| IEEE80211_C_TXPMGT		/* tx power management */
575 		| IEEE80211_C_SHSLOT		/* short slot time supported */
576 		| IEEE80211_C_WPA
577 		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
578 		| IEEE80211_C_BGSCAN		/* background scanning */
579 #if 0
580 		| IEEE80211_C_IBSS		/* ibss/adhoc mode */
581 #endif
582 		| IEEE80211_C_WME		/* WME */
583 		| IEEE80211_C_RATECTL		/* use ratectl */
584 		;
585 #if 0	/* HT */
586 	/* XXX disable until HT channel setup works */
587 	ic->ic_htcaps =
588 		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
589 		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
590 		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
591 		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
592 		| IEEE80211_HTCAP_RXSTBC_2STREAM	/* 1-2 spatial streams */
593 		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
594 		/* s/w capabilities */
595 		| IEEE80211_HTC_HT		/* HT operation */
596 		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
597 		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
598 		;
599 
600 	/* Set HT capabilities. */
601 	ic->ic_htcaps =
602 #if IWN_RBUF_SIZE == 8192
603 	    IEEE80211_HTCAP_AMSDU7935 |
604 #endif
605 	    IEEE80211_HTCAP_CBW20_40 |
606 	    IEEE80211_HTCAP_SGI20 |
607 	    IEEE80211_HTCAP_SGI40;
608 	if (sc->hw_type != IWN_HW_REV_TYPE_4965)
609 		ic->ic_htcaps |= IEEE80211_HTCAP_GF;
610 	if (sc->hw_type == IWN_HW_REV_TYPE_6050)
611 		ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
612 	else
613 		ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
614 #endif
615 
616 	/* Read MAC address, channels, etc from EEPROM. */
617 	error = iwn_read_eeprom(sc, macaddr);
618 	if (error != 0) {
619 		device_printf(dev, "could not read EEPROM, error %d\n",
620 		    error);
621 		goto fail;
622 	}
623 
624 	device_printf(sc->sc_dev, "MIMO %dT%dR, %.4s, address %6D\n",
625 	    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
626 	    macaddr, ":");
627 
628 #if 0	/* HT */
629 	/* Set supported HT rates. */
630 	ic->ic_sup_mcs[0] = 0xff;
631 	if (sc->nrxchains > 1)
632 		ic->ic_sup_mcs[1] = 0xff;
633 	if (sc->nrxchains > 2)
634 		ic->ic_sup_mcs[2] = 0xff;
635 #endif
636 
637 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
638 	ifp->if_softc = sc;
639 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
640 	ifp->if_init = iwn_init;
641 	ifp->if_ioctl = iwn_ioctl;
642 	ifp->if_start = iwn_start;
643 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
644 	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
645 	IFQ_SET_READY(&ifp->if_snd);
646 
647 	ieee80211_ifattach(ic, macaddr);
648 	ic->ic_vap_create = iwn_vap_create;
649 	ic->ic_vap_delete = iwn_vap_delete;
650 	ic->ic_raw_xmit = iwn_raw_xmit;
651 	ic->ic_node_alloc = iwn_node_alloc;
652 	ic->ic_newassoc = iwn_newassoc;
653 	ic->ic_wme.wme_update = iwn_wme_update;
654 	ic->ic_update_mcast = iwn_update_mcast;
655 	ic->ic_scan_start = iwn_scan_start;
656 	ic->ic_scan_end = iwn_scan_end;
657 	ic->ic_set_channel = iwn_set_channel;
658 	ic->ic_scan_curchan = iwn_scan_curchan;
659 	ic->ic_scan_mindwell = iwn_scan_mindwell;
660 	ic->ic_setregdomain = iwn_setregdomain;
661 #if 0	/* HT */
662 	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
663 	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
664 	ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
665 	ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
666 #endif
667 
668 	iwn_radiotap_attach(sc);
669 	iwn_sysctlattach(sc);
670 
671 	/*
672 	 * Hook our interrupt after all initialization is complete.
673 	 */
674 	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
675 	    NULL, iwn_intr, sc, &sc->sc_ih);
676 	if (error != 0) {
677 		device_printf(dev, "could not set up interrupt, error %d\n",
678 		    error);
679 		goto fail;
680 	}
681 
682 	ieee80211_announce(ic);
683 	return 0;
684 fail:
685 	iwn_cleanup(dev);
686 	return error;
687 }
688 
689 static const struct iwn_hal *
690 iwn_hal_attach(struct iwn_softc *sc)
691 {
692 	sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf;
693 
694 	switch (sc->hw_type) {
695 	case IWN_HW_REV_TYPE_4965:
696 		sc->sc_hal = &iwn4965_hal;
697 		sc->limits = &iwn4965_sensitivity_limits;
698 		sc->fwname = "iwn4965fw";
699 		sc->txchainmask = IWN_ANT_AB;
700 		sc->rxchainmask = IWN_ANT_ABC;
701 		break;
702 	case IWN_HW_REV_TYPE_5100:
703 		sc->sc_hal = &iwn5000_hal;
704 		sc->limits = &iwn5000_sensitivity_limits;
705 		sc->fwname = "iwn5000fw";
706 		sc->txchainmask = IWN_ANT_B;
707 		sc->rxchainmask = IWN_ANT_AB;
708 		break;
709 	case IWN_HW_REV_TYPE_5150:
710 		sc->sc_hal = &iwn5000_hal;
711 		sc->limits = &iwn5150_sensitivity_limits;
712 		sc->fwname = "iwn5150fw";
713 		sc->txchainmask = IWN_ANT_A;
714 		sc->rxchainmask = IWN_ANT_AB;
715 		break;
716 	case IWN_HW_REV_TYPE_5300:
717 	case IWN_HW_REV_TYPE_5350:
718 		sc->sc_hal = &iwn5000_hal;
719 		sc->limits = &iwn5000_sensitivity_limits;
720 		sc->fwname = "iwn5000fw";
721 		sc->txchainmask = IWN_ANT_ABC;
722 		sc->rxchainmask = IWN_ANT_ABC;
723 		break;
724 	case IWN_HW_REV_TYPE_1000:
725 		sc->sc_hal = &iwn5000_hal;
726 		sc->limits = &iwn1000_sensitivity_limits;
727 		sc->fwname = "iwn1000fw";
728 		sc->txchainmask = IWN_ANT_A;
729 		sc->rxchainmask = IWN_ANT_AB;
730 		break;
731 	case IWN_HW_REV_TYPE_6000:
732 		sc->sc_hal = &iwn5000_hal;
733 		sc->limits = &iwn6000_sensitivity_limits;
734 		sc->fwname = "iwn6000fw";
735 		switch (pci_get_device(sc->sc_dev)) {
736 		case 0x422C:
737 		case 0x4239:
738 			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
739 			sc->txchainmask = IWN_ANT_BC;
740 			sc->rxchainmask = IWN_ANT_BC;
741 			break;
742 		default:
743 			sc->txchainmask = IWN_ANT_ABC;
744 			sc->rxchainmask = IWN_ANT_ABC;
745 			break;
746 		}
747 		break;
748 	case IWN_HW_REV_TYPE_6050:
749 		sc->sc_hal = &iwn5000_hal;
750 		sc->limits = &iwn6000_sensitivity_limits;
751 		sc->fwname = "iwn6050fw";
752 		sc->txchainmask = IWN_ANT_AB;
753 		sc->rxchainmask = IWN_ANT_AB;
754 		break;
755 	case IWN_HW_REV_TYPE_6005:
756 		sc->sc_hal = &iwn5000_hal;
757 		sc->limits = &iwn6000_sensitivity_limits;
758 		sc->fwname = "iwn6005fw";
759 		sc->txchainmask = IWN_ANT_AB;
760 		sc->rxchainmask = IWN_ANT_AB;
761 		break;
762 	default:
763 		device_printf(sc->sc_dev, "adapter type %d not supported\n",
764 		    sc->hw_type);
765 		return NULL;
766 	}
767 	return sc->sc_hal;
768 }
769 
770 /*
771  * Attach the interface to 802.11 radiotap.
772  */
773 static void
774 iwn_radiotap_attach(struct iwn_softc *sc)
775 {
776 	struct ifnet *ifp = sc->sc_ifp;
777 	struct ieee80211com *ic = ifp->if_l2com;
778 
779 	ieee80211_radiotap_attach(ic,
780 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
781 	    IWN_TX_RADIOTAP_PRESENT,
782 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
783 	    IWN_RX_RADIOTAP_PRESENT);
784 }
785 
786 static struct ieee80211vap *
787 iwn_vap_create(struct ieee80211com *ic,
788 	const char name[IFNAMSIZ], int unit, int opmode, int flags,
789 	const uint8_t bssid[IEEE80211_ADDR_LEN],
790 	const uint8_t mac[IEEE80211_ADDR_LEN])
791 {
792 	struct iwn_vap *ivp;
793 	struct ieee80211vap *vap;
794 
795 	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
796 		return NULL;
797 	ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
798 	    M_80211_VAP, M_NOWAIT | M_ZERO);
799 	if (ivp == NULL)
800 		return NULL;
801 	vap = &ivp->iv_vap;
802 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
803 	vap->iv_bmissthreshold = 10;		/* override default */
804 	/* Override with driver methods. */
805 	ivp->iv_newstate = vap->iv_newstate;
806 	vap->iv_newstate = iwn_newstate;
807 
808 	ieee80211_ratectl_init(vap);
809 	/* Complete setup. */
810 	ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
811 	ic->ic_opmode = opmode;
812 	return vap;
813 }
814 
815 static void
816 iwn_vap_delete(struct ieee80211vap *vap)
817 {
818 	struct iwn_vap *ivp = IWN_VAP(vap);
819 
820 	ieee80211_ratectl_deinit(vap);
821 	ieee80211_vap_detach(vap);
822 	free(ivp, M_80211_VAP);
823 }
824 
825 static int
826 iwn_cleanup(device_t dev)
827 {
828 	struct iwn_softc *sc = device_get_softc(dev);
829 	struct ifnet *ifp = sc->sc_ifp;
830 	struct ieee80211com *ic;
831 	int i;
832 
833 	if (ifp != NULL) {
834 		ic = ifp->if_l2com;
835 
836 		ieee80211_draintask(ic, &sc->sc_reinit_task);
837 		ieee80211_draintask(ic, &sc->sc_radioon_task);
838 		ieee80211_draintask(ic, &sc->sc_radiooff_task);
839 
840 		iwn_stop(sc);
841 		callout_drain(&sc->sc_timer_to);
842 		ieee80211_ifdetach(ic);
843 	}
844 
845 	/* Free DMA resources. */
846 	iwn_free_rx_ring(sc, &sc->rxq);
847 	if (sc->sc_hal != NULL)
848 		for (i = 0; i < sc->sc_hal->ntxqs; i++)
849 			iwn_free_tx_ring(sc, &sc->txq[i]);
850 	iwn_free_sched(sc);
851 	iwn_free_kw(sc);
852 	if (sc->ict != NULL)
853 		iwn_free_ict(sc);
854 	iwn_free_fwmem(sc);
855 
856 	if (sc->irq != NULL) {
857 		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
858 		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
859 		if (sc->irq_rid == 1)
860 			pci_release_msi(dev);
861 	}
862 
863 	if (sc->mem != NULL)
864 		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
865 
866 	if (ifp != NULL)
867 		if_free(ifp);
868 
869 	IWN_LOCK_DESTROY(sc);
870 	return 0;
871 }
872 
873 static int
874 iwn_detach(device_t dev)
875 {
876 	iwn_cleanup(dev);
877 	return 0;
878 }
879 
880 static int
881 iwn_nic_lock(struct iwn_softc *sc)
882 {
883 	int ntries;
884 
885 	/* Request exclusive access to NIC. */
886 	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
887 
888 	/* Spin until we actually get the lock. */
889 	for (ntries = 0; ntries < 1000; ntries++) {
890 		if ((IWN_READ(sc, IWN_GP_CNTRL) &
891 		    (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
892 		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
893 			return 0;
894 		DELAY(10);
895 	}
896 	return ETIMEDOUT;
897 }
898 
899 static __inline void
900 iwn_nic_unlock(struct iwn_softc *sc)
901 {
902 	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
903 }
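
/*
 * Typical call pattern for the register helpers below (sketch): the caller
 * takes the NIC lock, performs its PRPH/memory accesses and drops the lock,
 * e.g.
 *
 *	if (iwn_nic_lock(sc) == 0) {
 *		iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
 *		iwn_nic_unlock(sc);
 *	}
 */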
904 
905 static __inline uint32_t
906 iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
907 {
908 	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
909 	IWN_BARRIER_READ_WRITE(sc);
910 	return IWN_READ(sc, IWN_PRPH_RDATA);
911 }
912 
913 static __inline void
914 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
915 {
916 	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
917 	IWN_BARRIER_WRITE(sc);
918 	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
919 }
920 
921 static __inline void
922 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
923 {
924 	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
925 }
926 
927 static __inline void
928 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
929 {
930 	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
931 }
932 
933 static __inline void
934 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
935     const uint32_t *data, int count)
936 {
937 	for (; count > 0; count--, data++, addr += 4)
938 		iwn_prph_write(sc, addr, *data);
939 }
940 
941 static __inline uint32_t
942 iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
943 {
944 	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
945 	IWN_BARRIER_READ_WRITE(sc);
946 	return IWN_READ(sc, IWN_MEM_RDATA);
947 }
948 
949 static __inline void
950 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
951 {
952 	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
953 	IWN_BARRIER_WRITE(sc);
954 	IWN_WRITE(sc, IWN_MEM_WDATA, data);
955 }
956 
957 static __inline void
958 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
959 {
960 	uint32_t tmp;
961 
962 	tmp = iwn_mem_read(sc, addr & ~3);
963 	if (addr & 3)
964 		tmp = (tmp & 0x0000ffff) | data << 16;
965 	else
966 		tmp = (tmp & 0xffff0000) | data;
967 	iwn_mem_write(sc, addr & ~3, tmp);
968 }
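
/*
 * Illustrative: iwn_mem_write_2(sc, 0x102, 0xBEEF) reads the 32-bit word at
 * 0x100, replaces its upper 16 bits with 0xBEEF (since bit 1 of the address
 * is set) and writes the word back; an address with (addr & 3) == 0 would
 * replace the lower 16 bits instead.
 */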
969 
970 static __inline void
971 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
972     int count)
973 {
974 	for (; count > 0; count--, addr += 4)
975 		*data++ = iwn_mem_read(sc, addr);
976 }
977 
978 static __inline void
979 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
980     int count)
981 {
982 	for (; count > 0; count--, addr += 4)
983 		iwn_mem_write(sc, addr, val);
984 }
985 
986 static int
987 iwn_eeprom_lock(struct iwn_softc *sc)
988 {
989 	int i, ntries;
990 
991 	for (i = 0; i < 100; i++) {
992 		/* Request exclusive access to EEPROM. */
993 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
994 		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);
995 
996 		/* Spin until we actually get the lock. */
997 		for (ntries = 0; ntries < 100; ntries++) {
998 			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
999 			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1000 				return 0;
1001 			DELAY(10);
1002 		}
1003 	}
1004 	return ETIMEDOUT;
1005 }
1006 
1007 static __inline void
1008 iwn_eeprom_unlock(struct iwn_softc *sc)
1009 {
1010 	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1011 }
1012 
1013 /*
1014  * Initialize access by host to One Time Programmable ROM.
1015  * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1016  */
1017 static int
1018 iwn_init_otprom(struct iwn_softc *sc)
1019 {
1020 	uint16_t prev, base, next;
1021 	int count, error;
1022 
1023 	/* Wait for clock stabilization before accessing prph. */
1024 	error = iwn_clock_wait(sc);
1025 	if (error != 0)
1026 		return error;
1027 
1028 	error = iwn_nic_lock(sc);
1029 	if (error != 0)
1030 		return error;
1031 	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1032 	DELAY(5);
1033 	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1034 	iwn_nic_unlock(sc);
1035 
1036 	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1037 	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
1038 		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1039 		    IWN_RESET_LINK_PWR_MGMT_DIS);
1040 	}
1041 	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1042 	/* Clear ECC status. */
1043 	IWN_SETBITS(sc, IWN_OTP_GP,
1044 	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1045 
1046 	/*
1047 	 * Find the block before the last block (which contains the EEPROM
1048 	 * image) on HW without OTP shadow RAM.
1049 	 */
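	/*
	 * Illustrative walk (hypothetical offsets): with link words
	 * 0x0000 -> 0x0010 -> 0x0025 -> 0x0000, the loop below stops after
	 * reading the zero link at 0x0025 and sets prom_base to 0x0011,
	 * i.e. one word past the previous block's link word.
	 */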
1050 	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
1051 		/* Switch to absolute addressing mode. */
1052 		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1053 		base = prev = 0;
1054 		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
1055 			error = iwn_read_prom_data(sc, base, &next, 2);
1056 			if (error != 0)
1057 				return error;
1058 			if (next == 0)	/* End of linked-list. */
1059 				break;
1060 			prev = base;
1061 			base = le16toh(next);
1062 		}
1063 		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
1064 			return EIO;
1065 		/* Skip "next" word. */
1066 		sc->prom_base = prev + 1;
1067 	}
1068 	return 0;
1069 }
1070 
1071 static int
1072 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1073 {
1074 	uint32_t val, tmp;
1075 	int ntries;
1076 	uint8_t *out = data;
1077 
1078 	addr += sc->prom_base;
1079 	for (; count > 0; count -= 2, addr++) {
1080 		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1081 		for (ntries = 0; ntries < 10; ntries++) {
1082 			val = IWN_READ(sc, IWN_EEPROM);
1083 			if (val & IWN_EEPROM_READ_VALID)
1084 				break;
1085 			DELAY(5);
1086 		}
1087 		if (ntries == 10) {
1088 			device_printf(sc->sc_dev,
1089 			    "timeout reading ROM at 0x%x\n", addr);
1090 			return ETIMEDOUT;
1091 		}
1092 		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1093 			/* OTPROM, check for ECC errors. */
1094 			tmp = IWN_READ(sc, IWN_OTP_GP);
1095 			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1096 				device_printf(sc->sc_dev,
1097 				    "OTPROM ECC error at 0x%x\n", addr);
1098 				return EIO;
1099 			}
1100 			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1101 				/* Correctable ECC error, clear bit. */
1102 				IWN_SETBITS(sc, IWN_OTP_GP,
1103 				    IWN_OTP_GP_ECC_CORR_STTS);
1104 			}
1105 		}
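		/* The 16-bit word read back sits in the upper half of val. */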
1106 		*out++ = val >> 16;
1107 		if (count > 1)
1108 			*out++ = val >> 24;
1109 	}
1110 	return 0;
1111 }
1112 
1113 static void
1114 iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1115 {
1116 	if (error != 0)
1117 		return;
1118 	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1119 	*(bus_addr_t *)arg = segs[0].ds_addr;
1120 }
1121 
1122 static int
1123 iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1124 	void **kvap, bus_size_t size, bus_size_t alignment, int flags)
1125 {
1126 	int error;
1127 
1128 	dma->size = size;
1129 	dma->tag = NULL;
1130 
1131 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
1132 	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1133 	    1, size, flags, NULL, NULL, &dma->tag);
1134 	if (error != 0) {
1135 		device_printf(sc->sc_dev,
1136 		    "%s: bus_dma_tag_create failed, error %d\n",
1137 		    __func__, error);
1138 		goto fail;
1139 	}
1140 	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1141 	    flags | BUS_DMA_ZERO, &dma->map);
1142 	if (error != 0) {
1143 		device_printf(sc->sc_dev,
1144 		    "%s: bus_dmamem_alloc failed, error %d\n", __func__, error);
1145 		goto fail;
1146 	}
1147 	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
1148 	    size, iwn_dma_map_addr, &dma->paddr, flags);
1149 	if (error != 0) {
1150 		device_printf(sc->sc_dev,
1151 		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
1152 		goto fail;
1153 	}
1154 
1155 	if (kvap != NULL)
1156 		*kvap = dma->vaddr;
1157 	return 0;
1158 fail:
1159 	iwn_dma_contig_free(dma);
1160 	return error;
1161 }
1162 
1163 static void
1164 iwn_dma_contig_free(struct iwn_dma_info *dma)
1165 {
1166 	if (dma->tag != NULL) {
1167 		if (dma->map != NULL) {
1168 			if (dma->paddr != 0) {	/* unload only if loaded */
1169 				bus_dmamap_sync(dma->tag, dma->map,
1170 				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1171 				bus_dmamap_unload(dma->tag, dma->map);
1172 			}
1173 			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
1174 		}
1175 		bus_dma_tag_destroy(dma->tag);
1176 	}
1177 }
1178 
1179 static int
1180 iwn_alloc_sched(struct iwn_softc *sc)
1181 {
1182 	/* TX scheduler rings must be aligned on a 1KB boundary. */
1183 	return iwn_dma_contig_alloc(sc, &sc->sched_dma,
1184 	    (void **)&sc->sched, sc->sc_hal->schedsz, 1024, BUS_DMA_NOWAIT);
1185 }
1186 
1187 static void
1188 iwn_free_sched(struct iwn_softc *sc)
1189 {
1190 	iwn_dma_contig_free(&sc->sched_dma);
1191 }
1192 
1193 static int
1194 iwn_alloc_kw(struct iwn_softc *sc)
1195 {
1196 	/* "Keep Warm" page must be aligned on a 4KB boundary. */
1197 	return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096,
1198 	    BUS_DMA_NOWAIT);
1199 }
1200 
1201 static void
1202 iwn_free_kw(struct iwn_softc *sc)
1203 {
1204 	iwn_dma_contig_free(&sc->kw_dma);
1205 }
1206 
1207 static int
1208 iwn_alloc_ict(struct iwn_softc *sc)
1209 {
1210 	/* ICT table must be aligned on a 4KB boundary. */
1211 	return iwn_dma_contig_alloc(sc, &sc->ict_dma,
1212 	    (void **)&sc->ict, IWN_ICT_SIZE, 4096, BUS_DMA_NOWAIT);
1213 }
1214 
1215 static void
1216 iwn_free_ict(struct iwn_softc *sc)
1217 {
1218 	iwn_dma_contig_free(&sc->ict_dma);
1219 }
1220 
1221 static int
1222 iwn_alloc_fwmem(struct iwn_softc *sc)
1223 {
1224 	/* Must be aligned on a 16-byte boundary. */
1225 	return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL,
1226 	    sc->sc_hal->fwsz, 16, BUS_DMA_NOWAIT);
1227 }
1228 
1229 static void
1230 iwn_free_fwmem(struct iwn_softc *sc)
1231 {
1232 	iwn_dma_contig_free(&sc->fw_dma);
1233 }
1234 
1235 static int
1236 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1237 {
1238 	bus_size_t size;
1239 	int i, error;
1240 
1241 	ring->cur = 0;
1242 
1243 	/* Allocate RX descriptors (256-byte aligned). */
1244 	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1245 	error = iwn_dma_contig_alloc(sc, &ring->desc_dma,
1246 	    (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT);
1247 	if (error != 0) {
1248 		device_printf(sc->sc_dev,
1249 		    "%s: could not allocate Rx ring DMA memory, error %d\n",
1250 		    __func__, error);
1251 		goto fail;
1252 	}
1253 
1254 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1255 	    BUS_SPACE_MAXADDR_32BIT,
1256 	    BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1,
1257 	    MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat);
1258 	if (error != 0) {
1259 		device_printf(sc->sc_dev,
1260 		    "%s: bus_dma_tag_create failed, error %d\n",
1261 		    __func__, error);
1262 		goto fail;
1263 	}
1264 
1265 	/* Allocate RX status area (16-byte aligned). */
1266 	error = iwn_dma_contig_alloc(sc, &ring->stat_dma,
1267 	    (void **)&ring->stat, sizeof (struct iwn_rx_status),
1268 	    16, BUS_DMA_NOWAIT);
1269 	if (error != 0) {
1270 		device_printf(sc->sc_dev,
1271 		    "%s: could not allocate Rx status DMA memory, error %d\n",
1272 		    __func__, error);
1273 		goto fail;
1274 	}
1275 
1276 	/*
1277 	 * Allocate and map RX buffers.
1278 	 */
1279 	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1280 		struct iwn_rx_data *data = &ring->data[i];
1281 		bus_addr_t paddr;
1282 
1283 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1284 		if (error != 0) {
1285 			device_printf(sc->sc_dev,
1286 			    "%s: bus_dmamap_create failed, error %d\n",
1287 			    __func__, error);
1288 			goto fail;
1289 		}
1290 
1291 		data->m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
1292 		if (data->m == NULL) {
1293 			device_printf(sc->sc_dev,
1294 			    "%s: could not allocate rx mbuf\n", __func__);
1295 			error = ENOMEM;
1296 			goto fail;
1297 		}
1298 
1299 		/* Map page. */
1300 		error = bus_dmamap_load(ring->data_dmat, data->map,
1301 		    mtod(data->m, caddr_t), MJUMPAGESIZE,
1302 		    iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
1303 		if (error != 0 && error != EFBIG) {
1304 			device_printf(sc->sc_dev,
1305 			    "%s: bus_dmamap_load failed, error %d\n",
1306 			    __func__, error);
1307 			m_freem(data->m);
1308 			error = ENOMEM;	/* XXX unique code */
1309 			goto fail;
1310 		}
1311 		bus_dmamap_sync(ring->data_dmat, data->map,
1312 		    BUS_DMASYNC_PREWRITE);
1313 
1314 		/* Set physical address of RX buffer (256-byte aligned). */
1315 		ring->desc[i] = htole32(paddr >> 8);
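		/*
		 * Illustrative: a 256-byte aligned buffer at 0x01234500 is
		 * recorded above as 0x00012345; the low eight (zero) bits
		 * are implied by the alignment.
		 */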
1316 	}
1317 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1318 	    BUS_DMASYNC_PREWRITE);
1319 	return 0;
1320 fail:
1321 	iwn_free_rx_ring(sc, ring);
1322 	return error;
1323 }
1324 
1325 static void
1326 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1327 {
1328 	int ntries;
1329 
1330 	if (iwn_nic_lock(sc) == 0) {
1331 		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1332 		for (ntries = 0; ntries < 1000; ntries++) {
1333 			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1334 			    IWN_FH_RX_STATUS_IDLE)
1335 				break;
1336 			DELAY(10);
1337 		}
1338 		iwn_nic_unlock(sc);
1339 #ifdef IWN_DEBUG
1340 		if (ntries == 1000)
1341 			DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
1342 			    "timeout resetting Rx ring");
1343 #endif
1344 	}
1345 	ring->cur = 0;
1346 	sc->last_rx_valid = 0;
1347 }
1348 
1349 static void
1350 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1351 {
1352 	int i;
1353 
1354 	iwn_dma_contig_free(&ring->desc_dma);
1355 	iwn_dma_contig_free(&ring->stat_dma);
1356 
1357 	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1358 		struct iwn_rx_data *data = &ring->data[i];
1359 
1360 		if (data->m != NULL) {
1361 			bus_dmamap_sync(ring->data_dmat, data->map,
1362 			    BUS_DMASYNC_POSTREAD);
1363 			bus_dmamap_unload(ring->data_dmat, data->map);
1364 			m_freem(data->m);
1365 		}
1366 		if (data->map != NULL)
1367 			bus_dmamap_destroy(ring->data_dmat, data->map);
1368 	}
1369 }
1370 
1371 static int
1372 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1373 {
1374 	bus_size_t size;
1375 	bus_addr_t paddr;
1376 	int i, error;
1377 
1378 	ring->qid = qid;
1379 	ring->queued = 0;
1380 	ring->cur = 0;
1381 
1382 	/* Allocate TX descriptors (256-byte aligned.) */
1383 	size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_desc);
1384 	error = iwn_dma_contig_alloc(sc, &ring->desc_dma,
1385 	    (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT);
1386 	if (error != 0) {
1387 		device_printf(sc->sc_dev,
1388 		    "%s: could not allocate TX ring DMA memory, error %d\n",
1389 		    __func__, error);
1390 		goto fail;
1391 	}
1392 
1393 	/*
1394 	 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
1395 	 * to allocate command space for the other rings.
1396 	 */
1397 	if (qid > 4)
1398 		return 0;
1399 
1400 	size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_cmd);
1401 	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma,
1402 	    (void **)&ring->cmd, size, 4, BUS_DMA_NOWAIT);
1403 	if (error != 0) {
1404 		device_printf(sc->sc_dev,
1405 		    "%s: could not allocate TX cmd DMA memory, error %d\n",
1406 		    __func__, error);
1407 		goto fail;
1408 	}
1409 
1410 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1411 	    BUS_SPACE_MAXADDR_32BIT,
1412 	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IWN_MAX_SCATTER - 1,
1413 	    MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat);
1414 	if (error != 0) {
1415 		device_printf(sc->sc_dev,
1416 		    "%s: bus_dma_tag_create failed, error %d\n",
1417 		    __func__, error);
1418 		goto fail;
1419 	}
1420 
1421 	paddr = ring->cmd_dma.paddr;
1422 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1423 		struct iwn_tx_data *data = &ring->data[i];
1424 
1425 		data->cmd_paddr = paddr;
1426 		data->scratch_paddr = paddr + 12;
1427 		paddr += sizeof (struct iwn_tx_cmd);
1428 
1429 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1430 		if (error != 0) {
1431 			device_printf(sc->sc_dev,
1432 			    "%s: bus_dmamap_create failed, error %d\n",
1433 			    __func__, error);
1434 			goto fail;
1435 		}
1436 		bus_dmamap_sync(ring->data_dmat, data->map,
1437 		    BUS_DMASYNC_PREWRITE);
1438 	}
1439 	return 0;
1440 fail:
1441 	iwn_free_tx_ring(sc, ring);
1442 	return error;
1443 }
1444 
1445 static void
1446 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1447 {
1448 	int i;
1449 
1450 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1451 		struct iwn_tx_data *data = &ring->data[i];
1452 
1453 		if (data->m != NULL) {
1454 			bus_dmamap_unload(ring->data_dmat, data->map);
1455 			m_freem(data->m);
1456 			data->m = NULL;
1457 		}
1458 	}
1459 	/* Clear TX descriptors. */
1460 	memset(ring->desc, 0, ring->desc_dma.size);
1461 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1462 	    BUS_DMASYNC_PREWRITE);
1463 	sc->qfullmsk &= ~(1 << ring->qid);
1464 	ring->queued = 0;
1465 	ring->cur = 0;
1466 }
1467 
1468 static void
1469 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1470 {
1471 	int i;
1472 
1473 	iwn_dma_contig_free(&ring->desc_dma);
1474 	iwn_dma_contig_free(&ring->cmd_dma);
1475 
1476 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1477 		struct iwn_tx_data *data = &ring->data[i];
1478 
1479 		if (data->m != NULL) {
1480 			bus_dmamap_sync(ring->data_dmat, data->map,
1481 			    BUS_DMASYNC_POSTWRITE);
1482 			bus_dmamap_unload(ring->data_dmat, data->map);
1483 			m_freem(data->m);
1484 		}
1485 		if (data->map != NULL)
1486 			bus_dmamap_destroy(ring->data_dmat, data->map);
1487 	}
1488 }
1489 
1490 static void
1491 iwn5000_ict_reset(struct iwn_softc *sc)
1492 {
1493 	/* Disable interrupts. */
1494 	IWN_WRITE(sc, IWN_INT_MASK, 0);
1495 
1496 	/* Reset ICT table. */
1497 	memset(sc->ict, 0, IWN_ICT_SIZE);
1498 	sc->ict_cur = 0;
1499 
1500 	/* Set physical address of ICT table (4KB aligned.) */
1501 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
1502 	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1503 	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
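	/*
	 * Illustrative: a 4KB-aligned ICT table at physical 0x00345000 is
	 * programmed above as 0x345; the low twelve (zero) bits are implied
	 * by the alignment.
	 */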
1504 
1505 	/* Enable periodic RX interrupt. */
1506 	sc->int_mask |= IWN_INT_RX_PERIODIC;
1507 	/* Switch to ICT interrupt mode in driver. */
1508 	sc->sc_flags |= IWN_FLAG_USE_ICT;
1509 
1510 	/* Re-enable interrupts. */
1511 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
1512 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1513 }
1514 
1515 static int
1516 iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
1517 {
1518 	const struct iwn_hal *hal = sc->sc_hal;
1519 	int error;
1520 	uint16_t val;
1521 
1522 	/* Check whether adapter has an EEPROM or an OTPROM. */
1523 	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1524 	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1525 		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1526 	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
1527 	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
1528 
1529 	/* Adapter has to be powered on for EEPROM access to work. */
1530 	error = iwn_apm_init(sc);
1531 	if (error != 0) {
1532 		device_printf(sc->sc_dev,
1533 		    "%s: could not power ON adapter, error %d\n",
1534 		    __func__, error);
1535 		return error;
1536 	}
1537 
1538 	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1539 		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
1540 		return EIO;
1541 	}
1542 	error = iwn_eeprom_lock(sc);
1543 	if (error != 0) {
1544 		device_printf(sc->sc_dev,
1545 		    "%s: could not lock ROM, error %d\n",
1546 		    __func__, error);
1547 		return error;
1548 	}
1549 
1550 	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1551 		error = iwn_init_otprom(sc);
1552 		if (error != 0) {
1553 			device_printf(sc->sc_dev,
1554 			    "%s: could not initialize OTPROM, error %d\n",
1555 			    __func__, error);
1556 			return error;
1557 		}
1558 	}
1559 
1560 	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1561 	sc->rfcfg = le16toh(val);
1562 	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
1563 
1564 	/* Read MAC address. */
1565 	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
1566 
1567 	/* Read adapter-specific information from EEPROM. */
1568 	hal->read_eeprom(sc);
1569 
1570 	iwn_apm_stop(sc);	/* Power OFF adapter. */
1571 
1572 	iwn_eeprom_unlock(sc);
1573 	return 0;
1574 }
1575 
1576 static void
1577 iwn4965_read_eeprom(struct iwn_softc *sc)
1578 {
1579 	uint32_t addr;
1580 	int i;
1581 	uint16_t val;
1582 
1583 	/* Read regulatory domain (4 ASCII characters.) */
1584 	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1585 
1586 	/* Read the list of authorized channels (20MHz ones only.) */
1587 	for (i = 0; i < 5; i++) {
1588 		addr = iwn4965_regulatory_bands[i];
1589 		iwn_read_eeprom_channels(sc, i, addr);
1590 	}
1591 
1592 	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1593 	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1594 	sc->maxpwr2GHz = val & 0xff;
1595 	sc->maxpwr5GHz = val >> 8;
1596 	/* Check that EEPROM values are within valid range. */
1597 	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1598 		sc->maxpwr5GHz = 38;
1599 	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1600 		sc->maxpwr2GHz = 38;
1601 	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
1602 	    sc->maxpwr2GHz, sc->maxpwr5GHz);
1603 
1604 	/* Read samples for each TX power group. */
1605 	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1606 	    sizeof sc->bands);
1607 
1608 	/* Read voltage at which samples were taken. */
1609 	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1610 	sc->eeprom_voltage = (int16_t)le16toh(val);
1611 	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
1612 	    sc->eeprom_voltage);
1613 
1614 #ifdef IWN_DEBUG
1615 	/* Print samples. */
1616 	if (sc->sc_debug & IWN_DEBUG_ANY) {
1617 		for (i = 0; i < IWN_NBANDS; i++)
1618 			iwn4965_print_power_group(sc, i);
1619 	}
1620 #endif
1621 }
1622 
1623 #ifdef IWN_DEBUG
1624 static void
1625 iwn4965_print_power_group(struct iwn_softc *sc, int i)
1626 {
1627 	struct iwn4965_eeprom_band *band = &sc->bands[i];
1628 	struct iwn4965_eeprom_chan_samples *chans = band->chans;
1629 	int j, c;
1630 
1631 	printf("===band %d===\n", i);
1632 	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1633 	printf("chan1 num=%d\n", chans[0].num);
1634 	for (c = 0; c < 2; c++) {
1635 		for (j = 0; j < IWN_NSAMPLES; j++) {
1636 			printf("chain %d, sample %d: temp=%d gain=%d "
1637 			    "power=%d pa_det=%d\n", c, j,
1638 			    chans[0].samples[c][j].temp,
1639 			    chans[0].samples[c][j].gain,
1640 			    chans[0].samples[c][j].power,
1641 			    chans[0].samples[c][j].pa_det);
1642 		}
1643 	}
1644 	printf("chan2 num=%d\n", chans[1].num);
1645 	for (c = 0; c < 2; c++) {
1646 		for (j = 0; j < IWN_NSAMPLES; j++) {
1647 			printf("chain %d, sample %d: temp=%d gain=%d "
1648 			    "power=%d pa_det=%d\n", c, j,
1649 			    chans[1].samples[c][j].temp,
1650 			    chans[1].samples[c][j].gain,
1651 			    chans[1].samples[c][j].power,
1652 			    chans[1].samples[c][j].pa_det);
1653 		}
1654 	}
1655 }
1656 #endif
1657 
1658 static void
1659 iwn5000_read_eeprom(struct iwn_softc *sc)
1660 {
1661 	struct iwn5000_eeprom_calib_hdr hdr;
1662 	int32_t temp, volt;
1663 	uint32_t addr, base;
1664 	int i;
1665 	uint16_t val;
1666 
1667 	/* Read regulatory domain (4 ASCII characters.) */
1668 	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1669 	base = le16toh(val);
1670 	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1671 	    sc->eeprom_domain, 4);
1672 
1673 	/* Read the list of authorized channels (20MHz ones only.) */
1674 	for (i = 0; i < 5; i++) {
1675 		addr = base + iwn5000_regulatory_bands[i];
1676 		iwn_read_eeprom_channels(sc, i, addr);
1677 	}
1678 
1679 	/* Read enhanced TX power information for 6000 Series. */
1680 	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1681 		iwn_read_eeprom_enhinfo(sc);
1682 
1683 	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1684 	base = le16toh(val);
1685 	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1686 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
1687 	    "%s: calib version=%u pa type=%u voltage=%u\n",
1688 	    __func__, hdr.version, hdr.pa_type, le16toh(hdr.volt));
1689 	sc->calib_ver = hdr.version;
1690 
1691 	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1692 		/* Compute temperature offset. */
1693 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1694 		temp = le16toh(val);
1695 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1696 		volt = le16toh(val);
1697 		sc->temp_off = temp - (volt / -5);
1698 		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
1699 		    temp, volt, sc->temp_off);
1700 	} else {
1701 		/* Read crystal calibration. */
1702 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
1703 		    &sc->eeprom_crystal, sizeof (uint32_t));
1704 		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
1705 		    le32toh(sc->eeprom_crystal));
1706 	}
1707 }
1708 
1709 /*
1710  * Translate EEPROM flags to net80211.
1711  */
1712 static uint32_t
1713 iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
1714 {
1715 	uint32_t nflags;
1716 
1717 	nflags = 0;
1718 	if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
1719 		nflags |= IEEE80211_CHAN_PASSIVE;
1720 	if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
1721 		nflags |= IEEE80211_CHAN_NOADHOC;
1722 	if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
1723 		nflags |= IEEE80211_CHAN_DFS;
1724 		/* XXX apparently IBSS may still be marked */
1725 		nflags |= IEEE80211_CHAN_NOADHOC;
1726 	}
1727 
1728 	return nflags;
1729 }
1730 
1731 static void
1732 iwn_read_eeprom_band(struct iwn_softc *sc, int n)
1733 {
1734 	struct ifnet *ifp = sc->sc_ifp;
1735 	struct ieee80211com *ic = ifp->if_l2com;
1736 	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1737 	const struct iwn_chan_band *band = &iwn_bands[n];
1738 	struct ieee80211_channel *c;
1739 	int i, chan, nflags;
1740 
1741 	for (i = 0; i < band->nchan; i++) {
1742 		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
1743 			DPRINTF(sc, IWN_DEBUG_RESET,
1744 			    "skip chan %d flags 0x%x maxpwr %d\n",
1745 			    band->chan[i], channels[i].flags,
1746 			    channels[i].maxpwr);
1747 			continue;
1748 		}
1749 		chan = band->chan[i];
1750 		nflags = iwn_eeprom_channel_flags(&channels[i]);
1751 
1752 		DPRINTF(sc, IWN_DEBUG_RESET,
1753 		    "add chan %d flags 0x%x maxpwr %d\n",
1754 		    chan, channels[i].flags, channels[i].maxpwr);
1755 
1756 		c = &ic->ic_channels[ic->ic_nchans++];
1757 		c->ic_ieee = chan;
1758 		c->ic_maxregpower = channels[i].maxpwr;
1759 		c->ic_maxpower = 2*c->ic_maxregpower;
1760 
1761 		/* Save maximum allowed TX power for this channel. */
1762 		sc->maxpwr[chan] = channels[i].maxpwr;
1763 
1764 		if (n == 0) {	/* 2GHz band */
1765 			c->ic_freq = ieee80211_ieee2mhz(chan,
1766 			    IEEE80211_CHAN_G);
1767 
1768 			/* G =>'s B is supported */
1769 			c->ic_flags = IEEE80211_CHAN_B | nflags;
1770 
1771 			c = &ic->ic_channels[ic->ic_nchans++];
1772 			c[0] = c[-1];
1773 			c->ic_flags = IEEE80211_CHAN_G | nflags;
1774 		} else {	/* 5GHz band */
1775 			c->ic_freq = ieee80211_ieee2mhz(chan,
1776 			    IEEE80211_CHAN_A);
1777 			c->ic_flags = IEEE80211_CHAN_A | nflags;
1778 			sc->sc_flags |= IWN_FLAG_HAS_5GHZ;
1779 		}
1780 #if 0	/* HT */
1781 		/* XXX no constraints on using HT20 */
1782 		/* add HT20, HT40 added separately */
1783 		c = &ic->ic_channels[ic->ic_nchans++];
1784 		c[0] = c[-1];
1785 		c->ic_flags |= IEEE80211_CHAN_HT20;
1786 		/* XXX NARROW =>'s 1/2 and 1/4 width? */
1787 #endif
1788 	}
1789 }
1790 
1791 #if 0	/* HT */
1792 static void
1793 iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
1794 {
1795 	struct ifnet *ifp = sc->sc_ifp;
1796 	struct ieee80211com *ic = ifp->if_l2com;
1797 	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1798 	const struct iwn_chan_band *band = &iwn_bands[n];
1799 	struct ieee80211_channel *c, *cent, *extc;
1800 	int i;
1801 
1802 	for (i = 0; i < band->nchan; i++) {
1803 		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID) ||
1804 		    !(channels[i].flags & IWN_EEPROM_CHAN_WIDE)) {
1805 			DPRINTF(sc, IWN_DEBUG_RESET,
1806 			    "skip chan %d flags 0x%x maxpwr %d\n",
1807 			    band->chan[i], channels[i].flags,
1808 			    channels[i].maxpwr);
1809 			continue;
1810 		}
1811 		/*
1812 		 * Each entry defines an HT40 channel pair; find the
1813 		 * center channel, then the extension channel above.
1814 		 */
1815 		cent = ieee80211_find_channel_byieee(ic, band->chan[i],
1816 		    band->flags & ~IEEE80211_CHAN_HT);
1817 		if (cent == NULL) {	/* XXX shouldn't happen */
1818 			device_printf(sc->sc_dev,
1819 			    "%s: no entry for channel %d\n",
1820 			    __func__, band->chan[i]);
1821 			continue;
1822 		}
1823 		extc = ieee80211_find_channel(ic, cent->ic_freq+20,
1824 		    band->flags & ~IEEE80211_CHAN_HT);
1825 		if (extc == NULL) {
1826 			DPRINTF(sc, IWN_DEBUG_RESET,
1827 			    "skip chan %d, extension channel not found\n",
1828 			    band->chan[i]);
1829 			continue;
1830 		}
1831 
1832 		DPRINTF(sc, IWN_DEBUG_RESET,
1833 		    "add ht40 chan %d flags 0x%x maxpwr %d\n",
1834 		    band->chan[i], channels[i].flags, channels[i].maxpwr);
1835 
1836 		c = &ic->ic_channels[ic->ic_nchans++];
1837 		c[0] = cent[0];
1838 		c->ic_extieee = extc->ic_ieee;
1839 		c->ic_flags &= ~IEEE80211_CHAN_HT;
1840 		c->ic_flags |= IEEE80211_CHAN_HT40U;
1841 		c = &ic->ic_channels[ic->ic_nchans++];
1842 		c[0] = extc[0];
1843 		c->ic_extieee = cent->ic_ieee;
1844 		c->ic_flags &= ~IEEE80211_CHAN_HT;
1845 		c->ic_flags |= IEEE80211_CHAN_HT40D;
1846 	}
1847 }
1848 #endif
1849 
1850 static void
1851 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
1852 {
1853 	struct ifnet *ifp = sc->sc_ifp;
1854 	struct ieee80211com *ic = ifp->if_l2com;
1855 
1856 	iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
1857 	    iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
1858 
1859 	if (n < 5)
1860 		iwn_read_eeprom_band(sc, n);
1861 #if 0	/* HT */
1862 	else
1863 		iwn_read_eeprom_ht40(sc, n);
1864 #endif
1865 	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
1866 }
1867 
1868 #define nitems(_a)	(sizeof((_a)) / sizeof((_a)[0]))
1869 
1870 static void
1871 iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
1872 {
1873 	struct iwn_eeprom_enhinfo enhinfo[35];
1874 	uint16_t val, base;
1875 	int8_t maxpwr;
1876 	int i;
1877 
1878 	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1879 	base = le16toh(val);
1880 	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
1881 	    enhinfo, sizeof enhinfo);
1882 
1883 	memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
1884 	for (i = 0; i < nitems(enhinfo); i++) {
1885 		if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0)
1886 			continue;	/* Skip invalid entries. */
1887 
1888 		maxpwr = 0;
1889 		if (sc->txchainmask & IWN_ANT_A)
1890 			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
1891 		if (sc->txchainmask & IWN_ANT_B)
1892 			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
1893 		if (sc->txchainmask & IWN_ANT_C)
1894 			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
1895 		if (sc->ntxchains == 2)
1896 			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
1897 		else if (sc->ntxchains == 3)
1898 			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
1899 		maxpwr /= 2;	/* Convert half-dBm to dBm. */
1900 
1901 		DPRINTF(sc, IWN_DEBUG_RESET, "enhinfo %d, maxpwr=%d\n", i,
1902 		    maxpwr);
1903 		sc->enh_maxpwr[i] = maxpwr;
1904 	}
1905 }
1906 
1907 static struct ieee80211_node *
1908 iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
1909 {
1910 	return malloc(sizeof (struct iwn_node), M_80211_NODE, M_NOWAIT | M_ZERO);
1911 }
1912 
1913 static void
1914 iwn_newassoc(struct ieee80211_node *ni, int isnew)
1915 {
1916 	/* XXX move */
1917 	ieee80211_ratectl_node_init(ni);
1918 }
1919 
1920 static int
1921 iwn_media_change(struct ifnet *ifp)
1922 {
1923 	int error = ieee80211_media_change(ifp);
1924 	/* NB: only the fixed rate can change and that doesn't need a reset */
1925 	return (error == ENETRESET ? 0 : error);
1926 }
1927 
1928 static int
1929 iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
1930 {
1931 	struct iwn_vap *ivp = IWN_VAP(vap);
1932 	struct ieee80211com *ic = vap->iv_ic;
1933 	struct iwn_softc *sc = ic->ic_ifp->if_softc;
1934 	int error;
1935 
1936 	DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
1937 		ieee80211_state_name[vap->iv_state],
1938 		ieee80211_state_name[nstate]);
1939 
1940 	IEEE80211_UNLOCK(ic);
1941 	IWN_LOCK(sc);
1942 	callout_stop(&sc->sc_timer_to);
1943 
1944 	switch (nstate) {
1945 	case IEEE80211_S_ASSOC:
1946 		if (vap->iv_state != IEEE80211_S_RUN)
1947 			break;
1948 		/* FALLTHROUGH */
1949 	case IEEE80211_S_AUTH:
1950 		if (vap->iv_state == IEEE80211_S_AUTH)
1951 			break;
1952 
1953 		/*
1954 		 * !AUTH -> AUTH transition requires state reset to handle
1955 		 * reassociations correctly.
1956 		 */
1957 		sc->rxon.associd = 0;
1958 		sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
1959 		iwn_calib_reset(sc);
1960 		error = iwn_auth(sc, vap);
1961 		break;
1962 
1963 	case IEEE80211_S_RUN:
1964 		/*
1965 		 * RUN -> RUN transition; just restart the timers.
1966 		 */
1967 		if (vap->iv_state == IEEE80211_S_RUN) {
1968 			iwn_calib_reset(sc);
1969 			break;
1970 		}
1971 
1972 		/*
1973 		 * !RUN -> RUN requires setting the association id
1974 		 * which is done with a firmware cmd.  We also defer
1975 		 * starting the timers until that work is done.
1976 		 */
1977 		error = iwn_run(sc, vap);
1978 		break;
1979 
1980 	default:
1981 		break;
1982 	}
1983 	IWN_UNLOCK(sc);
1984 	IEEE80211_LOCK(ic);
1985 	return ivp->iv_newstate(vap, nstate, arg);
1986 }
1987 
1988 /*
1989  * Process an RX_PHY firmware notification.  This is usually immediately
1990  * followed by an MPDU_RX_DONE notification.
1991  */
1992 static void
1993 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
1994     struct iwn_rx_data *data)
1995 {
1996 	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
1997 
1998 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
1999 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2000 
2001 	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
2002 	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
2003 	sc->last_rx_valid = 1;
2004 }
2005 
2006 static void
2007 iwn_timer_timeout(void *arg)
2008 {
2009 	struct iwn_softc *sc = arg;
2010 	uint32_t flags = 0;
2011 
2012 	IWN_LOCK_ASSERT(sc);
2013 
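	/*
	 * This callout fires once per second (it is rearmed with a period of
	 * hz below); calib_cnt counts down the seconds until the next
	 * statistics request.
	 */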
2014 	if (sc->calib_cnt && --sc->calib_cnt == 0) {
2015 		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
2016 		    "send statistics request");
2017 		(void) iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2018 		    sizeof flags, 1);
2019 		sc->calib_cnt = 60;	/* do calibration every 60s */
2020 	}
2021 	iwn_watchdog(sc);		/* NB: piggyback tx watchdog */
2022 	callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc);
2023 }
2024 
2025 static void
2026 iwn_calib_reset(struct iwn_softc *sc)
2027 {
2028 	callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc);
2029 	sc->calib_cnt = 60;		/* do calibration every 60s */
2030 }
2031 
2032 /*
2033  * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2034  * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2035  */
2036 static void
2037 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2038     struct iwn_rx_data *data)
2039 {
2040 	const struct iwn_hal *hal = sc->sc_hal;
2041 	struct ifnet *ifp = sc->sc_ifp;
2042 	struct ieee80211com *ic = ifp->if_l2com;
2043 	struct iwn_rx_ring *ring = &sc->rxq;
2044 	struct ieee80211_frame *wh;
2045 	struct ieee80211_node *ni;
2046 	struct mbuf *m, *m1;
2047 	struct iwn_rx_stat *stat;
2048 	caddr_t head;
2049 	bus_addr_t paddr;
2050 	uint32_t flags;
2051 	int error, len, rssi, nf;
2052 
2053 	if (desc->type == IWN_MPDU_RX_DONE) {
2054 		/* Check for prior RX_PHY notification. */
2055 		if (!sc->last_rx_valid) {
2056 			DPRINTF(sc, IWN_DEBUG_ANY,
2057 			    "%s: missing RX_PHY\n", __func__);
2058 			ifp->if_ierrors++;
2059 			return;
2060 		}
2061 		sc->last_rx_valid = 0;
2062 		stat = &sc->last_rx_stat;
2063 	} else
2064 		stat = (struct iwn_rx_stat *)(desc + 1);
2065 
2066 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2067 
2068 	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2069 		device_printf(sc->sc_dev,
2070 		    "%s: invalid rx statistic header, len %d\n",
2071 		    __func__, stat->cfg_phy_len);
2072 		ifp->if_ierrors++;
2073 		return;
2074 	}
2075 	if (desc->type == IWN_MPDU_RX_DONE) {
2076 		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2077 		head = (caddr_t)(mpdu + 1);
2078 		len = le16toh(mpdu->len);
2079 	} else {
2080 		head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
2081 		len = le16toh(stat->len);
2082 	}
2083 
2084 	flags = le32toh(*(uint32_t *)(head + len));
2085 
2086 	/* Discard frames with a bad FCS early. */
2087 	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2088 		DPRINTF(sc, IWN_DEBUG_RECV, "%s: rx flags error %x\n",
2089 		    __func__, flags);
2090 		ifp->if_ierrors++;
2091 		return;
2092 	}
2093 	/* Discard frames that are too short. */
2094 	if (len < sizeof (*wh)) {
2095 		DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
2096 		    __func__, len);
2097 		ifp->if_ierrors++;
2098 		return;
2099 	}
2100 
2101 	/* XXX don't need mbuf, just dma buffer */
2102 	m1 = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
2103 	if (m1 == NULL) {
2104 		DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
2105 		    __func__);
2106 		ifp->if_ierrors++;
2107 		return;
2108 	}
2109 	bus_dmamap_unload(ring->data_dmat, data->map);
2110 
2111 	error = bus_dmamap_load(ring->data_dmat, data->map,
2112 	    mtod(m1, caddr_t), MJUMPAGESIZE,
2113 	    iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
2114 	if (error != 0 && error != EFBIG) {
2115 		device_printf(sc->sc_dev,
2116 		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
2117 		m_freem(m1);
2118 		ifp->if_ierrors++;
2119 		return;
2120 	}
2121 
2122 	m = data->m;
2123 	data->m = m1;
2124 	/* Update RX descriptor (hardware wants the buffer DMA address >> 8). */
2125 	ring->desc[ring->cur] = htole32(paddr >> 8);
2126 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2127 	    BUS_DMASYNC_PREWRITE);
2128 
2129 	/* Finalize mbuf. */
2130 	m->m_pkthdr.rcvif = ifp;
2131 	m->m_data = head;
2132 	m->m_pkthdr.len = m->m_len = len;
2133 
2134 	rssi = hal->get_rssi(sc, stat);
2135 
2136 	/* Grab a reference to the source node. */
2137 	wh = mtod(m, struct ieee80211_frame *);
2138 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
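	/* Use the calibrated noise floor when associated and not scanning,
	 * otherwise assume -95 dBm. */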
2139 	nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
2140 	    (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;
2141 
2142 	if (ieee80211_radiotap_active(ic)) {
2143 		struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
2144 
2145 		tap->wr_tsft = htole64(stat->tstamp);
2146 		tap->wr_flags = 0;
2147 		if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
2148 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
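		/*
		 * Translate the hardware rate value to the radiotap rate
		 * field, which is expressed in 0.5 Mb/s units.
		 */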
2149 		switch (stat->rate) {
2150 		/* CCK rates. */
2151 		case  10: tap->wr_rate =   2; break;
2152 		case  20: tap->wr_rate =   4; break;
2153 		case  55: tap->wr_rate =  11; break;
2154 		case 110: tap->wr_rate =  22; break;
2155 		/* OFDM rates. */
2156 		case 0xd: tap->wr_rate =  12; break;
2157 		case 0xf: tap->wr_rate =  18; break;
2158 		case 0x5: tap->wr_rate =  24; break;
2159 		case 0x7: tap->wr_rate =  36; break;
2160 		case 0x9: tap->wr_rate =  48; break;
2161 		case 0xb: tap->wr_rate =  72; break;
2162 		case 0x1: tap->wr_rate =  96; break;
2163 		case 0x3: tap->wr_rate = 108; break;
2164 		/* Unknown rate: should not happen. */
2165 		default:  tap->wr_rate =   0;
2166 		}
2167 		tap->wr_dbm_antsignal = rssi;
2168 		tap->wr_dbm_antnoise = nf;
2169 	}
2170 
2171 	IWN_UNLOCK(sc);
2172 
2173 	/* Send the frame to the 802.11 layer. */
2174 	if (ni != NULL) {
2175 		(void) ieee80211_input(ni, m, rssi - nf, nf);
2176 		/* Node is no longer needed. */
2177 		ieee80211_free_node(ni);
2178 	} else
2179 		(void) ieee80211_input_all(ic, m, rssi - nf, nf);
2180 
2181 	IWN_LOCK(sc);
2182 }
2183 
2184 #if 0	/* HT */
2185 /* Process an incoming Compressed BlockAck. */
2186 static void
2187 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2188     struct iwn_rx_data *data)
2189 {
2190 	struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
2191 	struct iwn_tx_ring *txq;
2192 
2193 	txq = &sc->txq[letoh16(ba->qid)];
2194 	/* XXX TBD */
2195 }
2196 #endif
2197 
2198 /*
2199  * Process a CALIBRATION_RESULT notification sent by the initialization
2200  * firmware on response to a CMD_CALIB_CONFIG command (5000 only.)
2201  */
2202 static void
2203 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2204     struct iwn_rx_data *data)
2205 {
2206 	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
2207 	int len, idx = -1;
2208 
2209 	/* Runtime firmware should not send such a notification. */
2210 	if (sc->sc_flags & IWN_FLAG_CALIB_DONE)
2211 		return;
2212 
2213 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2214 	len = (le32toh(desc->len) & 0x3fff) - 4;
2215 
2216 	switch (calib->code) {
2217 	case IWN5000_PHY_CALIB_DC:
2218 		if (sc->hw_type == IWN_HW_REV_TYPE_5150 ||
2219 		    sc->hw_type == IWN_HW_REV_TYPE_6050)
2220 			idx = 0;
2221 		break;
2222 	case IWN5000_PHY_CALIB_LO:
2223 		idx = 1;
2224 		break;
2225 	case IWN5000_PHY_CALIB_TX_IQ:
2226 		idx = 2;
2227 		break;
2228 	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
2229 		if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
2230 		    sc->hw_type != IWN_HW_REV_TYPE_5150)
2231 			idx = 3;
2232 		break;
2233 	case IWN5000_PHY_CALIB_BASE_BAND:
2234 		idx = 4;
2235 		break;
2236 	}
2237 	if (idx == -1)	/* Ignore other results. */
2238 		return;
2239 
2240 	/* Save calibration result. */
2241 	if (sc->calibcmd[idx].buf != NULL)
2242 		free(sc->calibcmd[idx].buf, M_DEVBUF);
2243 	sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
2244 	if (sc->calibcmd[idx].buf == NULL) {
2245 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2246 		    "not enough memory for calibration result %d\n",
2247 		    calib->code);
2248 		return;
2249 	}
2250 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2251 	    "saving calibration result code=%d len=%d\n", calib->code, len);
2252 	sc->calibcmd[idx].len = len;
2253 	memcpy(sc->calibcmd[idx].buf, calib, len);
2254 }
2255 
2256 /*
2257  * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2258  * The latter is sent by the firmware after each received beacon.
2259  */
2260 static void
2261 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2262     struct iwn_rx_data *data)
2263 {
2264 	const struct iwn_hal *hal = sc->sc_hal;
2265 	struct ifnet *ifp = sc->sc_ifp;
2266 	struct ieee80211com *ic = ifp->if_l2com;
2267 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2268 	struct iwn_calib_state *calib = &sc->calib;
2269 	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2270 	int temp;
2271 
2272 	/* Beacon stats are meaningful only when associated and not scanning. */
2273 	if (vap->iv_state != IEEE80211_S_RUN ||
2274 	    (ic->ic_flags & IEEE80211_F_SCAN))
2275 		return;
2276 
2277 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2278 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: cmd %d\n", __func__, desc->type);
2279 	iwn_calib_reset(sc);	/* Reset TX power calibration timeout. */
2280 
2281 	/* Test if temperature has changed. */
2282 	if (stats->general.temp != sc->rawtemp) {
2283 		/* Convert "raw" temperature to degC. */
2284 		sc->rawtemp = stats->general.temp;
2285 		temp = hal->get_temperature(sc);
2286 		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
2287 		    __func__, temp);
2288 
2289 		/* Update TX power if need be (4965AGN only.) */
2290 		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2291 			iwn4965_power_calibration(sc, temp);
2292 	}
2293 
2294 	if (desc->type != IWN_BEACON_STATISTICS)
2295 		return;	/* Reply to a statistics request. */
2296 
2297 	sc->noise = iwn_get_noise(&stats->rx.general);
2298 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
2299 
2300 	/* Test that RSSI and noise are present in stats report. */
2301 	if (le32toh(stats->rx.general.flags) != 1) {
2302 		DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
2303 		    "received statistics without RSSI");
2304 		return;
2305 	}
2306 
2307 	if (calib->state == IWN_CALIB_STATE_ASSOC)
2308 		iwn_collect_noise(sc, &stats->rx.general);
2309 	else if (calib->state == IWN_CALIB_STATE_RUN)
2310 		iwn_tune_sensitivity(sc, &stats->rx);
2311 }
2312 
2313 /*
2314  * Process a TX_DONE firmware notification.  Unfortunately, the 4965AGN
2315  * and 5000 adapters have different incompatible TX status formats.
2316  */
2317 static void
2318 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2319     struct iwn_rx_data *data)
2320 {
2321 	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2322 	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2323 
2324 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2325 	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2326 	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
2327 	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
2328 	    le32toh(stat->status));
2329 
2330 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2331 	iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff);
2332 }
2333 
2334 static void
2335 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2336     struct iwn_rx_data *data)
2337 {
2338 	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2339 	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2340 
2341 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2342 	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2343 	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
2344 	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
2345 	    le32toh(stat->status));
2346 
2347 #ifdef notyet
2348 	/* Reset TX scheduler slot. */
2349 	iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2350 #endif
2351 
2352 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2353 	iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff);
2354 }
2355 
2356 /*
2357  * Adapter-independent backend for TX_DONE firmware notifications.
2358  */
2359 static void
2360 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
2361     uint8_t status)
2362 {
2363 	struct ifnet *ifp = sc->sc_ifp;
2364 	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2365 	struct iwn_tx_data *data = &ring->data[desc->idx];
2366 	struct mbuf *m;
2367 	struct ieee80211_node *ni;
2368 	struct ieee80211vap *vap;
2369 
2370 	KASSERT(data->ni != NULL, ("no node"));
2371 
2372 	/* Unmap and free mbuf. */
2373 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
2374 	bus_dmamap_unload(ring->data_dmat, data->map);
2375 	m = data->m, data->m = NULL;
2376 	ni = data->ni, data->ni = NULL;
2377 	vap = ni->ni_vap;
2378 
2379 	if (m->m_flags & M_TXCB) {
2380 		/*
2381 		 * Channels marked for "radar" require traffic to be received
2382 		 * to unlock before we can transmit.  Until traffic is seen
2383 		 * any attempt to transmit is returned immediately with status
2384 		 * set to IWN_TX_FAIL_TX_LOCKED.  Unfortunately this can easily
2385 		 * happen on first authenticate after scanning.  To workaround
2386 		 * this we ignore a failure of this sort in AUTH state so the
2387 		 * 802.11 layer will fall back to using a timeout to wait for
2388 		 * the AUTH reply.  This allows the firmware time to see
2389 		 * traffic so a subsequent retry of AUTH succeeds.  It's
2390 		 * unclear why the firmware does not maintain state for
2391 		 * channels recently visited as this would allow immediate
2392 		 * use of the channel after a scan (where we see traffic).
2393 		 */
2394 		if (status == IWN_TX_FAIL_TX_LOCKED &&
2395 		    ni->ni_vap->iv_state == IEEE80211_S_AUTH)
2396 			ieee80211_process_callback(ni, m, 0);
2397 		else
2398 			ieee80211_process_callback(ni, m,
2399 			    (status & IWN_TX_FAIL) != 0);
2400 	}
2401 
2402 	/*
2403 	 * Update rate control statistics for the node.
2404 	 */
2405 	if (status & 0x80) {
2406 		ifp->if_oerrors++;
2407 		ieee80211_ratectl_tx_complete(vap, ni,
2408 		    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2409 	} else {
2410 		ieee80211_ratectl_tx_complete(vap, ni,
2411 		    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2412 	}
2413 	m_freem(m);
2414 	ieee80211_free_node(ni);
2415 
2416 	sc->sc_tx_timer = 0;
2417 	if (--ring->queued < IWN_TX_RING_LOMARK) {
2418 		sc->qfullmsk &= ~(1 << ring->qid);
2419 		if (sc->qfullmsk == 0 &&
2420 		    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
2421 			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2422 			iwn_start_locked(ifp);
2423 		}
2424 	}
2425 }
2426 
2427 /*
2428  * Process a "command done" firmware notification.  This is where we wakeup
2429  * processes waiting for a synchronous command completion.
2430  */
2431 static void
2432 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2433 {
2434 	struct iwn_tx_ring *ring = &sc->txq[4];
2435 	struct iwn_tx_data *data;
2436 
2437 	if ((desc->qid & 0xf) != 4)
2438 		return;	/* Not a command ack. */
2439 
2440 	data = &ring->data[desc->idx];
2441 
2442 	/* If the command was mapped in an mbuf, free it. */
2443 	if (data->m != NULL) {
2444 		bus_dmamap_unload(ring->data_dmat, data->map);
2445 		m_freem(data->m);
2446 		data->m = NULL;
2447 	}
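	/* Wake up any thread sleeping in iwn_cmd() on this descriptor. */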
2448 	wakeup(&ring->desc[desc->idx]);
2449 }
2450 
2451 /*
2452  * Process an INT_FH_RX or INT_SW_RX interrupt.
2453  */
2454 static void
2455 iwn_notif_intr(struct iwn_softc *sc)
2456 {
2457 	struct ifnet *ifp = sc->sc_ifp;
2458 	struct ieee80211com *ic = ifp->if_l2com;
2459 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2460 	uint16_t hw;
2461 
2462 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
2463 	    BUS_DMASYNC_POSTREAD);
2464 
2465 	hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
2466 	while (sc->rxq.cur != hw) {
2467 		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2468 		struct iwn_rx_desc *desc;
2469 
2470 		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2471 		    BUS_DMASYNC_POSTREAD);
2472 		desc = mtod(data->m, struct iwn_rx_desc *);
2473 
2474 		DPRINTF(sc, IWN_DEBUG_RECV,
2475 		    "%s: qid %x idx %d flags %x type %d(%s) len %d\n",
2476 		    __func__, desc->qid & 0xf, desc->idx, desc->flags,
2477 		    desc->type, iwn_intr_str(desc->type),
2478 		    le16toh(desc->len));
2479 
2480 		if (!(desc->qid & 0x80))	/* Reply to a command. */
2481 			iwn_cmd_done(sc, desc);
2482 
2483 		switch (desc->type) {
2484 		case IWN_RX_PHY:
2485 			iwn_rx_phy(sc, desc, data);
2486 			break;
2487 
2488 		case IWN_RX_DONE:		/* 4965AGN only. */
2489 		case IWN_MPDU_RX_DONE:
2490 			/* An 802.11 frame has been received. */
2491 			iwn_rx_done(sc, desc, data);
2492 			break;
2493 
2494 #if 0	/* HT */
2495 		case IWN_RX_COMPRESSED_BA:
2496 			/* A Compressed BlockAck has been received. */
2497 			iwn_rx_compressed_ba(sc, desc, data);
2498 			break;
2499 #endif
2500 
2501 		case IWN_TX_DONE:
2502 			/* An 802.11 frame has been transmitted. */
2503 			sc->sc_hal->tx_done(sc, desc, data);
2504 			break;
2505 
2506 		case IWN_RX_STATISTICS:
2507 		case IWN_BEACON_STATISTICS:
2508 			iwn_rx_statistics(sc, desc, data);
2509 			break;
2510 
2511 		case IWN_BEACON_MISSED:
2512 		{
2513 			struct iwn_beacon_missed *miss =
2514 			    (struct iwn_beacon_missed *)(desc + 1);
2515 			int misses;
2516 
2517 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2518 			    BUS_DMASYNC_POSTREAD);
2519 			misses = le32toh(miss->consecutive);
2520 
2521 			/* XXX not sure why we're notified w/ zero */
2522 			if (misses == 0)
2523 				break;
2524 			DPRINTF(sc, IWN_DEBUG_STATE,
2525 			    "%s: beacons missed %d/%d\n", __func__,
2526 			    misses, le32toh(miss->total));
2527 
2528 			/*
2529 			 * If more than 5 consecutive beacons are missed,
2530 			 * reinitialize the sensitivity state machine.
2531 			 */
2532 			if (vap->iv_state == IEEE80211_S_RUN && misses > 5)
2533 				(void) iwn_init_sensitivity(sc);
2534 			if (misses >= vap->iv_bmissthreshold) {
2535 				IWN_UNLOCK(sc);
2536 				ieee80211_beacon_miss(ic);
2537 				IWN_LOCK(sc);
2538 			}
2539 			break;
2540 		}
2541 		case IWN_UC_READY:
2542 		{
2543 			struct iwn_ucode_info *uc =
2544 			    (struct iwn_ucode_info *)(desc + 1);
2545 
2546 			/* The microcontroller is ready. */
2547 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2548 			    BUS_DMASYNC_POSTREAD);
2549 			DPRINTF(sc, IWN_DEBUG_RESET,
2550 			    "microcode alive notification version=%d.%d "
2551 			    "subtype=%x alive=%x\n", uc->major, uc->minor,
2552 			    uc->subtype, le32toh(uc->valid));
2553 
2554 			if (le32toh(uc->valid) != 1) {
2555 				device_printf(sc->sc_dev,
2556 				    "microcontroller initialization failed");
2557 				break;
2558 			}
2559 			if (uc->subtype == IWN_UCODE_INIT) {
2560 				/* Save microcontroller report. */
2561 				memcpy(&sc->ucode_info, uc, sizeof (*uc));
2562 			}
2563 			/* Save the address of the error log in SRAM. */
2564 			sc->errptr = le32toh(uc->errptr);
2565 			break;
2566 		}
2567 		case IWN_STATE_CHANGED:
2568 		{
2569 			uint32_t *status = (uint32_t *)(desc + 1);
2570 
2571 			/*
2572 			 * State change allows hardware switch change to be
2573 			 * noted. However, we handle this in iwn_intr as we
2574 			 * get both the enable/disble intr.
2575 			 */
2576 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2577 			    BUS_DMASYNC_POSTREAD);
2578 			DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n",
2579 			    le32toh(*status));
2580 			break;
2581 		}
2582 		case IWN_START_SCAN:
2583 		{
2584 			struct iwn_start_scan *scan =
2585 			    (struct iwn_start_scan *)(desc + 1);
2586 
2587 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2588 			    BUS_DMASYNC_POSTREAD);
2589 			DPRINTF(sc, IWN_DEBUG_ANY,
2590 			    "%s: scanning channel %d status %x\n",
2591 			    __func__, scan->chan, le32toh(scan->status));
2592 			break;
2593 		}
2594 		case IWN_STOP_SCAN:
2595 		{
2596 			struct iwn_stop_scan *scan =
2597 			    (struct iwn_stop_scan *)(desc + 1);
2598 
2599 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2600 			    BUS_DMASYNC_POSTREAD);
2601 			DPRINTF(sc, IWN_DEBUG_STATE,
2602 			    "scan finished nchan=%d status=%d chan=%d\n",
2603 			    scan->nchan, scan->status, scan->chan);
2604 
2605 			IWN_UNLOCK(sc);
2606 			ieee80211_scan_next(vap);
2607 			IWN_LOCK(sc);
2608 			break;
2609 		}
2610 		case IWN5000_CALIBRATION_RESULT:
2611 			iwn5000_rx_calib_results(sc, desc, data);
2612 			break;
2613 
2614 		case IWN5000_CALIBRATION_DONE:
2615 			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
2616 			wakeup(sc);
2617 			break;
2618 		}
2619 
2620 		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
2621 	}
2622 
2623 	/* Tell the firmware what we have processed. */
2624 	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
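	/* NB: the RX write pointer is kept on an 8-entry boundary ("& ~7"). */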
2625 	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
2626 }
2627 
2628 /*
2629  * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
2630  * from power-down sleep mode.
2631  */
2632 static void
2633 iwn_wakeup_intr(struct iwn_softc *sc)
2634 {
2635 	int qid;
2636 
2637 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
2638 	    __func__);
2639 
2640 	/* Wakeup RX and TX rings. */
2641 	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
2642 	for (qid = 0; qid < sc->sc_hal->ntxqs; qid++) {
2643 		struct iwn_tx_ring *ring = &sc->txq[qid];
2644 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
2645 	}
2646 }
2647 
2648 static void
2649 iwn_rftoggle_intr(struct iwn_softc *sc)
2650 {
2651 	struct ifnet *ifp = sc->sc_ifp;
2652 	struct ieee80211com *ic = ifp->if_l2com;
2653 	uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
2654 
2655 	IWN_LOCK_ASSERT(sc);
2656 
2657 	device_printf(sc->sc_dev, "RF switch: radio %s\n",
2658 	    (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
2659 	if (tmp & IWN_GP_CNTRL_RFKILL)
2660 		ieee80211_runtask(ic, &sc->sc_radioon_task);
2661 	else
2662 		ieee80211_runtask(ic, &sc->sc_radiooff_task);
2663 }
2664 
2665 /*
2666  * Dump the error log of the firmware when a firmware panic occurs.  Although
2667  * we can't debug the firmware because it is neither open source nor free, it
2668  * can help us to identify certain classes of problems.
2669  */
2670 static void
2671 iwn_fatal_intr(struct iwn_softc *sc)
2672 {
2673 	const struct iwn_hal *hal = sc->sc_hal;
2674 	struct iwn_fw_dump dump;
2675 	int i;
2676 
2677 	IWN_LOCK_ASSERT(sc);
2678 
2679 	/* Force a complete recalibration on next init. */
2680 	sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
2681 
2682 	/* Check that the error log address is valid. */
2683 	if (sc->errptr < IWN_FW_DATA_BASE ||
2684 	    sc->errptr + sizeof (dump) >
2685 	    IWN_FW_DATA_BASE + hal->fw_data_maxsz) {
2686 		printf("%s: bad firmware error log address 0x%08x\n",
2687 		    __func__, sc->errptr);
2688 		return;
2689 	}
2690 	if (iwn_nic_lock(sc) != 0) {
2691 		printf("%s: could not read firmware error log\n",
2692 		    __func__);
2693 		return;
2694 	}
2695 	/* Read firmware error log from SRAM. */
2696 	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
2697 	    sizeof (dump) / sizeof (uint32_t));
2698 	iwn_nic_unlock(sc);
2699 
2700 	if (dump.valid == 0) {
2701 		printf("%s: firmware error log is empty\n",
2702 		    __func__);
2703 		return;
2704 	}
2705 	printf("firmware error log:\n");
2706 	printf("  error type      = \"%s\" (0x%08X)\n",
2707 	    (dump.id < nitems(iwn_fw_errmsg)) ?
2708 		iwn_fw_errmsg[dump.id] : "UNKNOWN",
2709 	    dump.id);
2710 	printf("  program counter = 0x%08X\n", dump.pc);
2711 	printf("  source line     = 0x%08X\n", dump.src_line);
2712 	printf("  error data      = 0x%08X%08X\n",
2713 	    dump.error_data[0], dump.error_data[1]);
2714 	printf("  branch link     = 0x%08X%08X\n",
2715 	    dump.branch_link[0], dump.branch_link[1]);
2716 	printf("  interrupt link  = 0x%08X%08X\n",
2717 	    dump.interrupt_link[0], dump.interrupt_link[1]);
2718 	printf("  time            = %u\n", dump.time[0]);
2719 
2720 	/* Dump driver status (TX and RX rings) while we're here. */
2721 	printf("driver status:\n");
2722 	for (i = 0; i < hal->ntxqs; i++) {
2723 		struct iwn_tx_ring *ring = &sc->txq[i];
2724 		printf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
2725 		    i, ring->qid, ring->cur, ring->queued);
2726 	}
2727 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
2728 }
2729 
2730 static void
2731 iwn_intr(void *arg)
2732 {
2733 	struct iwn_softc *sc = arg;
2734 	struct ifnet *ifp = sc->sc_ifp;
2735 	uint32_t r1, r2, tmp;
2736 
2737 	IWN_LOCK(sc);
2738 
2739 	/* Disable interrupts. */
2740 	IWN_WRITE(sc, IWN_INT_MASK, 0);
2741 
2742 	/* Read interrupts from ICT (fast) or from registers (slow). */
2743 	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2744 		tmp = 0;
2745 		while (sc->ict[sc->ict_cur] != 0) {
2746 			tmp |= sc->ict[sc->ict_cur];
2747 			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
2748 			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
2749 		}
2750 		tmp = le32toh(tmp);
2751 		if (tmp == 0xffffffff)	/* Shouldn't happen. */
2752 			tmp = 0;
2753 		else if (tmp & 0xc0000)	/* Workaround a HW bug. */
2754 			tmp |= 0x8000;
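		/* Re-assemble the INT register layout from the packed ICT value. */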
2755 		r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
2756 		r2 = 0;	/* Unused. */
2757 	} else {
2758 		r1 = IWN_READ(sc, IWN_INT);
2759 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
2760 			return;	/* Hardware gone! */
2761 		r2 = IWN_READ(sc, IWN_FH_INT);
2762 	}
2763 
2764 	DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2);
2765 
2766 	if (r1 == 0 && r2 == 0)
2767 		goto done;	/* Interrupt not for us. */
2768 
2769 	/* Acknowledge interrupts. */
2770 	IWN_WRITE(sc, IWN_INT, r1);
2771 	if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
2772 		IWN_WRITE(sc, IWN_FH_INT, r2);
2773 
2774 	if (r1 & IWN_INT_RF_TOGGLED) {
2775 		iwn_rftoggle_intr(sc);
2776 		goto done;
2777 	}
2778 	if (r1 & IWN_INT_CT_REACHED) {
2779 		device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
2780 		    __func__);
2781 	}
2782 	if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
2783 		iwn_fatal_intr(sc);
2784 		ifp->if_flags &= ~IFF_UP;
2785 		iwn_stop_locked(sc);
2786 		goto done;
2787 	}
2788 	if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
2789 	    (r2 & IWN_FH_INT_RX)) {
2790 		if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2791 			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
2792 				IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
2793 			IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2794 			    IWN_INT_PERIODIC_DIS);
2795 			iwn_notif_intr(sc);
2796 			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
2797 				IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2798 				    IWN_INT_PERIODIC_ENA);
2799 			}
2800 		} else
2801 			iwn_notif_intr(sc);
2802 	}
2803 
2804 	if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
2805 		if (sc->sc_flags & IWN_FLAG_USE_ICT)
2806 			IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
2807 		wakeup(sc);	/* FH DMA transfer completed. */
2808 	}
2809 
2810 	if (r1 & IWN_INT_ALIVE)
2811 		wakeup(sc);	/* Firmware is alive. */
2812 
2813 	if (r1 & IWN_INT_WAKEUP)
2814 		iwn_wakeup_intr(sc);
2815 
2816 done:
2817 	/* Re-enable interrupts. */
2818 	if (ifp->if_flags & IFF_UP)
2819 		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2820 
2821 	IWN_UNLOCK(sc);
2822 }
2823 
2824 /*
2825  * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
2826  * 5000 adapters use a slightly different format.)
2827  */
2828 static void
2829 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2830     uint16_t len)
2831 {
2832 	uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
2833 
2834 	*w = htole16(len + 8);
2835 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2836 	    BUS_DMASYNC_PREWRITE);
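	/*
	 * The first IWN_SCHED_WINSZ entries appear to be mirrored past the
	 * end of the ring so the scheduler can read a full window without
	 * wrapping; keep the duplicate in sync.
	 */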
2837 	if (idx < IWN_SCHED_WINSZ) {
2838 		*(w + IWN_TX_RING_COUNT) = *w;
2839 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2840 		    BUS_DMASYNC_PREWRITE);
2841 	}
2842 }
2843 
2844 static void
2845 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2846     uint16_t len)
2847 {
2848 	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2849 
2850 	*w = htole16(id << 12 | (len + 8));
2851 
2852 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2853 	    BUS_DMASYNC_PREWRITE);
2854 	if (idx < IWN_SCHED_WINSZ) {
2855 		*(w + IWN_TX_RING_COUNT) = *w;
2856 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2857 		    BUS_DMASYNC_PREWRITE);
2858 	}
2859 }
2860 
2861 #ifdef notyet
2862 static void
2863 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
2864 {
2865 	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2866 
2867 	*w = (*w & htole16(0xf000)) | htole16(1);
2868 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2869 	    BUS_DMASYNC_PREWRITE);
2870 	if (idx < IWN_SCHED_WINSZ) {
2871 		*(w + IWN_TX_RING_COUNT) = *w;
2872 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2873 		    BUS_DMASYNC_PREWRITE);
2874 	}
2875 }
2876 #endif
2877 
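/*
 * Map a net80211 rate (in 0.5 Mb/s units) to an index into the iwn_rates
 * table; index 0 (the lowest rate) is returned when no match is found.
 */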
2878 static uint8_t
2879 iwn_plcp_signal(int rate) {
2880 	int i;
2881 
2882 	for (i = 0; i < IWN_RIDX_MAX + 1; i++) {
2883 		if (rate == iwn_rates[i].rate)
2884 			return i;
2885 	}
2886 
2887 	return 0;
2888 }
2889 
2890 static int
2891 iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
2892     struct iwn_tx_ring *ring)
2893 {
2894 	const struct iwn_hal *hal = sc->sc_hal;
2895 	const struct ieee80211_txparam *tp;
2896 	const struct iwn_rate *rinfo;
2897 	struct ieee80211vap *vap = ni->ni_vap;
2898 	struct ieee80211com *ic = ni->ni_ic;
2899 	struct iwn_node *wn = (void *)ni;
2900 	struct iwn_tx_desc *desc;
2901 	struct iwn_tx_data *data;
2902 	struct iwn_tx_cmd *cmd;
2903 	struct iwn_cmd_data *tx;
2904 	struct ieee80211_frame *wh;
2905 	struct ieee80211_key *k = NULL;
2906 	struct mbuf *mnew;
2907 	bus_dma_segment_t segs[IWN_MAX_SCATTER];
2908 	uint32_t flags;
2909 	u_int hdrlen;
2910 	int totlen, error, pad, nsegs = 0, i, rate;
2911 	uint8_t ridx, type, txant;
2912 
2913 	IWN_LOCK_ASSERT(sc);
2914 
2915 	wh = mtod(m, struct ieee80211_frame *);
2916 	hdrlen = ieee80211_anyhdrsize(wh);
2917 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2918 
2919 	desc = &ring->desc[ring->cur];
2920 	data = &ring->data[ring->cur];
2921 
2922 	/* Choose a TX rate index. */
2923 	tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
2924 	if (type == IEEE80211_FC0_TYPE_MGT)
2925 		rate = tp->mgmtrate;
2926 	else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
2927 		rate = tp->mcastrate;
2928 	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
2929 		rate = tp->ucastrate;
2930 	else {
2931 		/* XXX pass pktlen */
2932 		(void) ieee80211_ratectl_rate(ni, NULL, 0);
2933 		rate = ni->ni_txrate;
2934 	}
2935 	ridx = iwn_plcp_signal(rate);
2936 	rinfo = &iwn_rates[ridx];
2937 
2938 	/* Encrypt the frame if need be. */
2939 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2940 		k = ieee80211_crypto_encap(ni, m);
2941 		if (k == NULL) {
2942 			m_freem(m);
2943 			return ENOBUFS;
2944 		}
2945 		/* Packet header may have moved, reset our local pointer. */
2946 		wh = mtod(m, struct ieee80211_frame *);
2947 	}
2948 	totlen = m->m_pkthdr.len;
2949 
2950 	if (ieee80211_radiotap_active_vap(vap)) {
2951 		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
2952 
2953 		tap->wt_flags = 0;
2954 		tap->wt_rate = rinfo->rate;
2955 		if (k != NULL)
2956 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2957 
2958 		ieee80211_radiotap_tx(vap, m);
2959 	}
2960 
2961 	/* Prepare TX firmware command. */
2962 	cmd = &ring->cmd[ring->cur];
2963 	cmd->code = IWN_CMD_TX_DATA;
2964 	cmd->flags = 0;
2965 	cmd->qid = ring->qid;
2966 	cmd->idx = ring->cur;
2967 
2968 	tx = (struct iwn_cmd_data *)cmd->data;
2969 	/* NB: No need to clear tx, all fields are reinitialized here. */
2970 	tx->scratch = 0;	/* clear "scratch" area */
2971 
2972 	flags = 0;
2973 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1))
2974 		flags |= IWN_TX_NEED_ACK;
2975 	if ((wh->i_fc[0] &
2976 	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
2977 	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
2978 		flags |= IWN_TX_IMM_BA;		/* Cannot happen yet. */
2979 
2980 	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
2981 		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */
2982 
2983 	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
2984 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2985 		/* NB: Group frames are sent using CCK in 802.11b/g. */
2986 		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
2987 			flags |= IWN_TX_NEED_RTS;
2988 		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
2989 		    ridx >= IWN_RIDX_OFDM6) {
2990 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
2991 				flags |= IWN_TX_NEED_CTS;
2992 			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
2993 				flags |= IWN_TX_NEED_RTS;
2994 		}
2995 		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
2996 			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
2997 				/* 5000 autoselects RTS/CTS or CTS-to-self. */
2998 				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
2999 				flags |= IWN_TX_NEED_PROTECTION;
3000 			} else
3001 				flags |= IWN_TX_FULL_TXOP;
3002 		}
3003 	}
3004 
3005 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3006 	    type != IEEE80211_FC0_TYPE_DATA)
3007 		tx->id = hal->broadcast_id;
3008 	else
3009 		tx->id = wn->id;
3010 
3011 	if (type == IEEE80211_FC0_TYPE_MGT) {
3012 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3013 
3014 		/* Tell HW to set timestamp in probe responses. */
3015 		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3016 			flags |= IWN_TX_INSERT_TSTAMP;
3017 
3018 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3019 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3020 			tx->timeout = htole16(3);
3021 		else
3022 			tx->timeout = htole16(2);
3023 	} else
3024 		tx->timeout = htole16(0);
3025 
3026 	if (hdrlen & 3) {
3027 		/* First segment length must be a multiple of 4. */
3028 		flags |= IWN_TX_NEED_PADDING;
3029 		pad = 4 - (hdrlen & 3);
3030 	} else
3031 		pad = 0;
3032 
3033 	tx->len = htole16(totlen);
3034 	tx->tid = 0;
3035 	tx->rts_ntries = 60;
3036 	tx->data_ntries = 15;
3037 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3038 	tx->plcp = rinfo->plcp;
3039 	tx->rflags = rinfo->flags;
3040 	if (tx->id == hal->broadcast_id) {
3041 		/* Group or management frame. */
3042 		tx->linkq = 0;
3043 		/* XXX Alternate between antenna A and B? */
3044 		txant = IWN_LSB(sc->txchainmask);
3045 		tx->rflags |= IWN_RFLAG_ANT(txant);
3046 	} else {
3047 		tx->linkq = IWN_RIDX_OFDM54 - ridx;
3048 		flags |= IWN_TX_LINKQ;	/* enable MRR */
3049 	}
3050 
3051 	/* Set physical address of "scratch area". */
3052 	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3053 	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3054 
3055 	/* Copy 802.11 header in TX command. */
3056 	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3057 
3058 	/* Trim 802.11 header. */
3059 	m_adj(m, hdrlen);
3060 	tx->security = 0;
3061 	tx->flags = htole32(flags);
3062 
3063 	if (m->m_len > 0) {
3064 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map,
3065 		    m, segs, &nsegs, BUS_DMA_NOWAIT);
3066 		if (error == EFBIG) {
3067 			/* too many fragments, linearize */
3068 			mnew = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER);
3069 			if (mnew == NULL) {
3070 				device_printf(sc->sc_dev,
3071 				    "%s: could not defrag mbuf\n", __func__);
3072 				m_freem(m);
3073 				return ENOBUFS;
3074 			}
3075 			m = mnew;
3076 			error = bus_dmamap_load_mbuf_sg(ring->data_dmat,
3077 			    data->map, m, segs, &nsegs, BUS_DMA_NOWAIT);
3078 		}
3079 		if (error != 0) {
3080 			device_printf(sc->sc_dev,
3081 			    "%s: bus_dmamap_load_mbuf_sg failed, error %d\n",
3082 			    __func__, error);
3083 			m_freem(m);
3084 			return error;
3085 		}
3086 	}
3087 
3088 	data->m = m;
3089 	data->ni = ni;
3090 
3091 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3092 	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3093 
3094 	/* Fill TX descriptor. */
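	/*
	 * Each segment entry packs the byte length (shifted left by 4) with
	 * the upper bits of the DMA address in the low nibble.
	 */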
3095 	desc->nsegs = 1 + nsegs;
3096 	/* First DMA segment is used by the TX command. */
3097 	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3098 	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
3099 	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
3100 	/* Other DMA segments are for data payload. */
3101 	for (i = 1; i <= nsegs; i++) {
3102 		desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr));
3103 		desc->segs[i].len  = htole16(IWN_HIADDR(segs[i - 1].ds_addr) |
3104 		    segs[i - 1].ds_len << 4);
3105 	}
3106 
3107 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3108 	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3109 	    BUS_DMASYNC_PREWRITE);
3110 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3111 	    BUS_DMASYNC_PREWRITE);
3112 
3113 #ifdef notyet
3114 	/* Update TX scheduler. */
3115 	hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3116 #endif
3117 
3118 	/* Kick TX ring. */
3119 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3120 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3121 
3122 	/* Mark TX ring as full if we reach a certain threshold. */
3123 	if (++ring->queued > IWN_TX_RING_HIMARK)
3124 		sc->qfullmsk |= 1 << ring->qid;
3125 
3126 	return 0;
3127 }
3128 
3129 static int
3130 iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
3131     struct ieee80211_node *ni, struct iwn_tx_ring *ring,
3132     const struct ieee80211_bpf_params *params)
3133 {
3134 	const struct iwn_hal *hal = sc->sc_hal;
3135 	const struct iwn_rate *rinfo;
3136 	struct ifnet *ifp = sc->sc_ifp;
3137 	struct ieee80211vap *vap = ni->ni_vap;
3138 	struct ieee80211com *ic = ifp->if_l2com;
3139 	struct iwn_tx_cmd *cmd;
3140 	struct iwn_cmd_data *tx;
3141 	struct ieee80211_frame *wh;
3142 	struct iwn_tx_desc *desc;
3143 	struct iwn_tx_data *data;
3144 	struct mbuf *mnew;
3145 	bus_addr_t paddr;
3146 	bus_dma_segment_t segs[IWN_MAX_SCATTER];
3147 	uint32_t flags;
3148 	u_int hdrlen;
3149 	int totlen, error, pad, nsegs = 0, i, rate;
3150 	uint8_t ridx, type, txant;
3151 
3152 	IWN_LOCK_ASSERT(sc);
3153 
3154 	wh = mtod(m, struct ieee80211_frame *);
3155 	hdrlen = ieee80211_anyhdrsize(wh);
3156 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3157 
3158 	desc = &ring->desc[ring->cur];
3159 	data = &ring->data[ring->cur];
3160 
3161 	/* Choose a TX rate index. */
3162 	rate = params->ibp_rate0;
3163 	if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
3164 		/* XXX fall back to mcast/mgmt rate? */
3165 		m_freem(m);
3166 		return EINVAL;
3167 	}
3168 	ridx = iwn_plcp_signal(rate);
3169 	rinfo = &iwn_rates[ridx];
3170 
3171 	totlen = m->m_pkthdr.len;
3172 
3173 	/* Prepare TX firmware command. */
3174 	cmd = &ring->cmd[ring->cur];
3175 	cmd->code = IWN_CMD_TX_DATA;
3176 	cmd->flags = 0;
3177 	cmd->qid = ring->qid;
3178 	cmd->idx = ring->cur;
3179 
3180 	tx = (struct iwn_cmd_data *)cmd->data;
3181 	/* NB: No need to clear tx, all fields are reinitialized here. */
3182 	tx->scratch = 0;	/* clear "scratch" area */
3183 
3184 	flags = 0;
3185 	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
3186 		flags |= IWN_TX_NEED_ACK;
3187 	if (params->ibp_flags & IEEE80211_BPF_RTS) {
3188 		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3189 			/* 5000 autoselects RTS/CTS or CTS-to-self. */
3190 			flags &= ~IWN_TX_NEED_RTS;
3191 			flags |= IWN_TX_NEED_PROTECTION;
3192 		} else
3193 			flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
3194 	}
3195 	if (params->ibp_flags & IEEE80211_BPF_CTS) {
3196 		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3197 			/* 5000 autoselects RTS/CTS or CTS-to-self. */
3198 			flags &= ~IWN_TX_NEED_CTS;
3199 			flags |= IWN_TX_NEED_PROTECTION;
3200 		} else
3201 			flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
3202 	}
3203 	if (type == IEEE80211_FC0_TYPE_MGT) {
3204 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3205 
3206 		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3207 			flags |= IWN_TX_INSERT_TSTAMP;
3208 
3209 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3210 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3211 			tx->timeout = htole16(3);
3212 		else
3213 			tx->timeout = htole16(2);
3214 	} else
3215 		tx->timeout = htole16(0);
3216 
3217 	if (hdrlen & 3) {
3218 		/* First segment length must be a multiple of 4. */
3219 		flags |= IWN_TX_NEED_PADDING;
3220 		pad = 4 - (hdrlen & 3);
3221 	} else
3222 		pad = 0;
3223 
3224 	if (ieee80211_radiotap_active_vap(vap)) {
3225 		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3226 
3227 		tap->wt_flags = 0;
3228 		tap->wt_rate = rate;
3229 
3230 		ieee80211_radiotap_tx(vap, m);
3231 	}
3232 
3233 	tx->len = htole16(totlen);
3234 	tx->tid = 0;
3235 	tx->id = hal->broadcast_id;
3236 	tx->rts_ntries = params->ibp_try1;
3237 	tx->data_ntries = params->ibp_try0;
3238 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3239 	tx->plcp = rinfo->plcp;
3240 	tx->rflags = rinfo->flags;
3241 	/* Group or management frame. */
3242 	tx->linkq = 0;
3243 	txant = IWN_LSB(sc->txchainmask);
3244 	tx->rflags |= IWN_RFLAG_ANT(txant);
3245 	/* Set physical address of "scratch area". */
3246 	paddr = ring->cmd_dma.paddr + ring->cur * sizeof (struct iwn_tx_cmd);
3247 	tx->loaddr = htole32(IWN_LOADDR(paddr));
3248 	tx->hiaddr = IWN_HIADDR(paddr);
3249 
3250 	/* Copy 802.11 header in TX command. */
3251 	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3252 
3253 	/* Trim 802.11 header. */
3254 	m_adj(m, hdrlen);
3255 	tx->security = 0;
3256 	tx->flags = htole32(flags);
3257 
3258 	if (m->m_len > 0) {
3259 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map,
3260 		    m, segs, &nsegs, BUS_DMA_NOWAIT);
3261 		if (error == EFBIG) {
3262 			/* Too many fragments, linearize. */
3263 			mnew = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER);
3264 			if (mnew == NULL) {
3265 				device_printf(sc->sc_dev,
3266 				    "%s: could not defrag mbuf\n", __func__);
3267 				m_freem(m);
3268 				return ENOBUFS;
3269 			}
3270 			m = mnew;
3271 			error = bus_dmamap_load_mbuf_sg(ring->data_dmat,
3272 			    data->map, m, segs, &nsegs, BUS_DMA_NOWAIT);
3273 		}
3274 		if (error != 0) {
3275 			device_printf(sc->sc_dev,
3276 			    "%s: bus_dmamap_load_mbuf_sg failed, error %d\n",
3277 			    __func__, error);
3278 			m_freem(m);
3279 			return error;
3280 		}
3281 	}
3282 
3283 	data->m = m;
3284 	data->ni = ni;
3285 
3286 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3287 	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3288 
3289 	/* Fill TX descriptor. */
3290 	desc->nsegs = 1 + nsegs;
3291 	/* First DMA segment is used by the TX command. */
3292 	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3293 	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
3294 	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
3295 	/* Other DMA segments are for data payload. */
3296 	for (i = 1; i <= nsegs; i++) {
3297 		desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr));
3298 		desc->segs[i].len  = htole16(IWN_HIADDR(segs[i - 1].ds_addr) |
3299 		    segs[i - 1].ds_len << 4);
3300 	}
3301 
3302 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3303 	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3304 	    BUS_DMASYNC_PREWRITE);
3305 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3306 	    BUS_DMASYNC_PREWRITE);
3307 
3308 #ifdef notyet
3309 	/* Update TX scheduler. */
3310 	hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3311 #endif
3312 
3313 	/* Kick TX ring. */
3314 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3315 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3316 
3317 	/* Mark TX ring as full if we reach a certain threshold. */
3318 	if (++ring->queued > IWN_TX_RING_HIMARK)
3319 		sc->qfullmsk |= 1 << ring->qid;
3320 
3321 	return 0;
3322 }
3323 
3324 static int
3325 iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3326 	const struct ieee80211_bpf_params *params)
3327 {
3328 	struct ieee80211com *ic = ni->ni_ic;
3329 	struct ifnet *ifp = ic->ic_ifp;
3330 	struct iwn_softc *sc = ifp->if_softc;
3331 	struct iwn_tx_ring *txq;
3332 	int error = 0;
3333 
3334 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3335 		ieee80211_free_node(ni);
3336 		m_freem(m);
3337 		return ENETDOWN;
3338 	}
3339 
3340 	IWN_LOCK(sc);
3341 	if (params == NULL)
3342 		txq = &sc->txq[M_WME_GETAC(m)];
3343 	else
3344 		txq = &sc->txq[params->ibp_pri & 3];
3345 
3346 	if (params == NULL) {
3347 		/*
3348 		 * Legacy path; interpret frame contents to decide
3349 		 * precisely how to send the frame.
3350 		 */
3351 		error = iwn_tx_data(sc, m, ni, txq);
3352 	} else {
3353 		/*
3354 		 * Caller supplied explicit parameters to use in
3355 		 * sending the frame.
3356 		 */
3357 		error = iwn_tx_data_raw(sc, m, ni, txq, params);
3358 	}
3359 	if (error != 0) {
3360 		/* NB: m is reclaimed on tx failure */
3361 		ieee80211_free_node(ni);
3362 		ifp->if_oerrors++;
3363 	}
3364 	IWN_UNLOCK(sc);
3365 	return error;
3366 }
3367 
3368 static void
3369 iwn_start(struct ifnet *ifp)
3370 {
3371 	struct iwn_softc *sc = ifp->if_softc;
3372 
3373 	IWN_LOCK(sc);
3374 	iwn_start_locked(ifp);
3375 	IWN_UNLOCK(sc);
3376 }
3377 
3378 static void
3379 iwn_start_locked(struct ifnet *ifp)
3380 {
3381 	struct iwn_softc *sc = ifp->if_softc;
3382 	struct ieee80211_node *ni;
3383 	struct iwn_tx_ring *txq;
3384 	struct mbuf *m;
3385 	int pri;
3386 
3387 	IWN_LOCK_ASSERT(sc);
3388 
3389 	for (;;) {
3390 		if (sc->qfullmsk != 0) {
3391 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3392 			break;
3393 		}
3394 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
3395 		if (m == NULL)
3396 			break;
3397 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3398 		pri = M_WME_GETAC(m);
3399 		txq = &sc->txq[pri];
3400 		if (iwn_tx_data(sc, m, ni, txq) != 0) {
3401 			ifp->if_oerrors++;
3402 			ieee80211_free_node(ni);
3403 			break;
3404 		}
3405 		sc->sc_tx_timer = 5;
3406 	}
3407 }
3408 
3409 static void
3410 iwn_watchdog(struct iwn_softc *sc)
3411 {
3412 	if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) {
3413 		struct ifnet *ifp = sc->sc_ifp;
3414 		struct ieee80211com *ic = ifp->if_l2com;
3415 
3416 		if_printf(ifp, "device timeout\n");
3417 		ieee80211_runtask(ic, &sc->sc_reinit_task);
3418 	}
3419 }
3420 
3421 static int
3422 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
3423 {
3424 	struct iwn_softc *sc = ifp->if_softc;
3425 	struct ieee80211com *ic = ifp->if_l2com;
3426 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3427 	struct ifreq *ifr = (struct ifreq *) data;
3428 	int error = 0, startall = 0, stop = 0;
3429 
3430 	switch (cmd) {
3431 	case SIOCSIFFLAGS:
3432 		IWN_LOCK(sc);
3433 		if (ifp->if_flags & IFF_UP) {
3434 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3435 				iwn_init_locked(sc);
3436 				if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
3437 					startall = 1;
3438 				else
3439 					stop = 1;
3440 			}
3441 		} else {
3442 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3443 				iwn_stop_locked(sc);
3444 		}
3445 		IWN_UNLOCK(sc);
3446 		if (startall)
3447 			ieee80211_start_all(ic);
3448 		else if (vap != NULL && stop)
3449 			ieee80211_stop(vap);
3450 		break;
3451 	case SIOCGIFMEDIA:
3452 		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
3453 		break;
3454 	case SIOCGIFADDR:
3455 		error = ether_ioctl(ifp, cmd, data);
3456 		break;
3457 	default:
3458 		error = EINVAL;
3459 		break;
3460 	}
3461 	return error;
3462 }
3463 
3464 /*
3465  * Send a command to the firmware.
3466  */
3467 static int
3468 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
3469 {
3470 	struct iwn_tx_ring *ring = &sc->txq[4];
3471 	struct iwn_tx_desc *desc;
3472 	struct iwn_tx_data *data;
3473 	struct iwn_tx_cmd *cmd;
3474 	struct mbuf *m;
3475 	bus_addr_t paddr;
3476 	int totlen, error;
3477 
3478 	IWN_LOCK_ASSERT(sc);
3479 
3480 	desc = &ring->desc[ring->cur];
3481 	data = &ring->data[ring->cur];
3482 	totlen = 4 + size;
3483 
3484 	if (size > sizeof cmd->data) {
3485 		/* Command is too large to fit in a descriptor. */
3486 		if (totlen > MCLBYTES)
3487 			return EINVAL;
3488 		m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
3489 		if (m == NULL)
3490 			return ENOMEM;
3491 		cmd = mtod(m, struct iwn_tx_cmd *);
3492 		error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
3493 		    totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
3494 		if (error != 0) {
3495 			m_freem(m);
3496 			return error;
3497 		}
3498 		data->m = m;
3499 	} else {
3500 		cmd = &ring->cmd[ring->cur];
3501 		paddr = data->cmd_paddr;
3502 	}
3503 
3504 	cmd->code = code;
3505 	cmd->flags = 0;
3506 	cmd->qid = ring->qid;
3507 	cmd->idx = ring->cur;
3508 	memcpy(cmd->data, buf, size);
3509 
3510 	desc->nsegs = 1;
3511 	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
3512 	desc->segs[0].len  = htole16(IWN_HIADDR(paddr) | totlen << 4);
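	/*
	 * NB: the length word packs the byte count (shifted left by 4)
	 * together with the high DMA address bits from IWN_HIADDR() in
	 * the low-order bits.
	 */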
3513 
3514 	DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
3515 	    __func__, iwn_intr_str(cmd->code), cmd->code,
3516 	    cmd->flags, cmd->qid, cmd->idx);
3517 
3518 	if (size > sizeof cmd->data) {
3519 		bus_dmamap_sync(ring->data_dmat, data->map,
3520 		    BUS_DMASYNC_PREWRITE);
3521 	} else {
3522 		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3523 		    BUS_DMASYNC_PREWRITE);
3524 	}
3525 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3526 	    BUS_DMASYNC_PREWRITE);
3527 
3528 #ifdef notyet
3529 	/* Update TX scheduler. */
3530 	sc->sc_hal->update_sched(sc, ring->qid, ring->cur, 0, 0);
3531 #endif
3532 
3533 	/* Kick command ring. */
3534 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3535 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3536 
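	/*
	 * Synchronous commands sleep on the descriptor address; the
	 * command-completion path is expected to wakeup() that same
	 * address, and the hz timeout bounds the wait to about one second.
	 */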
3537 	return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
3538 }
3539 
3540 static int
3541 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3542 {
3543 	struct iwn4965_node_info hnode;
3544 	caddr_t src, dst;
3545 
3546 	/*
3547 	 * We use the node structure for 5000 Series internally (it is
3548 	 * a superset of the one for 4965AGN). We thus copy the common
3549 	 * fields before sending the command.
3550 	 */
3551 	src = (caddr_t)node;
3552 	dst = (caddr_t)&hnode;
3553 	memcpy(dst, src, 48);
3554 	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
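	/* (Assumed layout: bytes 0-47 are common, bytes 48-71 hold those
	 * keys and the remaining 20 bytes follow at offset 72.) */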
3555 	memcpy(dst + 48, src + 72, 20);
3556 	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
3557 }
3558 
3559 static int
3560 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3561 {
3562 	/* Direct mapping. */
3563 	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
3564 }
3565 
3566 #if 0	/* HT */
3567 static const uint8_t iwn_ridx_to_plcp[] = {
3568 	10, 20, 55, 110, /* CCK */
3569 	0xd, 0xf, 0x5, 0x7, 0x9, 0xb, 0x1, 0x3, 0x3 /* OFDM R1-R4 */
3570 };
3571 static const uint8_t iwn_siso_mcs_to_plcp[] = {
3572 	0, 0, 0, 0, 			/* CCK */
3573 	0, 0, 1, 2, 3, 4, 5, 6, 7	/* HT */
3574 };
3575 static const uint8_t iwn_mimo_mcs_to_plcp[] = {
3576 	0, 0, 0, 0, 			/* CCK */
3577 	8, 8, 9, 10, 11, 12, 13, 14, 15	/* HT */
3578 };
3579 #endif
3580 static const uint8_t iwn_prev_ridx[] = {
3581 	/* NB: allow fallback from CCK11 to OFDM9 and from OFDM6 to CCK5 */
3582 	0, 0, 1, 5,			/* CCK */
3583 	2, 4, 3, 6, 7, 8, 9, 10, 10	/* OFDM */
3584 };
3585 
3586 /*
3587  * Configure hardware link parameters for the specified
3588  * node operating on the specified channel.
3589  */
3590 static int
3591 iwn_set_link_quality(struct iwn_softc *sc, uint8_t id, int async)
3592 {
3593 	struct ifnet *ifp = sc->sc_ifp;
3594 	struct ieee80211com *ic = ifp->if_l2com;
3595 	struct iwn_cmd_link_quality linkq;
3596 	const struct iwn_rate *rinfo;
3597 	int i;
3598 	uint8_t txant, ridx;
3599 
3600 	/* Use the first valid TX antenna. */
3601 	txant = IWN_LSB(sc->txchainmask);
3602 
3603 	memset(&linkq, 0, sizeof linkq);
3604 	linkq.id = id;
3605 	linkq.antmsk_1stream = txant;
3606 	linkq.antmsk_2stream = IWN_ANT_AB;
3607 	linkq.ampdu_max = 31;
3608 	linkq.ampdu_threshold = 3;
3609 	linkq.ampdu_limit = htole16(4000);	/* 4ms */
3610 
3611 #if 0	/* HT */
3612 	if (IEEE80211_IS_CHAN_HT(c))
3613 		linkq.mimo = 1;
3614 #endif
3615 
3616 	if (id == IWN_ID_BSS)
3617 		ridx = IWN_RIDX_OFDM54;
3618 	else if (IEEE80211_IS_CHAN_A(ic->ic_curchan))
3619 		ridx = IWN_RIDX_OFDM6;
3620 	else
3621 		ridx = IWN_RIDX_CCK1;
3622 
3623 	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
3624 		rinfo = &iwn_rates[ridx];
3625 #if 0	/* HT */
3626 		if (IEEE80211_IS_CHAN_HT40(c)) {
3627 			linkq.retry[i].plcp = iwn_mimo_mcs_to_plcp[ridx]
3628 					 | IWN_RIDX_MCS;
3629 			linkq.retry[i].rflags = IWN_RFLAG_HT
3630 					 | IWN_RFLAG_HT40;
3631 			/* XXX shortGI */
3632 		} else if (IEEE80211_IS_CHAN_HT(c)) {
3633 			linkq.retry[i].plcp = iwn_siso_mcs_to_plcp[ridx]
3634 					 | IWN_RIDX_MCS;
3635 			linkq.retry[i].rflags = IWN_RFLAG_HT;
3636 			/* XXX shortGI */
3637 		} else
3638 #endif
3639 		{
3640 			linkq.retry[i].plcp = rinfo->plcp;
3641 			linkq.retry[i].rflags = rinfo->flags;
3642 		}
3643 		linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
3644 		ridx = iwn_prev_ridx[ridx];
3645 	}
3646 #ifdef IWN_DEBUG
3647 	if (sc->sc_debug & IWN_DEBUG_STATE) {
3648 		printf("%s: set link quality for node %d, mimo %d ssmask %d\n",
3649 		    __func__, id, linkq.mimo, linkq.antmsk_1stream);
3650 		printf("%s:", __func__);
3651 		for (i = 0; i < IWN_MAX_TX_RETRIES; i++)
3652 			printf(" %d:%x", linkq.retry[i].plcp,
3653 			    linkq.retry[i].rflags);
3654 		printf("\n");
3655 	}
3656 #endif
3657 	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
3658 }
3659 
3660 /*
3661  * Broadcast node is used to send group-addressed and management frames.
3662  */
3663 static int
3664 iwn_add_broadcast_node(struct iwn_softc *sc, int async)
3665 {
3666 	const struct iwn_hal *hal = sc->sc_hal;
3667 	struct ifnet *ifp = sc->sc_ifp;
3668 	struct iwn_node_info node;
3669 	int error;
3670 
3671 	memset(&node, 0, sizeof node);
3672 	IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
3673 	node.id = hal->broadcast_id;
3674 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
3675 	error = hal->add_node(sc, &node, async);
3676 	if (error != 0)
3677 		return error;
3678 
3679 	error = iwn_set_link_quality(sc, hal->broadcast_id, async);
3680 	return error;
3681 }
3682 
3683 static int
3684 iwn_wme_update(struct ieee80211com *ic)
3685 {
3686 #define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
3687 #define	IWN_TXOP_TO_US(v)		(v<<5)
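/*
 * Example (assuming wmep_txopLimit is in the standard 32-usec units):
 * a logcwmin of 4 gives CWmin = 2^4 - 1 = 15 slots, and a TXOP limit
 * of 94 becomes 94 << 5 = 3008 usecs.
 */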
3688 	struct iwn_softc *sc = ic->ic_ifp->if_softc;
3689 	struct iwn_edca_params cmd;
3690 	int i;
3691 
3692 	memset(&cmd, 0, sizeof cmd);
3693 	cmd.flags = htole32(IWN_EDCA_UPDATE);
3694 	for (i = 0; i < WME_NUM_AC; i++) {
3695 		const struct wmeParams *wmep =
3696 		    &ic->ic_wme.wme_chanParams.cap_wmeParams[i];
3697 		cmd.ac[i].aifsn = wmep->wmep_aifsn;
3698 		cmd.ac[i].cwmin = htole16(IWN_EXP2(wmep->wmep_logcwmin));
3699 		cmd.ac[i].cwmax = htole16(IWN_EXP2(wmep->wmep_logcwmax));
3700 		cmd.ac[i].txoplimit =
3701 		    htole16(IWN_TXOP_TO_US(wmep->wmep_txopLimit));
3702 	}
3703 	IEEE80211_UNLOCK(ic);
3704 	IWN_LOCK(sc);
3705 	(void) iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1 /*async*/);
3706 	IWN_UNLOCK(sc);
3707 	IEEE80211_LOCK(ic);
3708 	return 0;
3709 #undef IWN_TXOP_TO_US
3710 #undef IWN_EXP2
3711 }
3712 
3713 static void
3714 iwn_update_mcast(struct ifnet *ifp)
3715 {
3716 	/* Ignore */
3717 }
3718 
3719 static void
3720 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
3721 {
3722 	struct iwn_cmd_led led;
3723 
3724 	/* Clear microcode LED ownership. */
3725 	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
3726 
3727 	led.which = which;
3728 	led.unit = htole32(10000);	/* on/off in unit of 100ms */
3729 	led.off = off;
3730 	led.on = on;
3731 	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
3732 }
3733 
3734 /*
3735  * Set the critical temperature at which the firmware will stop the radio
3736  * and notify us.
3737  */
3738 static int
3739 iwn_set_critical_temp(struct iwn_softc *sc)
3740 {
3741 	struct iwn_critical_temp crit;
3742 	int32_t temp;
3743 
3744 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
3745 
3746 	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
3747 		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
3748 	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
3749 		temp = IWN_CTOK(110);
3750 	else
3751 		temp = 110;
3752 	memset(&crit, 0, sizeof crit);
3753 	crit.tempR = htole32(temp);
3754 	DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n",
3755 	    temp);
3756 	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
3757 }
3758 
3759 static int
3760 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
3761 {
3762 	struct iwn_cmd_timing cmd;
3763 	uint64_t val, mod;
3764 
3765 	memset(&cmd, 0, sizeof cmd);
3766 	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
3767 	cmd.bintval = htole16(ni->ni_intval);
3768 	cmd.lintval = htole16(10);
3769 
3770 	/* Compute remaining time until next beacon. */
3771 	val = (uint64_t)ni->ni_intval * 1024;	/* TUs -> usecs */
3772 	mod = le64toh(cmd.tstamp) % val;
3773 	cmd.binitval = htole32((uint32_t)(val - mod));
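	/*
	 * Example: with a 100 TU beacon interval, val = 102400 usecs; if
	 * the TSF modulo that interval is 30000, binitval becomes
	 * 72400 usecs, the time remaining until the next beacon.
	 */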
3774 
3775 	DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
3776 	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
3777 
3778 	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
3779 }
3780 
3781 static void
3782 iwn4965_power_calibration(struct iwn_softc *sc, int temp)
3783 {
3784 	struct ifnet *ifp = sc->sc_ifp;
3785 	struct ieee80211com *ic = ifp->if_l2com;
3786 
3787 	/* Adjust TX power if need be (delta >= 3 degC.) */
3788 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
3789 	    __func__, sc->temp, temp);
3790 	if (abs(temp - sc->temp) >= 3) {
3791 		/* Record temperature of last calibration. */
3792 		sc->temp = temp;
3793 		(void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
3794 	}
3795 }
3796 
3797 /*
3798  * Set TX power for current channel (each rate has its own power settings).
3799  * This function takes into account the regulatory information from EEPROM,
3800  * the current temperature and the current voltage.
3801  */
3802 static int
3803 iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
3804     int async)
3805 {
3806 /* Fixed-point arithmetic division using an n-bit fractional part. */
3807 #define fdivround(a, b, n)	\
3808 	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
3809 /* Linear interpolation. */
3810 #define interpolate(x, x1, y1, x2, y2, n)	\
3811 	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
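/*
 * Example: fdivround(7, 2, 1) evaluates to ((2 * 7) / 2 + 1) / 2 = 4,
 * i.e. 7 / 2 rounded to the nearest integer; interpolate() uses it to
 * pick the value on the line through (x1,y1) and (x2,y2) at x.
 */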
3812 
3813 	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
3814 	struct ifnet *ifp = sc->sc_ifp;
3815 	struct ieee80211com *ic = ifp->if_l2com;
3816 	struct iwn_ucode_info *uc = &sc->ucode_info;
3817 	struct iwn4965_cmd_txpower cmd;
3818 	struct iwn4965_eeprom_chan_samples *chans;
3819 	int32_t vdiff, tdiff;
3820 	int i, c, grp, maxpwr;
3821 	const uint8_t *rf_gain, *dsp_gain;
3822 	uint8_t chan;
3823 
3824 	/* Retrieve channel number. */
3825 	chan = ieee80211_chan2ieee(ic, ch);
3826 	DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
3827 	    chan);
3828 
3829 	memset(&cmd, 0, sizeof cmd);
3830 	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
3831 	cmd.chan = chan;
3832 
3833 	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
3834 		maxpwr   = sc->maxpwr5GHz;
3835 		rf_gain  = iwn4965_rf_gain_5ghz;
3836 		dsp_gain = iwn4965_dsp_gain_5ghz;
3837 	} else {
3838 		maxpwr   = sc->maxpwr2GHz;
3839 		rf_gain  = iwn4965_rf_gain_2ghz;
3840 		dsp_gain = iwn4965_dsp_gain_2ghz;
3841 	}
3842 
3843 	/* Compute voltage compensation. */
3844 	vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
3845 	if (vdiff > 0)
3846 		vdiff *= 2;
3847 	if (abs(vdiff) > 2)
3848 		vdiff = 0;
3849 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3850 	    "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
3851 	    __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);
3852 
3853 	/* Get channel attenuation group. */
3854 	if (chan <= 20)		/* 1-20 */
3855 		grp = 4;
3856 	else if (chan <= 43)	/* 34-43 */
3857 		grp = 0;
3858 	else if (chan <= 70)	/* 44-70 */
3859 		grp = 1;
3860 	else if (chan <= 124)	/* 71-124 */
3861 		grp = 2;
3862 	else			/* 125-200 */
3863 		grp = 3;
3864 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3865 	    "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);
3866 
3867 	/* Get channel sub-band. */
3868 	for (i = 0; i < IWN_NBANDS; i++)
3869 		if (sc->bands[i].lo != 0 &&
3870 		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
3871 			break;
3872 	if (i == IWN_NBANDS)	/* Can't happen in real life. */
3873 		return EINVAL;
3874 	chans = sc->bands[i].chans;
3875 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3876 	    "%s: chan %d sub-band=%d\n", __func__, chan, i);
3877 
3878 	for (c = 0; c < 2; c++) {
3879 		uint8_t power, gain, temp;
3880 		int maxchpwr, pwr, ridx, idx;
3881 
3882 		power = interpolate(chan,
3883 		    chans[0].num, chans[0].samples[c][1].power,
3884 		    chans[1].num, chans[1].samples[c][1].power, 1);
3885 		gain  = interpolate(chan,
3886 		    chans[0].num, chans[0].samples[c][1].gain,
3887 		    chans[1].num, chans[1].samples[c][1].gain, 1);
3888 		temp  = interpolate(chan,
3889 		    chans[0].num, chans[0].samples[c][1].temp,
3890 		    chans[1].num, chans[1].samples[c][1].temp, 1);
3891 		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3892 		    "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
3893 		    __func__, c, power, gain, temp);
3894 
3895 		/* Compute temperature compensation. */
3896 		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
3897 		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3898 		    "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
3899 		    __func__, tdiff, sc->temp, temp);
3900 
3901 		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
3902 			/* Convert dBm to half-dBm. */
3903 			maxchpwr = sc->maxpwr[chan] * 2;
3904 			if ((ridx / 8) & 1)
3905 				maxchpwr -= 6;	/* MIMO 2T: -3dB */
3906 
3907 			pwr = maxpwr;
3908 
3909 			/* Adjust TX power based on rate. */
3910 			if ((ridx % 8) == 5)
3911 				pwr -= 15;	/* OFDM48: -7.5dB */
3912 			else if ((ridx % 8) == 6)
3913 				pwr -= 17;	/* OFDM54: -8.5dB */
3914 			else if ((ridx % 8) == 7)
3915 				pwr -= 20;	/* OFDM60: -10dB */
3916 			else
3917 				pwr -= 10;	/* Others: -5dB */
3918 
3919 			/* Do not exceed channel max TX power. */
3920 			if (pwr > maxchpwr)
3921 				pwr = maxchpwr;
3922 
3923 			idx = gain - (pwr - power) - tdiff - vdiff;
3924 			if ((ridx / 8) & 1)	/* MIMO */
3925 				idx += (int32_t)le32toh(uc->atten[grp][c]);
3926 
3927 			if (cmd.band == 0)
3928 				idx += 9;	/* 5GHz */
3929 			if (ridx == IWN_RIDX_MAX)
3930 				idx += 5;	/* CCK */
3931 
3932 			/* Make sure idx stays in a valid range. */
3933 			if (idx < 0)
3934 				idx = 0;
3935 			else if (idx > IWN4965_MAX_PWR_INDEX)
3936 				idx = IWN4965_MAX_PWR_INDEX;
3937 
3938 			DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3939 			    "%s: Tx chain %d, rate idx %d: power=%d\n",
3940 			    __func__, c, ridx, idx);
3941 			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
3942 			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
3943 		}
3944 	}
3945 
3946 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3947 	    "%s: set tx power for chan %d\n", __func__, chan);
3948 	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
3949 
3950 #undef interpolate
3951 #undef fdivround
3952 }
3953 
3954 static int
3955 iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
3956     int async)
3957 {
3958 	struct iwn5000_cmd_txpower cmd;
3959 
3960 	/*
3961 	 * TX power calibration is handled automatically by the firmware
3962 	 * for 5000 Series.
3963 	 */
3964 	memset(&cmd, 0, sizeof cmd);
3965 	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
3966 	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
3967 	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
3968 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
3969 	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
3970 }
3971 
3972 /*
3973  * Retrieve the maximum RSSI (in dBm) among receivers.
3974  */
3975 static int
3976 iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
3977 {
3978 	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
3979 	uint8_t mask, agc;
3980 	int rssi;
3981 
3982 	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
3983 	agc  = (le16toh(phy->agc) >> 7) & 0x7f;
3984 
3985 	rssi = 0;
3986 #if 0
3987 	if (mask & IWN_ANT_A)	/* Ant A */
3988 		rssi = max(rssi, phy->rssi[0]);
3989 	if (mask & IWN_ANT_B)	/* Ant B */
3990 		rssi = max(rssi, phy->rssi[2]);
3991 	if (mask & IWN_ANT_C)	/* Ant C */
3992 		rssi = max(rssi, phy->rssi[4]);
3993 #else
3994 	rssi = max(rssi, phy->rssi[0]);
3995 	rssi = max(rssi, phy->rssi[2]);
3996 	rssi = max(rssi, phy->rssi[4]);
3997 #endif
3998 
3999 	DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d mask 0x%x rssi %d %d %d "
4000 	    "result %d\n", __func__, agc, mask,
4001 	    phy->rssi[0], phy->rssi[2], phy->rssi[4],
4002 	    rssi - agc - IWN_RSSI_TO_DBM);
4003 	return rssi - agc - IWN_RSSI_TO_DBM;
4004 }
4005 
4006 static int
4007 iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
4008 {
4009 	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
4010 	int rssi;
4011 	uint8_t agc;
4012 
4013 	agc = (le32toh(phy->agc) >> 9) & 0x7f;
4014 
4015 	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
4016 		   le16toh(phy->rssi[1]) & 0xff);
4017 	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
4018 
4019 	DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d rssi %d %d %d "
4020 	    "result %d\n", __func__, agc,
4021 	    phy->rssi[0], phy->rssi[1], phy->rssi[2],
4022 	    rssi - agc - IWN_RSSI_TO_DBM);
4023 	return rssi - agc - IWN_RSSI_TO_DBM;
4024 }
4025 
4026 /*
4027  * Retrieve the average noise (in dBm) among receivers.
4028  */
4029 static int
4030 iwn_get_noise(const struct iwn_rx_general_stats *stats)
4031 {
4032 	int i, total, nbant, noise;
4033 
4034 	total = nbant = 0;
4035 	for (i = 0; i < 3; i++) {
4036 		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
4037 			continue;
4038 		total += noise;
4039 		nbant++;
4040 	}
4041 	/* There should be at least one antenna but check anyway. */
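	/* The 107 offset is taken to be the raw-to-dBm conversion constant
	 * implied by the dBm return value noted above. */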
4042 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4043 }
4044 
4045 /*
4046  * Compute temperature (in degC) from last received statistics.
4047  */
4048 static int
4049 iwn4965_get_temperature(struct iwn_softc *sc)
4050 {
4051 	struct iwn_ucode_info *uc = &sc->ucode_info;
4052 	int32_t r1, r2, r3, r4, temp;
4053 
4054 	r1 = le32toh(uc->temp[0].chan20MHz);
4055 	r2 = le32toh(uc->temp[1].chan20MHz);
4056 	r3 = le32toh(uc->temp[2].chan20MHz);
4057 	r4 = le32toh(sc->rawtemp);
4058 
4059 	if (r1 == r3)	/* Prevents division by 0 (should not happen.) */
4060 		return 0;
4061 
4062 	/* Sign-extend 23-bit R4 value to 32-bit. */
4063 	r4 = (r4 << 8) >> 8;
4064 	/* Compute temperature in Kelvin. */
4065 	temp = (259 * (r4 - r2)) / (r3 - r1);
4066 	temp = (temp * 97) / 100 + 8;
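	/* The 97/100 scaling and +8 offset appear to be the vendor's
	 * empirical calibration correction; they are used here verbatim. */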
4067 
4068 	DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
4069 	    IWN_KTOC(temp));
4070 	return IWN_KTOC(temp);
4071 }
4072 
4073 static int
4074 iwn5000_get_temperature(struct iwn_softc *sc)
4075 {
4076 	int32_t temp;
4077 
4078 	/*
4079 	 * Temperature is not used by the driver for 5000 Series because
4080 	 * TX power calibration is handled by firmware.  We export it to
4081 	 * users through the sensor framework though.
4082 	 */
4083 	temp = le32toh(sc->rawtemp);
4084 	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
4085 		temp = (temp / -5) + sc->temp_off;
4086 		temp = IWN_KTOC(temp);
4087 	}
4088 	return temp;
4089 }
4090 
4091 /*
4092  * Initialize sensitivity calibration state machine.
4093  */
4094 static int
4095 iwn_init_sensitivity(struct iwn_softc *sc)
4096 {
4097 	const struct iwn_hal *hal = sc->sc_hal;
4098 	struct iwn_calib_state *calib = &sc->calib;
4099 	uint32_t flags;
4100 	int error;
4101 
4102 	/* Reset calibration state machine. */
4103 	memset(calib, 0, sizeof (*calib));
4104 	calib->state = IWN_CALIB_STATE_INIT;
4105 	calib->cck_state = IWN_CCK_STATE_HIFA;
4106 	/* Set initial correlation values. */
4107 	calib->ofdm_x1     = sc->limits->min_ofdm_x1;
4108 	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
4109 	calib->ofdm_x4     = sc->limits->min_ofdm_x4;
4110 	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
4111 	calib->cck_x4      = 125;
4112 	calib->cck_mrc_x4  = sc->limits->min_cck_mrc_x4;
4113 	calib->energy_cck  = sc->limits->energy_cck;
4114 
4115 	/* Write initial sensitivity. */
4116 	error = iwn_send_sensitivity(sc);
4117 	if (error != 0)
4118 		return error;
4119 
4120 	/* Write initial gains. */
4121 	error = hal->init_gains(sc);
4122 	if (error != 0)
4123 		return error;
4124 
4125 	/* Request statistics at each beacon interval. */
4126 	flags = 0;
4127 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: calibrate phy\n", __func__);
4128 	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
4129 }
4130 
4131 /*
4132  * Collect noise and RSSI statistics for the first 20 beacons received
4133  * after association and use them to determine connected antennas and
4134  * to set differential gains.
4135  */
4136 static void
4137 iwn_collect_noise(struct iwn_softc *sc,
4138     const struct iwn_rx_general_stats *stats)
4139 {
4140 	const struct iwn_hal *hal = sc->sc_hal;
4141 	struct iwn_calib_state *calib = &sc->calib;
4142 	uint32_t val;
4143 	int i;
4144 
4145 	/* Accumulate RSSI and noise for all 3 antennas. */
4146 	for (i = 0; i < 3; i++) {
4147 		calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
4148 		calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
4149 	}
4150 	/* NB: We update differential gains only once after 20 beacons. */
4151 	if (++calib->nbeacons < 20)
4152 		return;
4153 
4154 	/* Determine highest average RSSI. */
4155 	val = MAX(calib->rssi[0], calib->rssi[1]);
4156 	val = MAX(calib->rssi[2], val);
4157 
4158 	/* Determine which antennas are connected. */
4159 	sc->chainmask = sc->rxchainmask;
4160 	for (i = 0; i < 3; i++)
4161 		if (val - calib->rssi[i] > 15 * 20)
4162 			sc->chainmask &= ~(1 << i);
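	/* NB: calib->rssi[] holds sums over the 20 accumulated beacons, so
	 * the 15 * 20 threshold corresponds to an average gap of roughly
	 * 15 dB (assuming dB-scaled RSSI) from the strongest antenna. */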
4163 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4164 	    "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
4165 	    __func__, sc->rxchainmask, sc->chainmask);
4166 
4167 	/* If none of the TX antennas are connected, keep at least one. */
4168 	if ((sc->chainmask & sc->txchainmask) == 0)
4169 		sc->chainmask |= IWN_LSB(sc->txchainmask);
4170 
4171 	(void)hal->set_gains(sc);
4172 	calib->state = IWN_CALIB_STATE_RUN;
4173 
4174 #ifdef notyet
4175 	/* XXX Disable RX chains with no antennas connected. */
4176 	sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
4177 	(void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1);
4178 #endif
4179 
4180 #if 0
4181 	/* XXX: not yet */
4182 	/* Enable power-saving mode if requested by user. */
4183 	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON)
4184 		(void)iwn_set_pslevel(sc, 0, 3, 1);
4185 #endif
4186 }
4187 
4188 static int
4189 iwn4965_init_gains(struct iwn_softc *sc)
4190 {
4191 	struct iwn_phy_calib_gain cmd;
4192 
4193 	memset(&cmd, 0, sizeof cmd);
4194 	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4195 	/* Differential gains initially set to 0 for all 3 antennas. */
4196 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4197 	    "%s: setting initial differential gains\n", __func__);
4198 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4199 }
4200 
4201 static int
4202 iwn5000_init_gains(struct iwn_softc *sc)
4203 {
4204 	struct iwn_phy_calib cmd;
4205 
4206 	memset(&cmd, 0, sizeof cmd);
4207 	cmd.code = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
4208 	cmd.ngroups = 1;
4209 	cmd.isvalid = 1;
4210 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4211 	    "%s: setting initial differential gains\n", __func__);
4212 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4213 }
4214 
4215 static int
4216 iwn4965_set_gains(struct iwn_softc *sc)
4217 {
4218 	struct iwn_calib_state *calib = &sc->calib;
4219 	struct iwn_phy_calib_gain cmd;
4220 	int i, delta, noise;
4221 
4222 	/* Get minimal noise among connected antennas. */
4223 	noise = INT_MAX;	/* NB: There's at least one antenna. */
4224 	for (i = 0; i < 3; i++)
4225 		if (sc->chainmask & (1 << i))
4226 			noise = MIN(calib->noise[i], noise);
4227 
4228 	memset(&cmd, 0, sizeof cmd);
4229 	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4230 	/* Set differential gains for connected antennas. */
4231 	for (i = 0; i < 3; i++) {
4232 		if (sc->chainmask & (1 << i)) {
4233 			/* Compute attenuation (in units of 1.5dB). */
4234 			delta = (noise - (int32_t)calib->noise[i]) / 30;
4235 			/* NB: delta <= 0 */
4236 			/* Limit to [-4.5dB,0]. */
4237 			cmd.gain[i] = MIN(abs(delta), 3);
4238 			if (delta < 0)
4239 				cmd.gain[i] |= 1 << 2;	/* sign bit */
4240 		}
4241 	}
4242 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4243 	    "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
4244 	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
4245 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4246 }
4247 
4248 static int
4249 iwn5000_set_gains(struct iwn_softc *sc)
4250 {
4251 	struct iwn_calib_state *calib = &sc->calib;
4252 	struct iwn_phy_calib_gain cmd;
4253 	int i, ant, delta, div;
4254 
4255 	/* Noise was summed over 20 beacons; all but the 6050 need an extra 1.5 factor. */
4256 	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
4257 
4258 	memset(&cmd, 0, sizeof cmd);
4259 	cmd.code = IWN5000_PHY_CALIB_NOISE_GAIN;
4260 	cmd.ngroups = 1;
4261 	cmd.isvalid = 1;
4262 	/* Use the first available RX antenna as the reference. */
4263 	ant = IWN_LSB(sc->rxchainmask);
4264 	/* Set differential gains for other antennas. */
4265 	for (i = ant + 1; i < 3; i++) {
4266 		if (sc->chainmask & (1 << i)) {
4267 			/* The delta is relative to antenna "ant". */
4268 			delta = ((int32_t)calib->noise[ant] -
4269 			    (int32_t)calib->noise[i]) / div;
4270 			/* Limit to [-4.5dB,+4.5dB]. */
4271 			cmd.gain[i - 1] = MIN(abs(delta), 3);
4272 			if (delta < 0)
4273 				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
4274 		}
4275 	}
4276 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4277 	    "setting differential gains Ant B/C: %x/%x (%x)\n",
4278 	    cmd.gain[0], cmd.gain[1], sc->chainmask);
4279 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4280 }
4281 
4282 /*
4283  * Tune RF RX sensitivity based on the number of false alarms detected
4284  * during the last beacon period.
4285  */
4286 static void
4287 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
4288 {
4289 #define inc(val, inc, max)			\
4290 	if ((val) < (max)) {			\
4291 		if ((val) < (max) - (inc))	\
4292 			(val) += (inc);		\
4293 		else				\
4294 			(val) = (max);		\
4295 		needs_update = 1;		\
4296 	}
4297 #define dec(val, dec, min)			\
4298 	if ((val) > (min)) {			\
4299 		if ((val) > (min) + (dec))	\
4300 			(val) -= (dec);		\
4301 		else				\
4302 			(val) = (min);		\
4303 		needs_update = 1;		\
4304 	}
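/*
 * Example: inc(x, 3, 12) with x == 10 clamps x to 12, and dec(x, 3, 5)
 * with x == 6 clamps x to 5; needs_update is set whenever a value
 * actually changes.
 */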
4305 
4306 	const struct iwn_sensitivity_limits *limits = sc->limits;
4307 	struct iwn_calib_state *calib = &sc->calib;
4308 	uint32_t val, rxena, fa;
4309 	uint32_t energy[3], energy_min;
4310 	uint8_t noise[3], noise_ref;
4311 	int i, needs_update = 0;
4312 
4313 	/* Check that we've been enabled long enough. */
4314 	rxena = le32toh(stats->general.load);
4315 	if (rxena == 0)
4316 		return;
4317 
4318 	/* Compute number of false alarms since last call for OFDM. */
4319 	fa  = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
4320 	fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
4321 	fa *= 200 * 1024;	/* 200TU */
4322 
4323 	/* Save counter values for the next call. */
4324 	calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
4325 	calib->fa_ofdm = le32toh(stats->ofdm.fa);
4326 
4327 	if (fa > 50 * rxena) {
4328 		/* High false alarm count, decrease sensitivity. */
4329 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4330 		    "%s: OFDM high false alarm count: %u\n", __func__, fa);
4331 		inc(calib->ofdm_x1,     1, limits->max_ofdm_x1);
4332 		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
4333 		inc(calib->ofdm_x4,     1, limits->max_ofdm_x4);
4334 		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
4335 
4336 	} else if (fa < 5 * rxena) {
4337 		/* Low false alarm count, increase sensitivity. */
4338 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4339 		    "%s: OFDM low false alarm count: %u\n", __func__, fa);
4340 		dec(calib->ofdm_x1,     1, limits->min_ofdm_x1);
4341 		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
4342 		dec(calib->ofdm_x4,     1, limits->min_ofdm_x4);
4343 		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
4344 	}
4345 
4346 	/* Compute maximum noise among 3 receivers. */
4347 	for (i = 0; i < 3; i++)
4348 		noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
4349 	val = MAX(noise[0], noise[1]);
4350 	val = MAX(noise[2], val);
4351 	/* Insert it into our samples table. */
4352 	calib->noise_samples[calib->cur_noise_sample] = val;
4353 	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
4354 
4355 	/* Compute maximum noise among last 20 samples. */
4356 	noise_ref = calib->noise_samples[0];
4357 	for (i = 1; i < 20; i++)
4358 		noise_ref = MAX(noise_ref, calib->noise_samples[i]);
4359 
4360 	/* Compute maximum energy among 3 receivers. */
4361 	for (i = 0; i < 3; i++)
4362 		energy[i] = le32toh(stats->general.energy[i]);
4363 	val = MIN(energy[0], energy[1]);
4364 	val = MIN(energy[2], val);
4365 	/* Insert it into our samples table. */
4366 	calib->energy_samples[calib->cur_energy_sample] = val;
4367 	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
4368 
4369 	/* Compute minimum energy among last 10 samples. */
4370 	energy_min = calib->energy_samples[0];
4371 	for (i = 1; i < 10; i++)
4372 		energy_min = MAX(energy_min, calib->energy_samples[i]);
4373 	energy_min += 6;
4374 
4375 	/* Compute number of false alarms since last call for CCK. */
4376 	fa  = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
4377 	fa += le32toh(stats->cck.fa) - calib->fa_cck;
4378 	fa *= 200 * 1024;	/* 200TU */
4379 
4380 	/* Save counter values for the next call. */
4381 	calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
4382 	calib->fa_cck = le32toh(stats->cck.fa);
4383 
4384 	if (fa > 50 * rxena) {
4385 		/* High false alarm count, decrease sensitivity. */
4386 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4387 		    "%s: CCK high false alarm count: %u\n", __func__, fa);
4388 		calib->cck_state = IWN_CCK_STATE_HIFA;
4389 		calib->low_fa = 0;
4390 
4391 		if (calib->cck_x4 > 160) {
4392 			calib->noise_ref = noise_ref;
4393 			if (calib->energy_cck > 2)
4394 				dec(calib->energy_cck, 2, energy_min);
4395 		}
4396 		if (calib->cck_x4 < 160) {
4397 			calib->cck_x4 = 161;
4398 			needs_update = 1;
4399 		} else
4400 			inc(calib->cck_x4, 3, limits->max_cck_x4);
4401 
4402 		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
4403 
4404 	} else if (fa < 5 * rxena) {
4405 		/* Low false alarm count, increase sensitivity. */
4406 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4407 		    "%s: CCK low false alarm count: %u\n", __func__, fa);
4408 		calib->cck_state = IWN_CCK_STATE_LOFA;
4409 		calib->low_fa++;
4410 
4411 		if (calib->cck_state != IWN_CCK_STATE_INIT &&
4412 		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
4413 		    calib->low_fa > 100)) {
4414 			inc(calib->energy_cck, 2, limits->min_energy_cck);
4415 			dec(calib->cck_x4,     3, limits->min_cck_x4);
4416 			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
4417 		}
4418 	} else {
4419 		/* Not worth increasing or decreasing sensitivity. */
4420 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4421 		    "%s: CCK normal false alarm count: %u\n", __func__, fa);
4422 		calib->low_fa = 0;
4423 		calib->noise_ref = noise_ref;
4424 
4425 		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
4426 			/* Previous interval had many false alarms. */
4427 			dec(calib->energy_cck, 8, energy_min);
4428 		}
4429 		calib->cck_state = IWN_CCK_STATE_INIT;
4430 	}
4431 
4432 	if (needs_update)
4433 		(void)iwn_send_sensitivity(sc);
4434 #undef dec
4435 #undef inc
4436 }
4437 
4438 static int
4439 iwn_send_sensitivity(struct iwn_softc *sc)
4440 {
4441 	struct iwn_calib_state *calib = &sc->calib;
4442 	struct iwn_sensitivity_cmd cmd;
4443 
4444 	memset(&cmd, 0, sizeof cmd);
4445 	cmd.which = IWN_SENSITIVITY_WORKTBL;
4446 	/* OFDM modulation. */
4447 	cmd.corr_ofdm_x1     = htole16(calib->ofdm_x1);
4448 	cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
4449 	cmd.corr_ofdm_x4     = htole16(calib->ofdm_x4);
4450 	cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
4451 	cmd.energy_ofdm      = htole16(sc->limits->energy_ofdm);
4452 	cmd.energy_ofdm_th   = htole16(62);
4453 	/* CCK modulation. */
4454 	cmd.corr_cck_x4      = htole16(calib->cck_x4);
4455 	cmd.corr_cck_mrc_x4  = htole16(calib->cck_mrc_x4);
4456 	cmd.energy_cck       = htole16(calib->energy_cck);
4457 	/* Barker modulation: use default values. */
4458 	cmd.corr_barker      = htole16(190);
4459 	cmd.corr_barker_mrc  = htole16(390);
4460 
4461 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4462 	    "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
4463 	    calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
4464 	    calib->ofdm_mrc_x4, calib->cck_x4,
4465 	    calib->cck_mrc_x4, calib->energy_cck);
4466 	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, sizeof cmd, 1);
4467 }
4468 
4469 /*
4470  * Set STA mode power saving level (between 0 and 5).
4471  * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
4472  */
4473 static int
4474 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
4475 {
4476 	const struct iwn_pmgt *pmgt;
4477 	struct iwn_pmgt_cmd cmd;
4478 	uint32_t max, skip_dtim;
4479 	uint32_t tmp;
4480 	int i;
4481 
4482 	/* Select which PS parameters to use. */
4483 	if (dtim <= 2)
4484 		pmgt = &iwn_pmgt[0][level];
4485 	else if (dtim <= 10)
4486 		pmgt = &iwn_pmgt[1][level];
4487 	else
4488 		pmgt = &iwn_pmgt[2][level];
4489 
4490 	memset(&cmd, 0, sizeof cmd);
4491 	if (level != 0)	/* not CAM */
4492 		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
4493 	if (level == 5)
4494 		cmd.flags |= htole16(IWN_PS_FAST_PD);
4495 	/* Retrieve PCIe Active State Power Management (ASPM). */
4496 	tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
4497 	if (!(tmp & 0x1))	/* L0s Entry disabled. */
4498 		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
4499 	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
4500 	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
4501 
4502 	if (dtim == 0) {
4503 		dtim = 1;
4504 		skip_dtim = 0;
4505 	} else
4506 		skip_dtim = pmgt->skip_dtim;
4507 	if (skip_dtim != 0) {
4508 		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
4509 		max = pmgt->intval[4];
4510 		if (max == (uint32_t)-1)
4511 			max = dtim * (skip_dtim + 1);
4512 		else if (max > dtim)
4513 			max = (max / dtim) * dtim;
4514 	} else
4515 		max = dtim;
4516 	for (i = 0; i < 5; i++)
4517 		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
4518 
4519 	DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
4520 	    level);
4521 	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
4522 }
4523 
4524 static int
4525 iwn_config(struct iwn_softc *sc)
4526 {
4527 	const struct iwn_hal *hal = sc->sc_hal;
4528 	struct ifnet *ifp = sc->sc_ifp;
4529 	struct ieee80211com *ic = ifp->if_l2com;
4530 	struct iwn_bluetooth bluetooth;
4531 	uint32_t txmask;
4532 	int error;
4533 	uint16_t rxchain;
4534 
4535 	/* Configure valid TX chains for 5000 Series. */
4536 	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4537 		txmask = htole32(sc->txchainmask);
4538 		DPRINTF(sc, IWN_DEBUG_RESET,
4539 		    "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
4540 		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
4541 		    sizeof txmask, 0);
4542 		if (error != 0) {
4543 			device_printf(sc->sc_dev,
4544 			    "%s: could not configure valid TX chains, "
4545 			    "error %d\n", __func__, error);
4546 			return error;
4547 		}
4548 	}
4549 
4550 	/* Configure bluetooth coexistence. */
4551 	memset(&bluetooth, 0, sizeof bluetooth);
4552 	bluetooth.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
4553 	bluetooth.lead_time = IWN_BT_LEAD_TIME_DEF;
4554 	bluetooth.max_kill = IWN_BT_MAX_KILL_DEF;
4555 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: config bluetooth coexistence\n",
4556 	    __func__);
4557 	error = iwn_cmd(sc, IWN_CMD_BT_COEX, &bluetooth, sizeof bluetooth, 0);
4558 	if (error != 0) {
4559 		device_printf(sc->sc_dev,
4560 		    "%s: could not configure bluetooth coexistence, error %d\n",
4561 		    __func__, error);
4562 		return error;
4563 	}
4564 
4565 	/* Set mode, channel, RX filter and enable RX. */
4566 	memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
4567 	IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp));
4568 	IEEE80211_ADDR_COPY(sc->rxon.wlap, IF_LLADDR(ifp));
4569 	sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
4570 	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4571 	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
4572 		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4573 	switch (ic->ic_opmode) {
4574 	case IEEE80211_M_STA:
4575 		sc->rxon.mode = IWN_MODE_STA;
4576 		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
4577 		break;
4578 	case IEEE80211_M_MONITOR:
4579 		sc->rxon.mode = IWN_MODE_MONITOR;
4580 		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
4581 		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
4582 		break;
4583 	default:
4584 		/* Should not get here. */
4585 		break;
4586 	}
4587 	sc->rxon.cck_mask  = 0x0f;	/* not yet negotiated */
4588 	sc->rxon.ofdm_mask = 0xff;	/* not yet negotiated */
4589 	sc->rxon.ht_single_mask = 0xff;
4590 	sc->rxon.ht_dual_mask = 0xff;
4591 	sc->rxon.ht_triple_mask = 0xff;
4592 	rxchain =
4593 	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
4594 	    IWN_RXCHAIN_MIMO_COUNT(2) |
4595 	    IWN_RXCHAIN_IDLE_COUNT(2);
4596 	sc->rxon.rxchain = htole16(rxchain);
4597 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
4598 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 0);
4599 	if (error != 0) {
4600 		device_printf(sc->sc_dev,
4601 		    "%s: RXON command failed\n", __func__);
4602 		return error;
4603 	}
4604 
4605 	error = iwn_add_broadcast_node(sc, 0);
4606 	if (error != 0) {
4607 		device_printf(sc->sc_dev,
4608 		    "%s: could not add broadcast node\n", __func__);
4609 		return error;
4610 	}
4611 
4612 	/* Configuration has changed, set TX power accordingly. */
4613 	error = hal->set_txpower(sc, ic->ic_curchan, 0);
4614 	if (error != 0) {
4615 		device_printf(sc->sc_dev,
4616 		    "%s: could not set TX power\n", __func__);
4617 		return error;
4618 	}
4619 
4620 	error = iwn_set_critical_temp(sc);
4621 	if (error != 0) {
4622 		device_printf(sc->sc_dev,
4623 		    "%s: could not set critical temperature\n", __func__);
4624 		return error;
4625 	}
4626 
4627 	/* Set power saving level to CAM during initialization. */
4628 	error = iwn_set_pslevel(sc, 0, 0, 0);
4629 	if (error != 0) {
4630 		device_printf(sc->sc_dev,
4631 		    "%s: could not set power saving level\n", __func__);
4632 		return error;
4633 	}
4634 	return 0;
4635 }
4636 
4637 static int
4638 iwn_scan(struct iwn_softc *sc)
4639 {
4640 	struct ifnet *ifp = sc->sc_ifp;
4641 	struct ieee80211com *ic = ifp->if_l2com;
4642 	struct ieee80211_scan_state *ss = ic->ic_scan;	/*XXX*/
4643 	struct iwn_scan_hdr *hdr;
4644 	struct iwn_cmd_data *tx;
4645 	struct iwn_scan_essid *essid;
4646 	struct iwn_scan_chan *chan;
4647 	struct ieee80211_frame *wh;
4648 	struct ieee80211_rateset *rs;
4649 	struct ieee80211_channel *c;
4650 	int buflen, error, nrates;
4651 	uint16_t rxchain;
4652 	uint8_t *buf, *frm, txant;
4653 
4654 	buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
4655 	if (buf == NULL) {
4656 		device_printf(sc->sc_dev,
4657 		    "%s: could not allocate buffer for scan command\n",
4658 		    __func__);
4659 		return ENOMEM;
4660 	}
4661 	hdr = (struct iwn_scan_hdr *)buf;
4662 
4663 	/*
4664 	 * Move to the next channel if no frames are received within 10ms
4665 	 * after sending the probe request.
4666 	 */
4667 	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
4668 	hdr->quiet_threshold = htole16(1);	/* min # of packets */
4669 
4670 	/* Select antennas for scanning. */
4671 	rxchain =
4672 	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
4673 	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
4674 	    IWN_RXCHAIN_DRIVER_FORCE;
4675 	if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
4676 	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
4677 		/* Ant A must be avoided in 5GHz because of an HW bug. */
4678 		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC);
4679 	} else	/* Use all available RX antennas. */
4680 		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
4681 	hdr->rxchain = htole16(rxchain);
4682 	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
4683 
4684 	tx = (struct iwn_cmd_data *)(hdr + 1);
4685 	tx->flags = htole32(IWN_TX_AUTO_SEQ);
4686 	tx->id = sc->sc_hal->broadcast_id;
4687 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4688 
4689 	if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) {
4690 		/* Send probe requests at 6Mbps. */
4691 		tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
4692 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4693 	} else {
4694 		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
4695 		/* Send probe requests at 1Mbps. */
4696 		tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp;
4697 		tx->rflags = IWN_RFLAG_CCK;
4698 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4699 	}
4700 	/* Use the first valid TX antenna. */
4701 	txant = IWN_LSB(sc->txchainmask);
4702 	tx->rflags |= IWN_RFLAG_ANT(txant);
4703 
4704 	essid = (struct iwn_scan_essid *)(tx + 1);
4705 	if (ss->ss_ssid[0].len != 0) {
4706 		essid[0].id = IEEE80211_ELEMID_SSID;
4707 		essid[0].len = ss->ss_ssid[0].len;
4708 		memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
4709 	}
4710 
4711 	/*
4712 	 * Build a probe request frame.  Most of the following code is a
4713 	 * copy & paste of what is done in net80211.
4714 	 */
4715 	wh = (struct ieee80211_frame *)(essid + 20);
4716 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4717 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4718 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4719 	IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
4720 	IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
4721 	IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
4722 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
4723 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
4724 
4725 	frm = (uint8_t *)(wh + 1);
4726 
4727 	/* Add SSID IE. */
4728 	*frm++ = IEEE80211_ELEMID_SSID;
4729 	*frm++ = ss->ss_ssid[0].len;
4730 	memcpy(frm, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
4731 	frm += ss->ss_ssid[0].len;
4732 
4733 	/* Add supported rates IE. */
4734 	*frm++ = IEEE80211_ELEMID_RATES;
4735 	nrates = rs->rs_nrates;
4736 	if (nrates > IEEE80211_RATE_SIZE)
4737 		nrates = IEEE80211_RATE_SIZE;
4738 	*frm++ = nrates;
4739 	memcpy(frm, rs->rs_rates, nrates);
4740 	frm += nrates;
4741 
4742 	/* Add supported xrates IE. */
4743 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
4744 		nrates = rs->rs_nrates - IEEE80211_RATE_SIZE;
4745 		*frm++ = IEEE80211_ELEMID_XRATES;
4746 		*frm++ = (uint8_t)nrates;
4747 		memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates);
4748 		frm += nrates;
4749 	}
4750 
4751 	/* Set length of probe request. */
4752 	tx->len = htole16(frm - (uint8_t *)wh);
4753 
4754 	c = ic->ic_curchan;
4755 	chan = (struct iwn_scan_chan *)frm;
4756 	chan->chan = htole16(ieee80211_chan2ieee(ic, c));
4757 	chan->flags = 0;
4758 	if (ss->ss_nssid > 0)
4759 		chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
4760 	chan->dsp_gain = 0x6e;
4761 	if (IEEE80211_IS_CHAN_5GHZ(c) &&
4762 	    !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
4763 		chan->rf_gain = 0x3b;
4764 		chan->active  = htole16(24);
4765 		chan->passive = htole16(110);
4766 		chan->flags |= htole32(IWN_CHAN_ACTIVE);
4767 	} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4768 		chan->rf_gain = 0x3b;
4769 		chan->active  = htole16(24);
4770 		if (sc->rxon.associd)
4771 			chan->passive = htole16(78);
4772 		else
4773 			chan->passive = htole16(110);
4774 		hdr->crc_threshold = 0xffff;
4775 	} else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
4776 		chan->rf_gain = 0x28;
4777 		chan->active  = htole16(36);
4778 		chan->passive = htole16(120);
4779 		chan->flags |= htole32(IWN_CHAN_ACTIVE);
4780 	} else {
4781 		chan->rf_gain = 0x28;
4782 		chan->active  = htole16(36);
4783 		if (sc->rxon.associd)
4784 			chan->passive = htole16(88);
4785 		else
4786 			chan->passive = htole16(120);
4787 		hdr->crc_threshold = 0xffff;
4788 	}
4789 
4790 	DPRINTF(sc, IWN_DEBUG_STATE,
4791 	    "%s: chan %u flags 0x%x rf_gain 0x%x "
4792 	    "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__,
4793 	    chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
4794 	    chan->active, chan->passive);
4795 
4796 	hdr->nchan++;
4797 	chan++;
4798 	buflen = (uint8_t *)chan - buf;
4799 	hdr->len = htole16(buflen);
4800 
4801 	DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
4802 	    hdr->nchan);
4803 	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
4804 	free(buf, M_DEVBUF);
4805 	return error;
4806 }
4807 
4808 static int
4809 iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
4810 {
4811 	const struct iwn_hal *hal = sc->sc_hal;
4812 	struct ifnet *ifp = sc->sc_ifp;
4813 	struct ieee80211com *ic = ifp->if_l2com;
4814 	struct ieee80211_node *ni = vap->iv_bss;
4815 	int error;
4816 
4817 	sc->calib.state = IWN_CALIB_STATE_INIT;
4818 
4819 	/* Update adapter configuration. */
4820 	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
4821 	sc->rxon.chan = htole16(ieee80211_chan2ieee(ic, ni->ni_chan));
4822 	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4823 	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
4824 		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4825 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
4826 		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
4827 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4828 		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
4829 	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
4830 		sc->rxon.cck_mask  = 0;
4831 		sc->rxon.ofdm_mask = 0x15;
4832 	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
4833 		sc->rxon.cck_mask  = 0x03;
4834 		sc->rxon.ofdm_mask = 0;
4835 	} else {
4836 		/* XXX assume 802.11b/g */
4837 		sc->rxon.cck_mask  = 0x0f;
4838 		sc->rxon.ofdm_mask = 0x15;
4839 	}
4840 	DPRINTF(sc, IWN_DEBUG_STATE,
4841 	    "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x "
4842 	    "ht_single 0x%x ht_dual 0x%x rxchain 0x%x "
4843 	    "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n",
4844 	    __func__,
4845 	    le16toh(sc->rxon.chan), sc->rxon.mode, le32toh(sc->rxon.flags),
4846 	    sc->rxon.cck_mask, sc->rxon.ofdm_mask,
4847 	    sc->rxon.ht_single_mask, sc->rxon.ht_dual_mask,
4848 	    le16toh(sc->rxon.rxchain),
4849 	    sc->rxon.myaddr, ":", sc->rxon.wlap, ":", sc->rxon.bssid, ":",
4850 	    le16toh(sc->rxon.associd), le32toh(sc->rxon.filter));
4851 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1);
4852 	if (error != 0) {
4853 		device_printf(sc->sc_dev,
4854 		    "%s: RXON command failed, error %d\n", __func__, error);
4855 		return error;
4856 	}
4857 
4858 	/* Configuration has changed, set TX power accordingly. */
4859 	error = hal->set_txpower(sc, ni->ni_chan, 1);
4860 	if (error != 0) {
4861 		device_printf(sc->sc_dev,
4862 		    "%s: could not set Tx power, error %d\n", __func__, error);
4863 		return error;
4864 	}
4865 	/*
4866 	 * Reconfiguring RXON clears the firmware's node table, so we must
4867 	 * add the broadcast node again.
4868 	 */
4869 	error = iwn_add_broadcast_node(sc, 1);
4870 	if (error != 0) {
4871 		device_printf(sc->sc_dev,
4872 		    "%s: could not add broadcast node, error %d\n",
4873 		    __func__, error);
4874 		return error;
4875 	}
4876 	return 0;
4877 }
4878 
4879 /*
4880  * Configure the adapter for associated state.
4881  */
4882 static int
4883 iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
4884 {
4885 #define	MS(v,x)	(((v) & x) >> x##_S)
4886 	const struct iwn_hal *hal = sc->sc_hal;
4887 	struct ifnet *ifp = sc->sc_ifp;
4888 	struct ieee80211com *ic = ifp->if_l2com;
4889 	struct ieee80211_node *ni = vap->iv_bss;
4890 	struct iwn_node_info node;
4891 	int error;
4892 
4893 	sc->calib.state = IWN_CALIB_STATE_INIT;
4894 
4895 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4896 		/* Link LED blinks while monitoring. */
4897 		iwn_set_led(sc, IWN_LED_LINK, 5, 5);
4898 		return 0;
4899 	}
4900 	error = iwn_set_timing(sc, ni);
4901 	if (error != 0) {
4902 		device_printf(sc->sc_dev,
4903 		    "%s: could not set timing, error %d\n", __func__, error);
4904 		return error;
4905 	}
4906 
4907 	/* Update adapter configuration. */
4908 	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
4909 	sc->rxon.chan = htole16(ieee80211_chan2ieee(ic, ni->ni_chan));
4910 	sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd));
4911 	/* Short preamble and slot time are negotiated when associating. */
4912 	sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT);
4913 	sc->rxon.flags |= htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4914 	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
4915 		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4916 	else
4917 		sc->rxon.flags &= ~htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4918 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
4919 		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
4920 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4921 		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
4922 	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
4923 		sc->rxon.cck_mask  = 0;
4924 		sc->rxon.ofdm_mask = 0x15;
4925 	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
4926 		sc->rxon.cck_mask  = 0x03;
4927 		sc->rxon.ofdm_mask = 0;
4928 	} else {
4929 		/* XXX assume 802.11b/g */
4930 		sc->rxon.cck_mask  = 0x0f;
4931 		sc->rxon.ofdm_mask = 0x15;
4932 	}
4933 #if 0	/* HT */
4934 	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
4935 		sc->rxon.flags &= ~htole32(IWN_RXON_HT);
4936 		if (IEEE80211_IS_CHAN_HT40U(ni->ni_chan))
4937 			sc->rxon.flags |= htole32(IWN_RXON_HT40U);
4938 		else if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
4939 			sc->rxon.flags |= htole32(IWN_RXON_HT40D);
4940 		else
4941 			sc->rxon.flags |= htole32(IWN_RXON_HT20);
4942 		sc->rxon.rxchain = htole16(
4943 			  IWN_RXCHAIN_VALID(3)
4944 			| IWN_RXCHAIN_MIMO_COUNT(3)
4945 			| IWN_RXCHAIN_IDLE_COUNT(1)
4946 			| IWN_RXCHAIN_MIMO_FORCE);
4947 
4948 		maxrxampdu = MS(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU);
4949 		ampdudensity = MS(ni->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY);
4950 	} else
4951 		maxrxampdu = ampdudensity = 0;
4952 #endif
4953 	sc->rxon.filter |= htole32(IWN_FILTER_BSS);
4954 
4955 	DPRINTF(sc, IWN_DEBUG_STATE,
4956 	    "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x "
4957 	    "ht_single 0x%x ht_dual 0x%x rxchain 0x%x "
4958 	    "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n",
4959 	    __func__,
4960 	    le16toh(sc->rxon.chan), sc->rxon.mode, le32toh(sc->rxon.flags),
4961 	    sc->rxon.cck_mask, sc->rxon.ofdm_mask,
4962 	    sc->rxon.ht_single_mask, sc->rxon.ht_dual_mask,
4963 	    le16toh(sc->rxon.rxchain),
4964 	    sc->rxon.myaddr, ":", sc->rxon.wlap, ":", sc->rxon.bssid, ":",
4965 	    le16toh(sc->rxon.associd), le32toh(sc->rxon.filter));
4966 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1);
4967 	if (error != 0) {
4968 		device_printf(sc->sc_dev,
4969 		    "%s: could not update configuration, error %d\n",
4970 		    __func__, error);
4971 		return error;
4972 	}
4973 
4974 	/* Configuration has changed, set TX power accordingly. */
4975 	error = hal->set_txpower(sc, ni->ni_chan, 1);
4976 	if (error != 0) {
4977 		device_printf(sc->sc_dev,
4978 		    "%s: could not set Tx power, error %d\n", __func__, error);
4979 		return error;
4980 	}
4981 
4982 	/* Add BSS node. */
4983 	memset(&node, 0, sizeof node);
4984 	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
4985 	node.id = IWN_ID_BSS;
4986 #ifdef notyet
4987 	node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) |
4988 	    IWN_AMDPU_DENSITY(5));	/* 2us */
4989 #endif
4990 	DPRINTF(sc, IWN_DEBUG_STATE, "%s: add BSS node, id %d htflags 0x%x\n",
4991 	    __func__, node.id, le32toh(node.htflags));
4992 	error = hal->add_node(sc, &node, 1);
4993 	if (error != 0) {
4994 		device_printf(sc->sc_dev, "could not add BSS node\n");
4995 		return error;
4996 	}
4997 	DPRINTF(sc, IWN_DEBUG_STATE, "setting link quality for node %d\n",
4998 	    node.id);
4999 	error = iwn_set_link_quality(sc, node.id, 1);
5000 	if (error != 0) {
5001 		device_printf(sc->sc_dev,
5002 		    "%s: could not setup MRR for node %d, error %d\n",
5003 		    __func__, node.id, error);
5004 		return error;
5005 	}
5006 
5007 	error = iwn_init_sensitivity(sc);
5008 	if (error != 0) {
5009 		device_printf(sc->sc_dev,
5010 		    "%s: could not set sensitivity, error %d\n",
5011 		    __func__, error);
5012 		return error;
5013 	}
5014 
5015 	/* Start periodic calibration timer. */
5016 	sc->calib.state = IWN_CALIB_STATE_ASSOC;
5017 	iwn_calib_reset(sc);
5018 
5019 	/* Link LED always on while associated. */
5020 	iwn_set_led(sc, IWN_LED_LINK, 0, 1);
5021 
5022 	return 0;
5023 #undef MS
5024 }
5025 
5026 #if 0	/* HT */
5027 /*
5028  * This function is called by the upper layer when an ADDBA request is received
5029  * from another STA and before the ADDBA response is sent.
5030  */
5031 static int
5032 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5033     uint8_t tid)
5034 {
5035 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
5036 	struct iwn_softc *sc = ic->ic_softc;
5037 	struct iwn_node *wn = (void *)ni;
5038 	struct iwn_node_info node;
5039 
5040 	memset(&node, 0, sizeof node);
5041 	node.id = wn->id;
5042 	node.control = IWN_NODE_UPDATE;
5043 	node.flags = IWN_FLAG_SET_ADDBA;
5044 	node.addba_tid = tid;
5045 	node.addba_ssn = htole16(ba->ba_winstart);
5046 	DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
5047 	    wn->id, tid, ba->ba_winstart);
5048 	return sc->sc_hal->add_node(sc, &node, 1);
5049 }
5050 
5051 /*
5052  * This function is called by the upper layer on teardown of an HT-immediate
5053  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
5054  */
5055 static void
5056 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
5057     uint8_t tid)
5058 {
5059 	struct iwn_softc *sc = ic->ic_softc;
5060 	struct iwn_node *wn = (void *)ni;
5061 	struct iwn_node_info node;
5062 
5063 	memset(&node, 0, sizeof node);
5064 	node.id = wn->id;
5065 	node.control = IWN_NODE_UPDATE;
5066 	node.flags = IWN_FLAG_SET_DELBA;
5067 	node.delba_tid = tid;
5068 	DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
5069 	(void)sc->sc_hal->add_node(sc, &node, 1);
5070 }
5071 
5072 /*
5073  * This function is called by the upper layer when an ADDBA response is received
5074  * from another STA.
5075  */
5076 static int
5077 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5078     uint8_t tid)
5079 {
5080 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5081 	struct iwn_softc *sc = ic->ic_softc;
5082 	const struct iwn_hal *hal = sc->sc_hal;
5083 	struct iwn_node *wn = (void *)ni;
5084 	struct iwn_node_info node;
5085 	int error;
5086 
5087 	/* Enable TX for the specified RA/TID. */
5088 	wn->disable_tid &= ~(1 << tid);
5089 	memset(&node, 0, sizeof node);
5090 	node.id = wn->id;
5091 	node.control = IWN_NODE_UPDATE;
5092 	node.flags = IWN_FLAG_SET_DISABLE_TID;
5093 	node.disable_tid = htole16(wn->disable_tid);
5094 	error = hal->add_node(sc, &node, 1);
5095 	if (error != 0)
5096 		return error;
5097 
5098 	if ((error = iwn_nic_lock(sc)) != 0)
5099 		return error;
5100 	hal->ampdu_tx_start(sc, ni, tid, ba->ba_winstart);
5101 	iwn_nic_unlock(sc);
5102 	return 0;
5103 }
5104 
5105 static void
5106 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
5107     uint8_t tid)
5108 {
5109 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5110 	struct iwn_softc *sc = ic->ic_softc;
5111 	int error;
5112 
5113 	error = iwn_nic_lock(sc);
5114 	if (error != 0)
5115 		return;
5116 	sc->sc_hal->ampdu_tx_stop(sc, tid, ba->ba_winstart);
5117 	iwn_nic_unlock(sc);
5118 }
5119 
5120 static void
5121 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5122     uint8_t tid, uint16_t ssn)
5123 {
5124 	struct iwn_node *wn = (void *)ni;
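	/* The 4965 uses TX queues starting at 7 for aggregation, one per TID. */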
5125 	int qid = 7 + tid;
5126 
5127 	/* Stop TX scheduler while we're changing its configuration. */
5128 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5129 	    IWN4965_TXQ_STATUS_CHGACT);
5130 
5131 	/* Assign RA/TID translation to the queue. */
5132 	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
5133 	    wn->id << 4 | tid);
5134 
5135 	/* Enable chain-building mode for the queue. */
5136 	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
5137 
5138 	/* Set starting sequence number from the ADDBA request. */
5139 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5140 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5141 
5142 	/* Set scheduler window size. */
5143 	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
5144 	    IWN_SCHED_WINSZ);
5145 	/* Set scheduler frame limit. */
5146 	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5147 	    IWN_SCHED_LIMIT << 16);
5148 
5149 	/* Enable interrupts for the queue. */
5150 	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5151 
5152 	/* Mark the queue as active. */
5153 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5154 	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
5155 	    iwn_tid2fifo[tid] << 1);
5156 }
5157 
5158 static void
5159 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
5160 {
5161 	int qid = 7 + tid;
5162 
5163 	/* Stop TX scheduler while we're changing its configuration. */
5164 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5165 	    IWN4965_TXQ_STATUS_CHGACT);
5166 
5167 	/* Set starting sequence number from the ADDBA request. */
5168 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5169 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5170 
5171 	/* Disable interrupts for the queue. */
5172 	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5173 
5174 	/* Mark the queue as inactive. */
5175 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5176 	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
5177 }
5178 
5179 static void
5180 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5181     uint8_t tid, uint16_t ssn)
5182 {
5183 	struct iwn_node *wn = (void *)ni;
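	/* On the 5000 series, aggregation TX queues start at 10, one per TID. */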
5184 	int qid = 10 + tid;
5185 
5186 	/* Stop TX scheduler while we're changing its configuration. */
5187 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5188 	    IWN5000_TXQ_STATUS_CHGACT);
5189 
5190 	/* Assign RA/TID translation to the queue. */
5191 	iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
5192 	    wn->id << 4 | tid);
5193 
5194 	/* Enable chain-building mode for the queue. */
5195 	iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
5196 
5197 	/* Enable aggregation for the queue. */
5198 	iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5199 
5200 	/* Set starting sequence number from the ADDBA request. */
5201 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5202 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5203 
5204 	/* Set scheduler window size and frame limit. */
5205 	iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5206 	    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5207 
5208 	/* Enable interrupts for the queue. */
5209 	iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5210 
5211 	/* Mark the queue as active. */
5212 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5213 	    IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
5214 }
5215 
5216 static void
5217 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
5218 {
5219 	int qid = 10 + tid;
5220 
5221 	/* Stop TX scheduler while we're changing its configuration. */
5222 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5223 	    IWN5000_TXQ_STATUS_CHGACT);
5224 
5225 	/* Disable aggregation for the queue. */
5226 	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5227 
5228 	/* Set starting sequence number from the ADDBA request. */
5229 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5230 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5231 
5232 	/* Disable interrupts for the queue. */
5233 	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5234 
5235 	/* Mark the queue as inactive. */
5236 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5237 	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
5238 }
5239 #endif
5240 
5241 /*
5242  * Query calibration tables from the initialization firmware.  We do this
5243  * only once at first boot.  Called from a process context.
5244  */
5245 static int
5246 iwn5000_query_calibration(struct iwn_softc *sc)
5247 {
5248 	struct iwn5000_calib_config cmd;
5249 	int error;
5250 
5251 	memset(&cmd, 0, sizeof cmd);
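	/*
	 * All-ones masks: request that every supported calibration be run
	 * and that its results be sent back to the host.
	 */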
5252 	cmd.ucode.once.enable = 0xffffffff;
5253 	cmd.ucode.once.start  = 0xffffffff;
5254 	cmd.ucode.once.send   = 0xffffffff;
5255 	cmd.ucode.flags       = 0xffffffff;
5256 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
5257 	    __func__);
5258 	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
5259 	if (error != 0)
5260 		return error;
5261 
5262 	/* Wait at most two seconds for calibration to complete. */
5263 	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
5264 		error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 2 * hz);
5265 	return error;
5266 }
5267 
5268 /*
5269  * Send calibration results to the runtime firmware.  These results were
5270  * obtained on first boot from the initialization firmware.
5271  */
5272 static int
5273 iwn5000_send_calibration(struct iwn_softc *sc)
5274 {
5275 	int idx, error;
5276 
5277 	for (idx = 0; idx < 5; idx++) {
5278 		if (sc->calibcmd[idx].buf == NULL)
5279 			continue;	/* No results available. */
5280 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5281 		    "send calibration result idx=%d len=%d\n",
5282 		    idx, sc->calibcmd[idx].len);
5283 		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
5284 		    sc->calibcmd[idx].len, 0);
5285 		if (error != 0) {
5286 			device_printf(sc->sc_dev,
5287 			    "%s: could not send calibration result, error %d\n",
5288 			    __func__, error);
5289 			return error;
5290 		}
5291 	}
5292 	return 0;
5293 }
5294 
5295 static int
5296 iwn5000_send_wimax_coex(struct iwn_softc *sc)
5297 {
5298 	struct iwn5000_wimax_coex wimax;
5299 
5300 #ifdef notyet
5301 	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
5302 		/* Enable WiMAX coexistence for combo adapters. */
5303 		wimax.flags =
5304 		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
5305 		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
5306 		    IWN_WIMAX_COEX_STA_TABLE_VALID |
5307 		    IWN_WIMAX_COEX_ENABLE;
5308 		memcpy(wimax.events, iwn6050_wimax_events,
5309 		    sizeof iwn6050_wimax_events);
5310 	} else
5311 #endif
5312 	{
5313 		/* Disable WiMAX coexistence. */
5314 		wimax.flags = 0;
5315 		memset(wimax.events, 0, sizeof wimax.events);
5316 	}
5317 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
5318 	    __func__);
5319 	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
5320 }
5321 
5322 /*
5323  * This function is called after the runtime firmware notifies us of its
5324  * readiness (called in a process context.)
5325  */
5326 static int
5327 iwn4965_post_alive(struct iwn_softc *sc)
5328 {
5329 	int error, qid;
5330 
5331 	if ((error = iwn_nic_lock(sc)) != 0)
5332 		return error;
5333 
5334 	/* Clear TX scheduler state in SRAM. */
5335 	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5336 	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
5337 	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
5338 
5339 	/* Set physical address of TX scheduler rings (1KB aligned.) */
5340 	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5341 
5342 	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5343 
5344 	/* Disable chain mode for all our 16 queues. */
5345 	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
5346 
5347 	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
5348 		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
5349 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5350 
5351 		/* Set scheduler window size. */
5352 		iwn_mem_write(sc, sc->sched_base +
5353 		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
5354 		/* Set scheduler frame limit. */
5355 		iwn_mem_write(sc, sc->sched_base +
5356 		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5357 		    IWN_SCHED_LIMIT << 16);
5358 	}
5359 
5360 	/* Enable interrupts for all our 16 queues. */
5361 	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
5362 	/* Identify TX FIFO rings (0-7). */
5363 	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
5364 
5365 	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
5366 	for (qid = 0; qid < 7; qid++) {
5367 		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
5368 		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5369 		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
5370 	}
5371 	iwn_nic_unlock(sc);
5372 	return 0;
5373 }
5374 
5375 /*
5376  * This function is called after the initialization or runtime firmware
5377  * notifies us of its readiness (called in a process context.)
5378  */
5379 static int
5380 iwn5000_post_alive(struct iwn_softc *sc)
5381 {
5382 	int error, qid;
5383 
5384 	/* Switch to using ICT interrupt mode. */
5385 	iwn5000_ict_reset(sc);
5386 
5387 	error = iwn_nic_lock(sc);
5388 	if (error != 0)
5389 		return error;
5390 
5391 	/* Clear TX scheduler state in SRAM. */
5392 	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5393 	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
5394 	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
5395 
5396 	/* Set physical address of TX scheduler rings (1KB aligned.) */
5397 	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5398 
5399 	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5400 
5401 	/* Enable chain mode for all queues, except command queue. */
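	/* 0xfffef leaves bit 4 clear; queue 4 is used for host commands. */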
5402 	iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
5403 	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
5404 
5405 	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
5406 		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
5407 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5408 
5409 		iwn_mem_write(sc, sc->sched_base +
5410 		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
5411 		/* Set scheduler window size and frame limit. */
5412 		iwn_mem_write(sc, sc->sched_base +
5413 		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5414 		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5415 	}
5416 
5417 	/* Enable interrupts for all our 20 queues. */
5418 	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
5419 	/* Identify TX FIFO rings (0-7). */
5420 	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
5421 
5422 	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
5423 	for (qid = 0; qid < 7; qid++) {
5424 		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
5425 		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5426 		    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
5427 	}
5428 	iwn_nic_unlock(sc);
5429 
5430 	/* Configure WiMAX coexistence for combo adapters. */
5431 	error = iwn5000_send_wimax_coex(sc);
5432 	if (error != 0) {
5433 		device_printf(sc->sc_dev,
5434 		    "%s: could not configure WiMAX coexistence, error %d\n",
5435 		    __func__, error);
5436 		return error;
5437 	}
5438 	if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
5439 		struct iwn5000_phy_calib_crystal cmd;
5440 
5441 		/* Perform crystal calibration. */
5442 		memset(&cmd, 0, sizeof cmd);
5443 		cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
5444 		cmd.ngroups = 1;
5445 		cmd.isvalid = 1;
5446 		cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
5447 		cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
5448 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5449 		    "sending crystal calibration %d, %d\n",
5450 		    cmd.cap_pin[0], cmd.cap_pin[1]);
5451 		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
5452 		if (error != 0) {
5453 			device_printf(sc->sc_dev,
5454 			    "%s: crystal calibration failed, error %d\n",
5455 			    __func__, error);
5456 			return error;
5457 		}
5458 	}
5459 	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
5460 		/* Query calibration from the initialization firmware. */
5461 		error = iwn5000_query_calibration(sc);
5462 		if (error != 0) {
5463 			device_printf(sc->sc_dev,
5464 			    "%s: could not query calibration, error %d\n",
5465 			    __func__, error);
5466 			return error;
5467 		}
5468 		/*
5469 		 * We have the calibration results now, reboot with the
5470 		 * runtime firmware (call ourselves recursively!)
5471 		 */
5472 		iwn_hw_stop(sc);
5473 		error = iwn_hw_init(sc);
5474 	} else {
5475 		/* Send calibration results to runtime firmware. */
5476 		error = iwn5000_send_calibration(sc);
5477 	}
5478 	return error;
5479 }
5480 
5481 /*
5482  * The firmware boot code is small and is intended to be copied directly into
5483  * the NIC internal memory (no DMA transfer.)
5484  */
5485 static int
5486 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
5487 {
5488 	int error, ntries;
5489 
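	/* The BSM word count register below expects the size in 32-bit words. */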
5490 	size /= sizeof (uint32_t);
5491 
5492 	error = iwn_nic_lock(sc);
5493 	if (error != 0)
5494 		return error;
5495 
5496 	/* Copy microcode image into NIC memory. */
5497 	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
5498 	    (const uint32_t *)ucode, size);
5499 
5500 	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
5501 	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
5502 	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
5503 
5504 	/* Start boot load now. */
5505 	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
5506 
5507 	/* Wait for transfer to complete. */
5508 	for (ntries = 0; ntries < 1000; ntries++) {
5509 		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
5510 		    IWN_BSM_WR_CTRL_START))
5511 			break;
5512 		DELAY(10);
5513 	}
5514 	if (ntries == 1000) {
5515 		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
5516 		    __func__);
5517 		iwn_nic_unlock(sc);
5518 		return ETIMEDOUT;
5519 	}
5520 
5521 	/* Enable boot after power up. */
5522 	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
5523 
5524 	iwn_nic_unlock(sc);
5525 	return 0;
5526 }
5527 
5528 static int
5529 iwn4965_load_firmware(struct iwn_softc *sc)
5530 {
5531 	struct iwn_fw_info *fw = &sc->fw;
5532 	struct iwn_dma_info *dma = &sc->fw_dma;
5533 	int error;
5534 
5535 	/* Copy initialization sections into pre-allocated DMA-safe memory. */
5536 	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
5537 	bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE);
5538 	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5539 	    fw->init.text, fw->init.textsz);
5540 	bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE);
5541 
5542 	/* Tell adapter where to find initialization sections. */
5543 	error = iwn_nic_lock(sc);
5544 	if (error != 0)
5545 		return error;
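	/* BSM DRAM address registers take addresses in 16-byte units (>> 4). */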
5546 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5547 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
5548 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5549 	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5550 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
5551 	iwn_nic_unlock(sc);
5552 
5553 	/* Load firmware boot code. */
5554 	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
5555 	if (error != 0) {
5556 		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
5557 		    __func__);
5558 		return error;
5559 	}
5560 	/* Now press "execute". */
5561 	IWN_WRITE(sc, IWN_RESET, 0);
5562 
5563 	/* Wait at most one second for first alive notification. */
5564 	error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz);
5565 	if (error) {
5566 		device_printf(sc->sc_dev,
5567 		    "%s: timeout waiting for adapter to initialize, error %d\n",
5568 		    __func__, error);
5569 		return error;
5570 	}
5571 
5572 	/* Retrieve current temperature for initial TX power calibration. */
5573 	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
5574 	sc->temp = iwn4965_get_temperature(sc);
5575 
5576 	/* Copy runtime sections into pre-allocated DMA-safe memory. */
5577 	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
5578 	bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE);
5579 	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5580 	    fw->main.text, fw->main.textsz);
5581 	bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE);
5582 
5583 	/* Tell adapter where to find runtime sections. */
5584 	error = iwn_nic_lock(sc);
5585 	if (error != 0)
5586 		return error;
5587 
5588 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5589 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
5590 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5591 	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5592 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
5593 	    IWN_FW_UPDATED | fw->main.textsz);
5594 	iwn_nic_unlock(sc);
5595 
5596 	return 0;
5597 }
5598 
5599 static int
5600 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
5601     const uint8_t *section, int size)
5602 {
5603 	struct iwn_dma_info *dma = &sc->fw_dma;
5604 	int error;
5605 
5606 	/* Copy firmware section into pre-allocated DMA-safe memory. */
5607 	memcpy(dma->vaddr, section, size);
5608 	bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE);
5609 
5610 	error = iwn_nic_lock(sc);
5611 	if (error != 0)
5612 		return error;
5613 
5614 	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5615 	    IWN_FH_TX_CONFIG_DMA_PAUSE);
5616 
5617 	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
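	/* Program the TFD: low address bits first, then high bits and byte count. */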
5618 	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
5619 	    IWN_LOADDR(dma->paddr));
5620 	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
5621 	    IWN_HIADDR(dma->paddr) << 28 | size);
5622 	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
5623 	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
5624 	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
5625 	    IWN_FH_TXBUF_STATUS_TFBD_VALID);
5626 
5627 	/* Kick Flow Handler to start DMA transfer. */
5628 	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5629 	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
5630 
5631 	iwn_nic_unlock(sc);
5632 
5633 	/* Wait at most five seconds for FH DMA transfer to complete. */
5634 	return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
5635 }
5636 
5637 static int
5638 iwn5000_load_firmware(struct iwn_softc *sc)
5639 {
5640 	struct iwn_fw_part *fw;
5641 	int error;
5642 
5643 	/* Load the initialization firmware on first boot only. */
5644 	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
5645 	    &sc->fw.main : &sc->fw.init;
5646 
5647 	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
5648 	    fw->text, fw->textsz);
5649 	if (error != 0) {
5650 		device_printf(sc->sc_dev,
5651 		    "%s: could not load firmware %s section, error %d\n",
5652 		    __func__, ".text", error);
5653 		return error;
5654 	}
5655 	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
5656 	    fw->data, fw->datasz);
5657 	if (error != 0) {
5658 		device_printf(sc->sc_dev,
5659 		    "%s: could not load firmware %s section, error %d\n",
5660 		    __func__, ".data", error);
5661 		return error;
5662 	}
5663 
5664 	/* Now press "execute". */
5665 	IWN_WRITE(sc, IWN_RESET, 0);
5666 	return 0;
5667 }
5668 
5669 /*
5670  * Extract text and data sections from a legacy firmware image.
5671  */
5672 static int
5673 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
5674 {
5675 	const uint32_t *ptr;
5676 	size_t hdrlen = 24;
5677 	uint32_t rev;
5678 
5679 	ptr = (const uint32_t *)sc->fw_fp->data;
5680 	rev = le32toh(*ptr++);
5681 
5682 	/* Check firmware API version. */
5683 	if (IWN_FW_API(rev) <= 1) {
5684 		device_printf(sc->sc_dev,
5685 		    "%s: bad firmware, need API version >=2\n", __func__);
5686 		return EINVAL;
5687 	}
5688 	if (IWN_FW_API(rev) >= 3) {
5689 		/* Skip build number (version 2 header). */
5690 		hdrlen += 4;
5691 		ptr++;
5692 	}
5693 	if (fw->size < hdrlen) {
5694 		device_printf(sc->sc_dev,
5695 		    "%s: firmware file too short: %zu bytes\n",
5696 		    __func__, fw->size);
5697 		return EINVAL;
5698 	}
5699 	fw->main.textsz = le32toh(*ptr++);
5700 	fw->main.datasz = le32toh(*ptr++);
5701 	fw->init.textsz = le32toh(*ptr++);
5702 	fw->init.datasz = le32toh(*ptr++);
5703 	fw->boot.textsz = le32toh(*ptr++);
5704 
5705 	/* Check that all firmware sections fit. */
5706 	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
5707 	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
5708 		device_printf(sc->sc_dev,
5709 		    "%s: firmware file too short: %zu bytes\n",
5710 		    __func__, fw->size);
5711 		return EINVAL;
5712 	}
5713 
5714 	/* Get pointers to firmware sections. */
5715 	fw->main.text = (const uint8_t *)ptr;
5716 	fw->main.data = fw->main.text + fw->main.textsz;
5717 	fw->init.text = fw->main.data + fw->main.datasz;
5718 	fw->init.data = fw->init.text + fw->init.textsz;
5719 	fw->boot.text = fw->init.data + fw->init.datasz;
5720 
5721 	return 0;
5722 }
5723 
5724 /*
5725  * Extract text and data sections from a TLV firmware image.
5726  */
5727 int
5728 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
5729     uint16_t alt)
5730 {
5731 	const struct iwn_fw_tlv_hdr *hdr;
5732 	const struct iwn_fw_tlv *tlv;
5733 	const uint8_t *ptr, *end;
5734 	uint64_t altmask;
5735 	uint32_t len;
5736 
5737 	if (fw->size < sizeof (*hdr)) {
5738 		device_printf(sc->sc_dev,
5739 		    "%s: firmware file too short: %zu bytes\n",
5740 		    __func__, fw->size);
5741 		return EINVAL;
5742 	}
5743 	hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
5744 	if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
5745 		device_printf(sc->sc_dev,
5746 		    "%s: bad firmware file signature 0x%08x\n",
5747 		    __func__, le32toh(hdr->signature));
5748 		return EINVAL;
5749 	}
5750 
5751 	/*
5752 	 * Select the closest supported alternative that is less than
5753 	 * or equal to the specified one.
5754 	 */
5755 	altmask = le64toh(hdr->altmask);
5756 	while (alt > 0 && !(altmask & (1ULL << alt)))
5757 		alt--;	/* Downgrade. */
5758 
5759 	ptr = (const uint8_t *)(hdr + 1);
5760 	end = (const uint8_t *)(fw->data + fw->size);
5761 
5762 	/* Parse type-length-value fields. */
5763 	while (ptr + sizeof (*tlv) <= end) {
5764 		tlv = (const struct iwn_fw_tlv *)ptr;
5765 		len = le32toh(tlv->len);
5766 
5767 		ptr += sizeof (*tlv);
5768 		if (ptr + len > end) {
5769 			device_printf(sc->sc_dev,
5770 			    "%s: firmware file too short: %zu bytes\n",
5771 			    __func__, fw->size);
5772 			return EINVAL;
5773 		}
5774 		/* Skip other alternatives. */
5775 		if (tlv->alt != 0 && tlv->alt != htole16(alt))
5776 			goto next;
5777 
5778 		switch (le16toh(tlv->type)) {
5779 		case IWN_FW_TLV_MAIN_TEXT:
5780 			fw->main.text = ptr;
5781 			fw->main.textsz = len;
5782 			break;
5783 		case IWN_FW_TLV_MAIN_DATA:
5784 			fw->main.data = ptr;
5785 			fw->main.datasz = len;
5786 			break;
5787 		case IWN_FW_TLV_INIT_TEXT:
5788 			fw->init.text = ptr;
5789 			fw->init.textsz = len;
5790 			break;
5791 		case IWN_FW_TLV_INIT_DATA:
5792 			fw->init.data = ptr;
5793 			fw->init.datasz = len;
5794 			break;
5795 		case IWN_FW_TLV_BOOT_TEXT:
5796 			fw->boot.text = ptr;
5797 			fw->boot.textsz = len;
5798 			break;
5799 		default:
5800 			DPRINTF(sc, IWN_DEBUG_RESET,
5801 			    "%s: TLV type %d not handled\n",
5802 			    __func__, le16toh(tlv->type));
5803 			break;
5804 		}
5805 next:		/* TLV fields are 32-bit aligned. */
5806 		ptr += (len + 3) & ~3;
5807 	}
5808 	return 0;
5809 }
5810 
5811 static int
5812 iwn_read_firmware(struct iwn_softc *sc)
5813 {
5814 	const struct iwn_hal *hal = sc->sc_hal;
5815 	struct iwn_fw_info *fw = &sc->fw;
5816 	int error;
5817 
5818 	IWN_UNLOCK(sc);
5819 
5820 	memset(fw, 0, sizeof (*fw));
5821 
5822 	/* Read firmware image from filesystem. */
5823 	sc->fw_fp = firmware_get(sc->fwname);
5824 	if (sc->fw_fp == NULL) {
5825 		device_printf(sc->sc_dev,
5826 		    "%s: could not load firmware image \"%s\"\n", __func__,
5827 		    sc->fwname);
5828 		IWN_LOCK(sc);
5829 		return EINVAL;
5830 	}
5831 	IWN_LOCK(sc);
5832 
5833 	fw->size = sc->fw_fp->datasize;
5834 	fw->data = (const uint8_t *)sc->fw_fp->data;
5835 	if (fw->size < sizeof (uint32_t)) {
5836 		device_printf(sc->sc_dev,
5837 		    "%s: firmware file too short: %zu bytes\n",
5838 		    __func__, fw->size);
5839 		return EINVAL;
5840 	}
5841 
5842 	/* Retrieve text and data sections. */
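	/*
	 * Legacy images begin with a non-zero API version word; TLV images
	 * begin with a zero word followed by the file signature.
	 */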
5843 	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
5844 		error = iwn_read_firmware_leg(sc, fw);
5845 	else
5846 		error = iwn_read_firmware_tlv(sc, fw, 1);
5847 	if (error != 0) {
5848 		device_printf(sc->sc_dev,
5849 		    "%s: could not read firmware sections\n", __func__);
5850 		return error;
5851 	}
5852 
5853 	/* Make sure text and data sections fit in hardware memory. */
5854 	if (fw->main.textsz > hal->fw_text_maxsz ||
5855 	    fw->main.datasz > hal->fw_data_maxsz ||
5856 	    fw->init.textsz > hal->fw_text_maxsz ||
5857 	    fw->init.datasz > hal->fw_data_maxsz ||
5858 	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
5859 	    (fw->boot.textsz & 3) != 0) {
5860 		device_printf(sc->sc_dev,
5861 		    "%s: firmware sections too large\n", __func__);
5862 		return EINVAL;
5863 	}
5864 
5865 	/* We can proceed with loading the firmware. */
5866 	return 0;
5867 }
5868 
5869 static int
5870 iwn_clock_wait(struct iwn_softc *sc)
5871 {
5872 	int ntries;
5873 
5874 	/* Set "initialization complete" bit. */
5875 	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
5876 
5877 	/* Wait for clock stabilization. */
5878 	for (ntries = 0; ntries < 2500; ntries++) {
5879 		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
5880 			return 0;
5881 		DELAY(10);
5882 	}
5883 	device_printf(sc->sc_dev,
5884 	    "%s: timeout waiting for clock stabilization\n", __func__);
5885 	return ETIMEDOUT;
5886 }
5887 
5888 static int
5889 iwn_apm_init(struct iwn_softc *sc)
5890 {
5891 	uint32_t tmp;
5892 	int error;
5893 
5894 	/* Disable L0s exit timer (NMI bug workaround.) */
5895 	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
5896 	/* Don't wait for ICH L0s (ICH bug workaround.) */
5897 	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
5898 
5899 	/* Set FH wait threshold to max (HW bug under stress workaround.) */
5900 	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
5901 
5902 	/* Enable HAP INTA to move adapter from L1a to L0s. */
5903 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
5904 
5905 	/* Retrieve PCIe Active State Power Management (ASPM). */
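	/*
	 * Offset 0x10 into the PCIe capability is the Link Control register;
	 * bit 1 is the ASPM L1 Entry Enable bit tested below.
	 */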
5906 	tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
5907 	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
5908 	if (tmp & 0x02)	/* L1 Entry enabled. */
5909 		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
5910 	else
5911 		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
5912 
5913 	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
5914 	    sc->hw_type <= IWN_HW_REV_TYPE_1000)
5915 		IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
5916 
5917 	/* Wait for clock stabilization before accessing prph. */
5918 	error = iwn_clock_wait(sc);
5919 	if (error != 0)
5920 		return error;
5921 
5922 	error = iwn_nic_lock(sc);
5923 	if (error != 0)
5924 		return error;
5925 
5926 	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
5927 		/* Enable DMA and BSM (Bootstrap State Machine.) */
5928 		iwn_prph_write(sc, IWN_APMG_CLK_EN,
5929 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
5930 		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
5931 	} else {
5932 		/* Enable DMA. */
5933 		iwn_prph_write(sc, IWN_APMG_CLK_EN,
5934 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
5935 	}
5936 	DELAY(20);
5937 
5938 	/* Disable L1-Active. */
5939 	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
5940 	iwn_nic_unlock(sc);
5941 
5942 	return 0;
5943 }
5944 
5945 static void
5946 iwn_apm_stop_master(struct iwn_softc *sc)
5947 {
5948 	int ntries;
5949 
5950 	/* Stop busmaster DMA activity. */
5951 	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
5952 	for (ntries = 0; ntries < 100; ntries++) {
5953 		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
5954 			return;
5955 		DELAY(10);
5956 	}
5957 	device_printf(sc->sc_dev, "%s: timeout waiting for master\n",
5958 	    __func__);
5959 }
5960 
5961 static void
5962 iwn_apm_stop(struct iwn_softc *sc)
5963 {
5964 	iwn_apm_stop_master(sc);
5965 
5966 	/* Reset the entire device. */
5967 	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
5968 	DELAY(10);
5969 	/* Clear "initialization complete" bit. */
5970 	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
5971 }
5972 
5973 static int
5974 iwn4965_nic_config(struct iwn_softc *sc)
5975 {
5976 	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
5977 		/*
5978 		 * I don't believe this to be correct but this is what the
5979 		 * vendor driver is doing. Probably the bits should not be
5980 		 * shifted in IWN_RFCFG_*.
5981 		 */
5982 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
5983 		    IWN_RFCFG_TYPE(sc->rfcfg) |
5984 		    IWN_RFCFG_STEP(sc->rfcfg) |
5985 		    IWN_RFCFG_DASH(sc->rfcfg));
5986 	}
5987 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
5988 	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
5989 	return 0;
5990 }
5991 
5992 static int
5993 iwn5000_nic_config(struct iwn_softc *sc)
5994 {
5995 	uint32_t tmp;
5996 	int error;
5997 
5998 	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
5999 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6000 		    IWN_RFCFG_TYPE(sc->rfcfg) |
6001 		    IWN_RFCFG_STEP(sc->rfcfg) |
6002 		    IWN_RFCFG_DASH(sc->rfcfg));
6003 	}
6004 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6005 	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6006 
6007 	error = iwn_nic_lock(sc);
6008 	if (error != 0)
6009 		return error;
6010 	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
6011 
6012 	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
6013 		/*
6014 		 * Select first Switching Voltage Regulator (1.32V) to
6015 		 * solve a stability issue related to noisy DC2DC line
6016 		 * in the silicon of 1000 Series.
6017 		 */
6018 		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
6019 		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
6020 		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
6021 		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
6022 	}
6023 	iwn_nic_unlock(sc);
6024 
6025 	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
6026 		/* Use internal power amplifier only. */
6027 		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
6028 	}
6029 	if (sc->hw_type == IWN_HW_REV_TYPE_6050 && sc->calib_ver >= 6) {
6030 		/* Indicate that ROM calibration version is >=6. */
6031 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
6032 	}
6033 	return 0;
6034 }
6035 
6036 /*
6037  * Take ownership of the NIC from Intel Active Management Technology (AMT).
6038  */
6039 static int
6040 iwn_hw_prepare(struct iwn_softc *sc)
6041 {
6042 	int ntries;
6043 
6044 	/* Check if hardware is ready. */
6045 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6046 	for (ntries = 0; ntries < 5; ntries++) {
6047 		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6048 		    IWN_HW_IF_CONFIG_NIC_READY)
6049 			return 0;
6050 		DELAY(10);
6051 	}
6052 
6053 	/* Hardware not ready, force into ready state. */
6054 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
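	/* Poll for up to ~150ms (15000 * 10us) for the prepare handshake. */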
6055 	for (ntries = 0; ntries < 15000; ntries++) {
6056 		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
6057 		    IWN_HW_IF_CONFIG_PREPARE_DONE))
6058 			break;
6059 		DELAY(10);
6060 	}
6061 	if (ntries == 15000)
6062 		return ETIMEDOUT;
6063 
6064 	/* Hardware should be ready now. */
6065 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6066 	for (ntries = 0; ntries < 5; ntries++) {
6067 		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6068 		    IWN_HW_IF_CONFIG_NIC_READY)
6069 			return 0;
6070 		DELAY(10);
6071 	}
6072 	return ETIMEDOUT;
6073 }
6074 
6075 static int
6076 iwn_hw_init(struct iwn_softc *sc)
6077 {
6078 	const struct iwn_hal *hal = sc->sc_hal;
6079 	int error, chnl, qid;
6080 
6081 	/* Clear pending interrupts. */
6082 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6083 
6084 	error = iwn_apm_init(sc);
6085 	if (error != 0) {
6086 		device_printf(sc->sc_dev,
6087 		    "%s: could not power ON adapter, error %d\n",
6088 		    __func__, error);
6089 		return error;
6090 	}
6091 
6092 	/* Select VMAIN power source. */
6093 	error = iwn_nic_lock(sc);
6094 	if (error != 0)
6095 		return error;
6096 	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
6097 	iwn_nic_unlock(sc);
6098 
6099 	/* Perform adapter-specific initialization. */
6100 	error = hal->nic_config(sc);
6101 	if (error != 0)
6102 		return error;
6103 
6104 	/* Initialize RX ring. */
6105 	error = iwn_nic_lock(sc);
6106 	if (error != 0)
6107 		return error;
6108 	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
6109 	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
6110 	/* Set physical address of RX ring (256-byte aligned.) */
6111 	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
6112 	/* Set physical address of RX status (16-byte aligned.) */
6113 	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
6114 	/* Enable RX. */
6115 	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
6116 	    IWN_FH_RX_CONFIG_ENA           |
6117 	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
6118 	    IWN_FH_RX_CONFIG_IRQ_DST_HOST  |
6119 	    IWN_FH_RX_CONFIG_SINGLE_FRAME  |
6120 	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
6121 	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
6122 	iwn_nic_unlock(sc);
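	/* The RX write pointer must be kept a multiple of 8, hence the "& ~7". */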
6123 	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
6124 
6125 	error = iwn_nic_lock(sc);
6126 	if (error != 0)
6127 		return error;
6128 
6129 	/* Initialize TX scheduler. */
6130 	iwn_prph_write(sc, hal->sched_txfact_addr, 0);
6131 
6132 	/* Set physical address of "keep warm" page (16-byte aligned.) */
6133 	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
6134 
6135 	/* Initialize TX rings. */
6136 	for (qid = 0; qid < hal->ntxqs; qid++) {
6137 		struct iwn_tx_ring *txq = &sc->txq[qid];
6138 
6139 		/* Set physical address of TX ring (256-byte aligned.) */
6140 		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
6141 		    txq->desc_dma.paddr >> 8);
6142 	}
6143 	iwn_nic_unlock(sc);
6144 
6145 	/* Enable DMA channels. */
6146 	for (chnl = 0; chnl < hal->ndmachnls; chnl++) {
6147 		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
6148 		    IWN_FH_TX_CONFIG_DMA_ENA |
6149 		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
6150 	}
6151 
6152 	/* Clear "radio off" and "commands blocked" bits. */
6153 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6154 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
6155 
6156 	/* Clear pending interrupts. */
6157 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6158 	/* Enable interrupt coalescing. */
6159 	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
6160 	/* Enable interrupts. */
6161 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6162 
6163 	/* _Really_ make sure "radio off" bit is cleared! */
6164 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6165 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6166 
6167 	error = hal->load_firmware(sc);
6168 	if (error != 0) {
6169 		device_printf(sc->sc_dev,
6170 		    "%s: could not load firmware, error %d\n",
6171 		    __func__, error);
6172 		return error;
6173 	}
6174 	/* Wait at most one second for firmware alive notification. */
6175 	error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz);
6176 	if (error != 0) {
6177 		device_printf(sc->sc_dev,
6178 		    "%s: timeout waiting for adapter to initialize, error %d\n",
6179 		    __func__, error);
6180 		return error;
6181 	}
6182 	/* Do post-firmware initialization. */
6183 	return hal->post_alive(sc);
6184 }
6185 
6186 static void
6187 iwn_hw_stop(struct iwn_softc *sc)
6188 {
6189 	const struct iwn_hal *hal = sc->sc_hal;
6190 	uint32_t tmp;
6191 	int chnl, qid, ntries;
6192 
6193 	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
6194 
6195 	/* Disable interrupts. */
6196 	IWN_WRITE(sc, IWN_INT_MASK, 0);
6197 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6198 	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
6199 	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6200 
6201 	/* Make sure we no longer hold the NIC lock. */
6202 	iwn_nic_unlock(sc);
6203 
6204 	/* Stop TX scheduler. */
6205 	iwn_prph_write(sc, hal->sched_txfact_addr, 0);
6206 
6207 	/* Stop all DMA channels. */
6208 	if (iwn_nic_lock(sc) == 0) {
6209 		for (chnl = 0; chnl < hal->ndmachnls; chnl++) {
6210 			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
6211 			for (ntries = 0; ntries < 200; ntries++) {
6212 				tmp = IWN_READ(sc, IWN_FH_TX_STATUS);
6213 				if ((tmp & IWN_FH_TX_STATUS_IDLE(chnl)) ==
6214 				    IWN_FH_TX_STATUS_IDLE(chnl))
6215 					break;
6216 				DELAY(10);
6217 			}
6218 		}
6219 		iwn_nic_unlock(sc);
6220 	}
6221 
6222 	/* Stop RX ring. */
6223 	iwn_reset_rx_ring(sc, &sc->rxq);
6224 
6225 	/* Reset all TX rings. */
6226 	for (qid = 0; qid < hal->ntxqs; qid++)
6227 		iwn_reset_tx_ring(sc, &sc->txq[qid]);
6228 
6229 	if (iwn_nic_lock(sc) == 0) {
6230 		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
6231 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6232 		iwn_nic_unlock(sc);
6233 	}
6234 	DELAY(5);
6235 
6236 	/* Power OFF adapter. */
6237 	iwn_apm_stop(sc);
6238 }
6239 
6240 static void
6241 iwn_init_locked(struct iwn_softc *sc)
6242 {
6243 	struct ifnet *ifp = sc->sc_ifp;
6244 	int error;
6245 
6246 	IWN_LOCK_ASSERT(sc);
6247 
6248 	error = iwn_hw_prepare(sc);
6249 	if (error != 0) {
6250 		device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
6251 		    __func__, error);
6252 		goto fail;
6253 	}
6254 
6255 	/* Initialize interrupt mask to default value. */
6256 	sc->int_mask = IWN_INT_MASK_DEF;
6257 	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6258 
6259 	/* Check that the radio is not disabled by hardware switch. */
6260 	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
6261 		device_printf(sc->sc_dev,
6262 		    "radio is disabled by hardware switch\n");
6263 
6264 		/* Enable interrupts to get RF toggle notifications. */
6265 		IWN_WRITE(sc, IWN_INT, 0xffffffff);
6266 		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6267 		return;
6268 	}
6269 
6270 	/* Read firmware images from the filesystem. */
6271 	error = iwn_read_firmware(sc);
6272 	if (error != 0) {
6273 		device_printf(sc->sc_dev,
6274 		    "%s: could not read firmware, error %d\n",
6275 		    __func__, error);
6276 		goto fail;
6277 	}
6278 
6279 	/* Initialize hardware and upload firmware. */
6280 	error = iwn_hw_init(sc);
6281 	firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6282 	sc->fw_fp = NULL;
6283 	if (error != 0) {
6284 		device_printf(sc->sc_dev,
6285 		    "%s: could not initialize hardware, error %d\n",
6286 		    __func__, error);
6287 		goto fail;
6288 	}
6289 
6290 	/* Configure adapter now that it is ready. */
6291 	error = iwn_config(sc);
6292 	if (error != 0) {
6293 		device_printf(sc->sc_dev,
6294 		    "%s: could not configure device, error %d\n",
6295 		    __func__, error);
6296 		goto fail;
6297 	}
6298 
6299 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6300 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
6301 
6302 	return;
6303 
6304 fail:
6305 	iwn_stop_locked(sc);
6306 }
6307 
6308 static void
6309 iwn_init(void *arg)
6310 {
6311 	struct iwn_softc *sc = arg;
6312 	struct ifnet *ifp = sc->sc_ifp;
6313 	struct ieee80211com *ic = ifp->if_l2com;
6314 
6315 	IWN_LOCK(sc);
6316 	iwn_init_locked(sc);
6317 	IWN_UNLOCK(sc);
6318 
6319 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6320 		ieee80211_start_all(ic);
6321 }
6322 
6323 static void
6324 iwn_stop_locked(struct iwn_softc *sc)
6325 {
6326 	struct ifnet *ifp = sc->sc_ifp;
6327 
6328 	IWN_LOCK_ASSERT(sc);
6329 
6330 	sc->sc_tx_timer = 0;
6331 	callout_stop(&sc->sc_timer_to);
6332 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
6333 
6334 	/* Power OFF hardware. */
6335 	iwn_hw_stop(sc);
6336 }
6337 
6338 static void
6339 iwn_stop(struct iwn_softc *sc)
6340 {
6341 	IWN_LOCK(sc);
6342 	iwn_stop_locked(sc);
6343 	IWN_UNLOCK(sc);
6344 }
6345 
6346 /*
6347  * Callback from net80211 to start a scan.
6348  */
6349 static void
6350 iwn_scan_start(struct ieee80211com *ic)
6351 {
6352 	struct ifnet *ifp = ic->ic_ifp;
6353 	struct iwn_softc *sc = ifp->if_softc;
6354 
6355 	IWN_LOCK(sc);
6356 	/* make the link LED blink while we're scanning */
6357 	iwn_set_led(sc, IWN_LED_LINK, 20, 2);
6358 	IWN_UNLOCK(sc);
6359 }
6360 
6361 /*
6362  * Callback from net80211 to terminate a scan.
6363  */
6364 static void
6365 iwn_scan_end(struct ieee80211com *ic)
6366 {
6367 	struct ifnet *ifp = ic->ic_ifp;
6368 	struct iwn_softc *sc = ifp->if_softc;
6369 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6370 
6371 	IWN_LOCK(sc);
6372 	if (vap->iv_state == IEEE80211_S_RUN) {
6373 		/* Set link LED to ON status if we are associated */
6374 		iwn_set_led(sc, IWN_LED_LINK, 0, 1);
6375 	}
6376 	IWN_UNLOCK(sc);
6377 }
6378 
6379 /*
6380  * Callback from net80211 to force a channel change.
6381  */
6382 static void
6383 iwn_set_channel(struct ieee80211com *ic)
6384 {
6385 	const struct ieee80211_channel *c = ic->ic_curchan;
6386 	struct ifnet *ifp = ic->ic_ifp;
6387 	struct iwn_softc *sc = ifp->if_softc;
6388 
6389 	IWN_LOCK(sc);
6390 	sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
6391 	sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
6392 	sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
6393 	sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
6394 	IWN_UNLOCK(sc);
6395 }
6396 
6397 /*
6398  * Callback from net80211 to start scanning of the current channel.
6399  */
6400 static void
6401 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6402 {
6403 	struct ieee80211vap *vap = ss->ss_vap;
6404 	struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc;
6405 	int error;
6406 
6407 	IWN_LOCK(sc);
6408 	error = iwn_scan(sc);
6409 	IWN_UNLOCK(sc);
6410 	if (error != 0)
6411 		ieee80211_cancel_scan(vap);
6412 }
6413 
6414 /*
6415  * Callback from net80211 to handle the minimum dwell time being met.
6416  * The intent is to terminate the scan but we just let the firmware
6417  * notify us when it's finished as we have no safe way to abort it.
6418  */
6419 static void
6420 iwn_scan_mindwell(struct ieee80211_scan_state *ss)
6421 {
6422 	/* NB: don't try to abort scan; wait for firmware to finish */
6423 }
6424 
6425 static struct iwn_eeprom_chan *
6426 iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
6427 {
6428 	int i, j;
6429 
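	/* Linear search over the 7 band groups described by iwn_bands[]. */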
6430 	for (j = 0; j < 7; j++) {
6431 		for (i = 0; i < iwn_bands[j].nchan; i++) {
6432 			if (iwn_bands[j].chan[i] == c->ic_ieee)
6433 				return &sc->eeprom_channels[j][i];
6434 		}
6435 	}
6436 
6437 	return NULL;
6438 }
6439 
6440 /*
6441  * Enforce flags read from EEPROM.
6442  */
6443 static int
6444 iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
6445     int nchan, struct ieee80211_channel chans[])
6446 {
6447 	struct iwn_softc *sc = ic->ic_ifp->if_softc;
6448 	int i;
6449 
6450 	for (i = 0; i < nchan; i++) {
6451 		struct ieee80211_channel *c = &chans[i];
6452 		struct iwn_eeprom_chan *channel;
6453 
6454 		channel = iwn_find_eeprom_channel(sc, c);
6455 		if (channel == NULL) {
6456 			if_printf(ic->ic_ifp,
6457 			    "%s: invalid channel %u freq %u/0x%x\n",
6458 			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
6459 			return EINVAL;
6460 		}
6461 		c->ic_flags |= iwn_eeprom_channel_flags(channel);
6462 	}
6463 
6464 	return 0;
6465 }
6466 
6467 static void
6468 iwn_hw_reset(void *arg0, int pending)
6469 {
6470 	struct iwn_softc *sc = arg0;
6471 	struct ifnet *ifp = sc->sc_ifp;
6472 	struct ieee80211com *ic = ifp->if_l2com;
6473 
6474 	iwn_stop(sc);
6475 	iwn_init(sc);
6476 	ieee80211_notify_radio(ic, 1);
6477 }
6478 
6479 static void
6480 iwn_radio_on(void *arg0, int pending)
6481 {
6482 	struct iwn_softc *sc = arg0;
6483 	struct ifnet *ifp = sc->sc_ifp;
6484 	struct ieee80211com *ic = ifp->if_l2com;
6485 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6486 
6487 	if (vap != NULL) {
6488 		iwn_init(sc);
6489 		ieee80211_init(vap);
6490 	}
6491 }
6492 
6493 static void
6494 iwn_radio_off(void *arg0, int pending)
6495 {
6496 	struct iwn_softc *sc = arg0;
6497 	struct ifnet *ifp = sc->sc_ifp;
6498 	struct ieee80211com *ic = ifp->if_l2com;
6499 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6500 
6501 	iwn_stop(sc);
6502 	if (vap != NULL)
6503 		ieee80211_stop(vap);
6504 
6505 	/* Enable interrupts to get RF toggle notification. */
6506 	IWN_LOCK(sc);
6507 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6508 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6509 	IWN_UNLOCK(sc);
6510 }
6511 
6512 static void
6513 iwn_sysctlattach(struct iwn_softc *sc)
6514 {
6515 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
6516 	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
6517 
6518 #ifdef IWN_DEBUG
6519 	sc->sc_debug = 0;
6520 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6521 	    "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
6522 #endif
6523 }
6524 
6525 static int
6526 iwn_shutdown(device_t dev)
6527 {
6528 	struct iwn_softc *sc = device_get_softc(dev);
6529 
6530 	iwn_stop(sc);
6531 	return 0;
6532 }
6533 
6534 static int
6535 iwn_suspend(device_t dev)
6536 {
6537 	struct iwn_softc *sc = device_get_softc(dev);
6538 	struct ifnet *ifp = sc->sc_ifp;
6539 	struct ieee80211com *ic = ifp->if_l2com;
6540 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6541 
6542 	iwn_stop(sc);
6543 	if (vap != NULL)
6544 		ieee80211_stop(vap);
6545 	return 0;
6546 }
6547 
6548 static int
6549 iwn_resume(device_t dev)
6550 {
6551 	struct iwn_softc *sc = device_get_softc(dev);
6552 	struct ifnet *ifp = sc->sc_ifp;
6553 	struct ieee80211com *ic = ifp->if_l2com;
6554 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6555 
6556 	/* Clear device-specific "PCI retry timeout" register (41h). */
6557 	pci_write_config(dev, 0x41, 0, 1);
6558 
6559 	if (ifp->if_flags & IFF_UP) {
6560 		iwn_init(sc);
6561 		if (vap != NULL)
6562 			ieee80211_init(vap);
6563 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6564 			iwn_start(ifp);
6565 	}
6566 	return 0;
6567 }
6568 
6569 #ifdef IWN_DEBUG
6570 static const char *
6571 iwn_intr_str(uint8_t cmd)
6572 {
6573 	switch (cmd) {
6574 	/* Notifications */
6575 	case IWN_UC_READY:		return "UC_READY";
6576 	case IWN_ADD_NODE_DONE:		return "ADD_NODE_DONE";
6577 	case IWN_TX_DONE:		return "TX_DONE";
6578 	case IWN_START_SCAN:		return "START_SCAN";
6579 	case IWN_STOP_SCAN:		return "STOP_SCAN";
6580 	case IWN_RX_STATISTICS:		return "RX_STATS";
6581 	case IWN_BEACON_STATISTICS:	return "BEACON_STATS";
6582 	case IWN_STATE_CHANGED:		return "STATE_CHANGED";
6583 	case IWN_BEACON_MISSED:		return "BEACON_MISSED";
6584 	case IWN_RX_PHY:		return "RX_PHY";
6585 	case IWN_MPDU_RX_DONE:		return "MPDU_RX_DONE";
6586 	case IWN_RX_DONE:		return "RX_DONE";
6587 
6588 	/* Command Notifications */
6589 	case IWN_CMD_RXON:		return "IWN_CMD_RXON";
6590 	case IWN_CMD_RXON_ASSOC:	return "IWN_CMD_RXON_ASSOC";
6591 	case IWN_CMD_EDCA_PARAMS:	return "IWN_CMD_EDCA_PARAMS";
6592 	case IWN_CMD_TIMING:		return "IWN_CMD_TIMING";
6593 	case IWN_CMD_LINK_QUALITY:	return "IWN_CMD_LINK_QUALITY";
6594 	case IWN_CMD_SET_LED:		return "IWN_CMD_SET_LED";
6595 	case IWN5000_CMD_WIMAX_COEX:	return "IWN5000_CMD_WIMAX_COEX";
6596 	case IWN5000_CMD_CALIB_CONFIG:	return "IWN5000_CMD_CALIB_CONFIG";
6597 	case IWN5000_CMD_CALIB_RESULT:	return "IWN5000_CMD_CALIB_RESULT";
6598 	case IWN5000_CMD_CALIB_COMPLETE: return "IWN5000_CMD_CALIB_COMPLETE";
6599 	case IWN_CMD_SET_POWER_MODE:	return "IWN_CMD_SET_POWER_MODE";
6600 	case IWN_CMD_SCAN:		return "IWN_CMD_SCAN";
6601 	case IWN_CMD_SCAN_RESULTS:	return "IWN_CMD_SCAN_RESULTS";
6602 	case IWN_CMD_TXPOWER:		return "IWN_CMD_TXPOWER";
6603 	case IWN_CMD_TXPOWER_DBM:	return "IWN_CMD_TXPOWER_DBM";
6604 	case IWN5000_CMD_TX_ANT_CONFIG:	return "IWN5000_CMD_TX_ANT_CONFIG";
6605 	case IWN_CMD_BT_COEX:		return "IWN_CMD_BT_COEX";
6606 	case IWN_CMD_SET_CRITICAL_TEMP:	return "IWN_CMD_SET_CRITICAL_TEMP";
6607 	case IWN_CMD_SET_SENSITIVITY:	return "IWN_CMD_SET_SENSITIVITY";
6608 	case IWN_CMD_PHY_CALIB:		return "IWN_CMD_PHY_CALIB";
6609 	}
6610 	return "UNKNOWN INTR NOTIF/CMD";
6611 }
6612 #endif /* IWN_DEBUG */
6613 
6614 static device_method_t iwn_methods[] = {
6615 	/* Device interface */
6616 	DEVMETHOD(device_probe,		iwn_probe),
6617 	DEVMETHOD(device_attach,	iwn_attach),
6618 	DEVMETHOD(device_detach,	iwn_detach),
6619 	DEVMETHOD(device_shutdown,	iwn_shutdown),
6620 	DEVMETHOD(device_suspend,	iwn_suspend),
6621 	DEVMETHOD(device_resume,	iwn_resume),
6622 	{ 0, 0 }
6623 };
6624 
6625 static driver_t iwn_driver = {
6626 	"iwn",
6627 	iwn_methods,
6628 	sizeof (struct iwn_softc)
6629 };
6630 static devclass_t iwn_devclass;
6631 
6632 DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, 0, 0);
6633 MODULE_DEPEND(iwn, pci, 1, 1, 1);
6634 MODULE_DEPEND(iwn, firmware, 1, 1, 1);
6635 MODULE_DEPEND(iwn, wlan, 1, 1, 1);
6636