1 /* $NetBSD: if_iwn.c,v 1.78 2016/06/10 13:27:14 ozaki-r Exp $ */
2 /* $OpenBSD: if_iwn.c,v 1.135 2014/09/10 07:22:09 dcoppa Exp $ */
3
4 /*-
5 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * Copyright 2016 Hans Rosenfeld <rosenfeld@grumpf.hope-2000.org>
22 */
23
24 /*
25 * Driver for Intel WiFi Link 4965 and 100/1000/2000/5000/6000 Series 802.11
26 * network adapters.
27 */
28
29 /*
30 * TODO:
31 * - turn tunables into driver properties
32 */
33
34 #undef IWN_HWCRYPTO /* XXX does not even compile yet */
35
36 #include <sys/modctl.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/stat.h>
40
41 #include <sys/param.h>
42 #include <sys/sockio.h>
43 #include <sys/proc.h>
44 #include <sys/socket.h>
45 #include <sys/systm.h>
46 #include <sys/mutex.h>
47 #include <sys/conf.h>
48
49 #include <sys/pci.h>
50 #include <sys/pcie.h>
51
52 #include <net/if.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_types.h>
56
57 #include <netinet/in.h>
58 #include <netinet/in_systm.h>
59 #include <netinet/in_var.h>
60 #include <netinet/ip.h>
61
62 #include <sys/dlpi.h>
63 #include <sys/mac_provider.h>
64 #include <sys/mac_wifi.h>
65 #include <sys/net80211.h>
66 #include <sys/firmload.h>
67 #include <sys/queue.h>
68 #include <sys/strsun.h>
69 #include <sys/strsubr.h>
70 #include <sys/sysmacros.h>
71 #include <sys/types.h>
72 #include <sys/kstat.h>
73
74 #include <sys/sdt.h>
75
76 #include "if_iwncompat.h"
77 #include "if_iwnreg.h"
78 #include "if_iwnvar.h"
79 #include <inet/wifi_ioctl.h>
80
81 #ifdef DEBUG
82 #define IWN_DEBUG
83 #endif
84
85 /*
86  * Register access attributes
87 */
88 static ddi_device_acc_attr_t iwn_reg_accattr = {
89 .devacc_attr_version = DDI_DEVICE_ATTR_V0,
90 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
91 .devacc_attr_dataorder = DDI_STRICTORDER_ACC,
92 .devacc_attr_access = DDI_DEFAULT_ACC
93 };
94
95 /*
96 * DMA access attributes for descriptor
97 */
98 static ddi_device_acc_attr_t iwn_dma_descattr = {
99 .devacc_attr_version = DDI_DEVICE_ATTR_V0,
100 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
101 .devacc_attr_dataorder = DDI_STRICTORDER_ACC,
102 .devacc_attr_access = DDI_DEFAULT_ACC
103 };
104
105 /*
106 * DMA access attributes
107 */
108 static ddi_device_acc_attr_t iwn_dma_accattr = {
109 .devacc_attr_version = DDI_DEVICE_ATTR_V0,
110 .devacc_attr_endian_flags = DDI_NEVERSWAP_ACC,
111 .devacc_attr_dataorder = DDI_STRICTORDER_ACC,
112 .devacc_attr_access = DDI_DEFAULT_ACC
113 };
114
115
116 /*
117 * Supported rates for 802.11a/b/g modes (in 500Kbps unit).
118 */
119 static const struct ieee80211_rateset iwn_rateset_11a =
120 { 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };
121
122 static const struct ieee80211_rateset iwn_rateset_11b =
123 { 4, { 2, 4, 11, 22 } };
124
125 static const struct ieee80211_rateset iwn_rateset_11g =
126 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
127
128 static void iwn_kstat_create(struct iwn_softc *, const char *, size_t,
129 kstat_t **, void **);
130 static void iwn_kstat_free(kstat_t *, void *, size_t);
131 static void iwn_kstat_init(struct iwn_softc *);
132 static void iwn_kstat_init_2000(struct iwn_softc *);
133 static void iwn_kstat_init_4965(struct iwn_softc *);
134 static void iwn_kstat_init_6000(struct iwn_softc *);
135 static void iwn_intr_teardown(struct iwn_softc *);
136 static int iwn_intr_add(struct iwn_softc *, int);
137 static int iwn_intr_setup(struct iwn_softc *);
138 static int iwn_attach(dev_info_t *, ddi_attach_cmd_t);
139 static int iwn4965_attach(struct iwn_softc *);
140 static int iwn5000_attach(struct iwn_softc *, uint16_t);
141 static int iwn_detach(dev_info_t *, ddi_detach_cmd_t);
142 static int iwn_quiesce(dev_info_t *);
143 static int iwn_nic_lock(struct iwn_softc *);
144 static int iwn_eeprom_lock(struct iwn_softc *);
145 static int iwn_init_otprom(struct iwn_softc *);
146 static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
147 static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
148 uint_t, uint_t, void **, ddi_device_acc_attr_t *, uint_t);
149 static void iwn_dma_contig_free(struct iwn_dma_info *);
150 static int iwn_alloc_sched(struct iwn_softc *);
151 static void iwn_free_sched(struct iwn_softc *);
152 static int iwn_alloc_kw(struct iwn_softc *);
153 static void iwn_free_kw(struct iwn_softc *);
154 static int iwn_alloc_ict(struct iwn_softc *);
155 static void iwn_free_ict(struct iwn_softc *);
156 static int iwn_alloc_fwmem(struct iwn_softc *);
157 static void iwn_free_fwmem(struct iwn_softc *);
158 static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
159 static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
160 static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
161 static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
162 int);
163 static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
164 static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
165 static void iwn5000_ict_reset(struct iwn_softc *);
166 static int iwn_read_eeprom(struct iwn_softc *);
167 static void iwn4965_read_eeprom(struct iwn_softc *);
168
169 #ifdef IWN_DEBUG
170 static void iwn4965_print_power_group(struct iwn_softc *, int);
171 #endif
172 static void iwn5000_read_eeprom(struct iwn_softc *);
173 static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
174 static void iwn_read_eeprom_enhinfo(struct iwn_softc *);
175 static struct ieee80211_node *iwn_node_alloc(ieee80211com_t *);
176 static void iwn_node_free(ieee80211_node_t *);
177 static void iwn_newassoc(struct ieee80211_node *, int);
178 static int iwn_newstate(struct ieee80211com *, enum ieee80211_state, int);
179 static void iwn_iter_func(void *, struct ieee80211_node *);
180 static void iwn_calib_timeout(void *);
181 static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
182 struct iwn_rx_data *);
183 static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
184 struct iwn_rx_data *);
185 #ifndef IEEE80211_NO_HT
186 static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
187 struct iwn_rx_data *);
188 #endif
189 static void iwn5000_rx_calib_results(struct iwn_softc *,
190 struct iwn_rx_desc *, struct iwn_rx_data *);
191 static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
192 struct iwn_rx_data *);
193 static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
194 struct iwn_rx_data *);
195 static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
196 struct iwn_rx_data *);
197 static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
198 uint8_t);
199 static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
200 static void iwn_notif_intr(struct iwn_softc *);
201 static void iwn_wakeup_intr(struct iwn_softc *);
202 static void iwn_fatal_intr(struct iwn_softc *);
203 static uint_t iwn_intr(caddr_t, caddr_t);
204 static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
205 uint16_t);
206 static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
207 uint16_t);
208 #ifdef notyet
209 static void iwn5000_reset_sched(struct iwn_softc *, int, int);
210 #endif
211 static int iwn_send(ieee80211com_t *, mblk_t *, uint8_t);
212 static void iwn_watchdog(void *);
213 static int iwn_cmd(struct iwn_softc *, uint8_t, void *, int, int);
214 static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
215 int);
216 static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
217 int);
218 static int iwn_set_link_quality(struct iwn_softc *,
219 struct ieee80211_node *);
220 static int iwn_add_broadcast_node(struct iwn_softc *, int);
221 static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
222 static int iwn_set_critical_temp(struct iwn_softc *);
223 static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
224 static void iwn4965_power_calibration(struct iwn_softc *, int);
225 static int iwn4965_set_txpower(struct iwn_softc *, int);
226 static int iwn5000_set_txpower(struct iwn_softc *, int);
227 static int iwn4965_get_rssi(const struct iwn_rx_stat *);
228 static int iwn5000_get_rssi(const struct iwn_rx_stat *);
229 static int iwn_get_noise(const struct iwn_rx_general_stats *);
230 static int iwn4965_get_temperature(struct iwn_softc *);
231 static int iwn5000_get_temperature(struct iwn_softc *);
232 static int iwn_init_sensitivity(struct iwn_softc *);
233 static void iwn_collect_noise(struct iwn_softc *,
234 const struct iwn_rx_general_stats *);
235 static int iwn4965_init_gains(struct iwn_softc *);
236 static int iwn5000_init_gains(struct iwn_softc *);
237 static int iwn4965_set_gains(struct iwn_softc *);
238 static int iwn5000_set_gains(struct iwn_softc *);
239 static void iwn_tune_sensitivity(struct iwn_softc *,
240 const struct iwn_rx_stats *);
241 static int iwn_send_sensitivity(struct iwn_softc *);
242 static int iwn_set_pslevel(struct iwn_softc *, int, int, int);
243 static int iwn5000_runtime_calib(struct iwn_softc *);
244
245 static int iwn_config_bt_coex_bluetooth(struct iwn_softc *);
246 static int iwn_config_bt_coex_prio_table(struct iwn_softc *);
247 static int iwn_config_bt_coex_adv1(struct iwn_softc *);
248 static int iwn_config_bt_coex_adv2(struct iwn_softc *);
249
250 static int iwn_config(struct iwn_softc *);
251 static uint16_t iwn_get_active_dwell_time(struct iwn_softc *, uint16_t,
252 uint8_t);
253 static uint16_t iwn_limit_dwell(struct iwn_softc *, uint16_t);
254 static uint16_t iwn_get_passive_dwell_time(struct iwn_softc *, uint16_t);
255 static int iwn_scan(struct iwn_softc *, uint16_t);
256 static int iwn_auth(struct iwn_softc *);
257 static int iwn_run(struct iwn_softc *);
258 #ifdef IWN_HWCRYPTO
259 static int iwn_set_key(struct ieee80211com *, struct ieee80211_node *,
260 struct ieee80211_key *);
261 static void iwn_delete_key(struct ieee80211com *, struct ieee80211_node *,
262 struct ieee80211_key *);
263 #endif
264 static int iwn_wme_update(struct ieee80211com *);
265 #ifndef IEEE80211_NO_HT
266 static int iwn_ampdu_rx_start(struct ieee80211com *,
267 struct ieee80211_node *, uint8_t);
268 static void iwn_ampdu_rx_stop(struct ieee80211com *,
269 struct ieee80211_node *, uint8_t);
270 static int iwn_ampdu_tx_start(struct ieee80211com *,
271 struct ieee80211_node *, uint8_t);
272 static void iwn_ampdu_tx_stop(struct ieee80211com *,
273 struct ieee80211_node *, uint8_t);
274 static void iwn4965_ampdu_tx_start(struct iwn_softc *,
275 struct ieee80211_node *, uint8_t, uint16_t);
276 static void iwn4965_ampdu_tx_stop(struct iwn_softc *,
277 uint8_t, uint16_t);
278 static void iwn5000_ampdu_tx_start(struct iwn_softc *,
279 struct ieee80211_node *, uint8_t, uint16_t);
280 static void iwn5000_ampdu_tx_stop(struct iwn_softc *,
281 uint8_t, uint16_t);
282 #endif
283 static int iwn5000_query_calibration(struct iwn_softc *);
284 static int iwn5000_send_calibration(struct iwn_softc *);
285 static int iwn5000_send_wimax_coex(struct iwn_softc *);
286 static int iwn6000_temp_offset_calib(struct iwn_softc *);
287 static int iwn2000_temp_offset_calib(struct iwn_softc *);
288 static int iwn4965_post_alive(struct iwn_softc *);
289 static int iwn5000_post_alive(struct iwn_softc *);
290 static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
291 int);
292 static int iwn4965_load_firmware(struct iwn_softc *);
293 static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
294 const uint8_t *, int);
295 static int iwn5000_load_firmware(struct iwn_softc *);
296 static int iwn_read_firmware_leg(struct iwn_softc *,
297 struct iwn_fw_info *);
298 static int iwn_read_firmware_tlv(struct iwn_softc *,
299 struct iwn_fw_info *, uint16_t);
300 static int iwn_read_firmware(struct iwn_softc *);
301 static int iwn_clock_wait(struct iwn_softc *);
302 static int iwn_apm_init(struct iwn_softc *);
303 static void iwn_apm_stop_master(struct iwn_softc *);
304 static void iwn_apm_stop(struct iwn_softc *);
305 static int iwn4965_nic_config(struct iwn_softc *);
306 static int iwn5000_nic_config(struct iwn_softc *);
307 static int iwn_hw_prepare(struct iwn_softc *);
308 static int iwn_hw_init(struct iwn_softc *);
309 static void iwn_hw_stop(struct iwn_softc *, boolean_t);
310 static int iwn_init(struct iwn_softc *);
311 static void iwn_abort_scan(void *);
312 static void iwn_periodic(void *);
313 static int iwn_fast_recover(struct iwn_softc *);
314
315 static uint8_t *ieee80211_add_ssid(uint8_t *, const uint8_t *, uint32_t);
316 static uint8_t *ieee80211_add_rates(uint8_t *,
317 const struct ieee80211_rateset *);
318 static uint8_t *ieee80211_add_xrates(uint8_t *,
319 const struct ieee80211_rateset *);
320
321 static void iwn_fix_channel(struct iwn_softc *, mblk_t *,
322 struct iwn_rx_stat *);
323
324 #ifdef IWN_DEBUG
325
326 #define IWN_DBG(...) iwn_dbg("?" __VA_ARGS__)
327
328 static int iwn_dbg_print = 0;
329
330 static void
331 iwn_dbg(const char *fmt, ...)
332 {
333 va_list ap;
334
335 if (iwn_dbg_print != 0) {
336 va_start(ap, fmt);
337 vcmn_err(CE_CONT, fmt, ap);
338 va_end(ap);
339 }
340 }
341
342 #else
343 #define IWN_DBG(...)
344 #endif
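
/*
 * When built with IWN_DEBUG, debug output can typically be enabled at
 * runtime by setting iwn_dbg_print to a non-zero value, for example with
 * "iwn_dbg_print/W 1" under mdb -kw.
 */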
345
346 /*
347 * tunables
348 */
349
350 /*
351 * enable 5GHz scanning
352 */
353 int iwn_enable_5ghz = 1;
354
355 /*
356 * If more than 50 consecutive beacons are missed,
357 * we've probably lost our connection.
358 * If more than 5 consecutive beacons are missed,
359 * reinitialize the sensitivity state machine.
360 */
361 int iwn_beacons_missed_disconnect = 50;
362 int iwn_beacons_missed_sensitivity = 5;
363
364 /*
365 * iwn_periodic interval, in units of msec
366 */
367 int iwn_periodic_interval = 100;
368
369 /*
370 * scan timeout in sec
371 */
372 int iwn_scan_timeout = 20;
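
/*
 * Until the tunables above become driver properties (see TODO), they can
 * normally be overridden like any other global driver variable from
 * /etc/system, for example:
 *
 *	set iwn:iwn_enable_5ghz = 0
 *	set iwn:iwn_scan_timeout = 30
 */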
373
374 static ether_addr_t etherbroadcastaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
375
376 static void *iwn_state = NULL;
377
378 /*
379  * MAC callback entries
380 */
381 static int iwn_m_stat(void *, uint_t, uint64_t *);
382 static int iwn_m_start(void *);
383 static void iwn_m_stop(void *);
384 static int iwn_m_unicst(void *, const uint8_t *);
385 static int iwn_m_multicst(void *, boolean_t, const uint8_t *);
386 static int iwn_m_promisc(void *, boolean_t);
387 static mblk_t *iwn_m_tx(void *, mblk_t *);
388 static void iwn_m_ioctl(void *, queue_t *, mblk_t *);
389 static int iwn_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
390 const void *);
391 static int iwn_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
392 void *);
393 static void iwn_m_propinfo(void *, const char *, mac_prop_id_t,
394 mac_prop_info_handle_t);
395
396 mac_callbacks_t iwn_m_callbacks = {
397 .mc_callbacks = MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
398 .mc_getstat = iwn_m_stat,
399 .mc_start = iwn_m_start,
400 .mc_stop = iwn_m_stop,
401 .mc_setpromisc = iwn_m_promisc,
402 .mc_multicst = iwn_m_multicst,
403 .mc_unicst = iwn_m_unicst,
404 .mc_tx = iwn_m_tx,
405 .mc_reserved = NULL,
406 .mc_ioctl = iwn_m_ioctl,
407 .mc_getcapab = NULL,
408 .mc_open = NULL,
409 .mc_close = NULL,
410 .mc_setprop = iwn_m_setprop,
411 .mc_getprop = iwn_m_getprop,
412 .mc_propinfo = iwn_m_propinfo
413 };
414
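/*
 * Memory-mapped register access helpers.  Device registers are
 * little-endian; ddi_get32()/ddi_put32() apply the endianness and
 * ordering rules described by iwn_reg_accattr above.
 */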
415 static inline uint32_t
416 iwn_read(struct iwn_softc *sc, int reg)
417 {
418 /*LINTED: E_PTR_BAD_CAST_ALIGN*/
419 return (ddi_get32(sc->sc_regh, (uint32_t *)(sc->sc_base + reg)));
420 }
421
422 static inline void
423 iwn_write(struct iwn_softc *sc, int reg, uint32_t val)
424 {
425 /*LINTED: E_PTR_BAD_CAST_ALIGN*/
426 ddi_put32(sc->sc_regh, (uint32_t *)(sc->sc_base + reg), val);
427 }
428
429 static inline void
430 iwn_write_1(struct iwn_softc *sc, int reg, uint8_t val)
431 {
432 ddi_put8(sc->sc_regh, (uint8_t *)(sc->sc_base + reg), val);
433 }
434
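/*
 * Create a named kstat and hand back its data area.  If kstat_create()
 * fails, fall back to a private kmem allocation so that callers can
 * always dereference *data; iwn_kstat_free() undoes either case.
 */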
435 static void
436 iwn_kstat_create(struct iwn_softc *sc, const char *name, size_t size,
437 kstat_t **ks, void **data)
438 {
439 *ks = kstat_create(ddi_driver_name(sc->sc_dip),
440 ddi_get_instance(sc->sc_dip), name, "misc", KSTAT_TYPE_NAMED,
441 size / sizeof (kstat_named_t), 0);
442 if (*ks == NULL)
443 *data = kmem_zalloc(size, KM_SLEEP);
444 else
445 *data = (*ks)->ks_data;
446 }
447
448 static void
449 iwn_kstat_free(kstat_t *ks, void *data, size_t size)
450 {
451 if (ks != NULL)
452 kstat_delete(ks);
453 else if (data != NULL)
454 kmem_free(data, size);
455 }
456
457 static void
458 iwn_kstat_init(struct iwn_softc *sc)
459 {
460 if (sc->sc_ks_misc != NULL)
461 sc->sc_ks_misc->ks_lock = &sc->sc_mtx;
462 if (sc->sc_ks_ant != NULL)
463 sc->sc_ks_ant->ks_lock = &sc->sc_mtx;
464 if (sc->sc_ks_sens != NULL)
465 sc->sc_ks_sens->ks_lock = &sc->sc_mtx;
466 if (sc->sc_ks_timing != NULL)
467 sc->sc_ks_timing->ks_lock = &sc->sc_mtx;
468 if (sc->sc_ks_edca != NULL)
469 sc->sc_ks_edca->ks_lock = &sc->sc_mtx;
470
471 kstat_named_init(&sc->sc_misc->temp,
472 "temperature", KSTAT_DATA_ULONG);
473 kstat_named_init(&sc->sc_misc->crit_temp,
474 "critical temperature", KSTAT_DATA_ULONG);
475 kstat_named_init(&sc->sc_misc->pslevel,
476 "power saving level", KSTAT_DATA_ULONG);
477 kstat_named_init(&sc->sc_misc->noise,
478 "noise", KSTAT_DATA_LONG);
479
480
481 kstat_named_init(&sc->sc_ant->tx_ant,
482 "TX mask", KSTAT_DATA_ULONG);
483 kstat_named_init(&sc->sc_ant->rx_ant,
484 "RX mask", KSTAT_DATA_ULONG);
485 kstat_named_init(&sc->sc_ant->conn_ant,
486 "connected mask", KSTAT_DATA_ULONG);
487 kstat_named_init(&sc->sc_ant->gain[0],
488 "gain A", KSTAT_DATA_ULONG);
489 kstat_named_init(&sc->sc_ant->gain[1],
490 "gain B", KSTAT_DATA_ULONG);
491 kstat_named_init(&sc->sc_ant->gain[2],
492 "gain C", KSTAT_DATA_ULONG);
493
494 kstat_named_init(&sc->sc_sens->ofdm_x1,
495 "OFDM X1", KSTAT_DATA_ULONG);
496 kstat_named_init(&sc->sc_sens->ofdm_mrc_x1,
497 "OFDM MRC X1", KSTAT_DATA_ULONG);
498 kstat_named_init(&sc->sc_sens->ofdm_x4,
499 "OFDM X4", KSTAT_DATA_ULONG);
500 kstat_named_init(&sc->sc_sens->ofdm_mrc_x4,
501 "OFDM MRC X4", KSTAT_DATA_ULONG);
502 kstat_named_init(&sc->sc_sens->cck_x4,
503 "CCK X4", KSTAT_DATA_ULONG);
504 kstat_named_init(&sc->sc_sens->cck_mrc_x4,
505 "CCK MRC X4", KSTAT_DATA_ULONG);
506 kstat_named_init(&sc->sc_sens->energy_cck,
507 "energy CCK", KSTAT_DATA_ULONG);
508
509 kstat_named_init(&sc->sc_timing->bintval,
510 "bintval", KSTAT_DATA_ULONG);
511 kstat_named_init(&sc->sc_timing->tstamp,
512 "timestamp", KSTAT_DATA_ULONGLONG);
513 kstat_named_init(&sc->sc_timing->init,
514 "init", KSTAT_DATA_ULONG);
515
516 kstat_named_init(&sc->sc_edca->ac[0].cwmin,
517 "background cwmin", KSTAT_DATA_ULONG);
518 kstat_named_init(&sc->sc_edca->ac[0].cwmax,
519 "background cwmax", KSTAT_DATA_ULONG);
520 kstat_named_init(&sc->sc_edca->ac[0].aifsn,
521 "background aifsn", KSTAT_DATA_ULONG);
522 kstat_named_init(&sc->sc_edca->ac[0].txop,
523 "background txop", KSTAT_DATA_ULONG);
524 kstat_named_init(&sc->sc_edca->ac[1].cwmin,
525 "best effort cwmin", KSTAT_DATA_ULONG);
526 kstat_named_init(&sc->sc_edca->ac[1].cwmax,
527 "best effort cwmax", KSTAT_DATA_ULONG);
528 kstat_named_init(&sc->sc_edca->ac[1].aifsn,
529 "best effort aifsn", KSTAT_DATA_ULONG);
530 kstat_named_init(&sc->sc_edca->ac[1].txop,
531 "best effort txop", KSTAT_DATA_ULONG);
532 kstat_named_init(&sc->sc_edca->ac[2].cwmin,
533 "video cwmin", KSTAT_DATA_ULONG);
534 kstat_named_init(&sc->sc_edca->ac[2].cwmax,
535 "video cwmax", KSTAT_DATA_ULONG);
536 kstat_named_init(&sc->sc_edca->ac[2].aifsn,
537 "video aifsn", KSTAT_DATA_ULONG);
538 kstat_named_init(&sc->sc_edca->ac[2].txop,
539 "video txop", KSTAT_DATA_ULONG);
540 kstat_named_init(&sc->sc_edca->ac[3].cwmin,
541 "voice cwmin", KSTAT_DATA_ULONG);
542 kstat_named_init(&sc->sc_edca->ac[3].cwmax,
543 "voice cwmax", KSTAT_DATA_ULONG);
544 kstat_named_init(&sc->sc_edca->ac[3].aifsn,
545 "voice aifsn", KSTAT_DATA_ULONG);
546 kstat_named_init(&sc->sc_edca->ac[3].txop,
547 "voice txop", KSTAT_DATA_ULONG);
548 }
549
550 static void
551 iwn_kstat_init_2000(struct iwn_softc *sc)
552 {
553 if (sc->sc_ks_toff != NULL)
554 sc->sc_ks_toff->ks_lock = &sc->sc_mtx;
555
556 kstat_named_init(&sc->sc_toff.t2000->toff_lo,
557 "temperature offset low", KSTAT_DATA_LONG);
558 kstat_named_init(&sc->sc_toff.t2000->toff_hi,
559 "temperature offset high", KSTAT_DATA_LONG);
560 kstat_named_init(&sc->sc_toff.t2000->volt,
561 "reference voltage", KSTAT_DATA_LONG);
562 }
563
564 static void
565 iwn_kstat_init_4965(struct iwn_softc *sc)
566 {
567 int i, r;
568
569 if (sc->sc_ks_txpower != NULL)
570 sc->sc_ks_txpower->ks_lock = &sc->sc_mtx;
571
572 kstat_named_init(&sc->sc_txpower->vdiff,
573 "voltage comp", KSTAT_DATA_LONG);
574 kstat_named_init(&sc->sc_txpower->chan,
575 "channel", KSTAT_DATA_LONG);
576 kstat_named_init(&sc->sc_txpower->group,
577 "attenuation group", KSTAT_DATA_LONG);
578 kstat_named_init(&sc->sc_txpower->subband,
579 "sub-band", KSTAT_DATA_LONG);
580 for (i = 0; i != 2; i++) {
581 char tmp[KSTAT_STRLEN];
582
583 (void) snprintf(tmp, KSTAT_STRLEN - 1, "Ant %d power", i);
584 kstat_named_init(&sc->sc_txpower->txchain[i].power,
585 tmp, KSTAT_DATA_LONG);
586
587 (void) snprintf(tmp, KSTAT_STRLEN - 1, "Ant %d gain", i);
588 kstat_named_init(&sc->sc_txpower->txchain[i].gain,
589 tmp, KSTAT_DATA_LONG);
590
591 (void) snprintf(tmp, KSTAT_STRLEN - 1, "Ant %d temperature", i);
592 kstat_named_init(&sc->sc_txpower->txchain[i].temp,
593 tmp, KSTAT_DATA_LONG);
594
595 (void) snprintf(tmp, KSTAT_STRLEN - 1,
596 "Ant %d temperature compensation", i);
597 kstat_named_init(&sc->sc_txpower->txchain[i].tcomp,
598 tmp, KSTAT_DATA_LONG);
599
600 for (r = 0; r <= IWN_RIDX_MAX; r++) {
601 (void) snprintf(tmp, KSTAT_STRLEN - 1,
602 "Ant %d Rate %d RF gain", i, r);
603 kstat_named_init(
604 &sc->sc_txpower->txchain[i].rate[r].rf_gain,
605 tmp, KSTAT_DATA_LONG);
606
607 (void) snprintf(tmp, KSTAT_STRLEN - 1,
608 "Ant %d Rate %d DSP gain", i, r);
609 kstat_named_init(
610 &sc->sc_txpower->txchain[i].rate[r].dsp_gain,
611 tmp, KSTAT_DATA_LONG);
612 }
613 }
614 }
615
616 static void
617 iwn_kstat_init_6000(struct iwn_softc *sc)
618 {
619 if (sc->sc_ks_toff != NULL)
620 sc->sc_ks_toff->ks_lock = &sc->sc_mtx;
621
622 kstat_named_init(&sc->sc_toff.t6000->toff,
623 "temperature offset", KSTAT_DATA_LONG);
624 }
625
626 static void
627 iwn_intr_teardown(struct iwn_softc *sc)
628 {
629 if (sc->sc_intr_htable != NULL) {
630 if ((sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) != 0) {
631 (void) ddi_intr_block_disable(sc->sc_intr_htable,
632 sc->sc_intr_count);
633 } else {
634 (void) ddi_intr_disable(sc->sc_intr_htable[0]);
635 }
636 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
637 (void) ddi_intr_free(sc->sc_intr_htable[0]);
638 sc->sc_intr_htable[0] = NULL;
639
640 kmem_free(sc->sc_intr_htable, sc->sc_intr_size);
641 sc->sc_intr_size = 0;
642 sc->sc_intr_htable = NULL;
643 }
644 }
645
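/*
 * Allocate and enable a single interrupt of the given type (MSI-X, MSI
 * or fixed), record its priority for mutex initialization and attach
 * iwn_intr() as the handler.  On failure the caller is expected to run
 * iwn_intr_teardown() to release whatever was set up.
 */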
646 static int
647 iwn_intr_add(struct iwn_softc *sc, int intr_type)
648 {
649 int ni, na;
650 int ret;
651 char *func;
652
653 if (ddi_intr_get_nintrs(sc->sc_dip, intr_type, &ni) != DDI_SUCCESS)
654 return (DDI_FAILURE);
655
656
657 if (ddi_intr_get_navail(sc->sc_dip, intr_type, &na) != DDI_SUCCESS)
658 return (DDI_FAILURE);
659
660 sc->sc_intr_size = sizeof (ddi_intr_handle_t);
661 sc->sc_intr_htable = kmem_zalloc(sc->sc_intr_size, KM_SLEEP);
662
663 ret = ddi_intr_alloc(sc->sc_dip, sc->sc_intr_htable, intr_type, 0, 1,
664 &sc->sc_intr_count, DDI_INTR_ALLOC_STRICT);
665 if (ret != DDI_SUCCESS) {
666 dev_err(sc->sc_dip, CE_WARN, "!ddi_intr_alloc() failed");
667 return (DDI_FAILURE);
668 }
669
670 ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
671 if (ret != DDI_SUCCESS) {
672 dev_err(sc->sc_dip, CE_WARN, "!ddi_intr_get_pri() failed");
673 return (DDI_FAILURE);
674 }
675
676 ret = ddi_intr_add_handler(sc->sc_intr_htable[0], iwn_intr, (caddr_t)sc,
677 NULL);
678 if (ret != DDI_SUCCESS) {
679 dev_err(sc->sc_dip, CE_WARN, "!ddi_intr_add_handler() failed");
680 return (DDI_FAILURE);
681 }
682
683 ret = ddi_intr_get_cap(sc->sc_intr_htable[0], &sc->sc_intr_cap);
684 if (ret != DDI_SUCCESS) {
685 dev_err(sc->sc_dip, CE_WARN, "!ddi_intr_get_cap() failed");
686 return (DDI_FAILURE);
687 }
688
689 if ((sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) != 0) {
690 ret = ddi_intr_block_enable(sc->sc_intr_htable,
691 sc->sc_intr_count);
692 func = "ddi_intr_enable_block";
693 } else {
694 ret = ddi_intr_enable(sc->sc_intr_htable[0]);
695 func = "ddi_intr_enable";
696 }
697
698 if (ret != DDI_SUCCESS) {
699 dev_err(sc->sc_dip, CE_WARN, "!%s() failed", func);
700 return (DDI_FAILURE);
701 }
702
703 return (DDI_SUCCESS);
704 }
705
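/*
 * Probe the supported interrupt types and try them in order of
 * preference: MSI-X first, then MSI, then the legacy fixed interrupt.
 */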
706 static int
707 iwn_intr_setup(struct iwn_softc *sc)
708 {
709 int intr_type;
710 int ret;
711
712 ret = ddi_intr_get_supported_types(sc->sc_dip, &intr_type);
713 if (ret != DDI_SUCCESS) {
714 dev_err(sc->sc_dip, CE_WARN,
715 "!ddi_intr_get_supported_types() failed");
716 return (DDI_FAILURE);
717 }
718
719 if ((intr_type & DDI_INTR_TYPE_MSIX)) {
720 if (iwn_intr_add(sc, DDI_INTR_TYPE_MSIX) == DDI_SUCCESS)
721 return (DDI_SUCCESS);
722 iwn_intr_teardown(sc);
723 }
724
725 if ((intr_type & DDI_INTR_TYPE_MSI)) {
726 if (iwn_intr_add(sc, DDI_INTR_TYPE_MSI) == DDI_SUCCESS)
727 return (DDI_SUCCESS);
728 iwn_intr_teardown(sc);
729 }
730
731 if ((intr_type & DDI_INTR_TYPE_FIXED)) {
732 if (iwn_intr_add(sc, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS)
733 return (DDI_SUCCESS);
734 iwn_intr_teardown(sc);
735 }
736
737 dev_err(sc->sc_dip, CE_WARN, "!iwn_intr_setup() failed");
738 return (DDI_FAILURE);
739 }
740
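/*
 * Walk the PCI capability list starting at the capability pointer in
 * config space and return the offset of the requested capability ID,
 * or DDI_FAILURE if it is not present.
 */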
741 static int
742 iwn_pci_get_capability(ddi_acc_handle_t pcih, int cap, int *cap_off)
743 {
744 uint8_t ptr;
745 uint8_t val;
746
747 for (ptr = pci_config_get8(pcih, PCI_CONF_CAP_PTR);
748 ptr != 0 && ptr != 0xff;
749 ptr = pci_config_get8(pcih, ptr + PCI_CAP_NEXT_PTR)) {
750 val = pci_config_get8(pcih, ptr + PCIE_CAP_ID);
751 if (val == 0xff)
752 return (DDI_FAILURE);
753
754 if (cap != val)
755 continue;
756
757 *cap_off = ptr;
758 return (DDI_SUCCESS);
759 }
760
761 return (DDI_FAILURE);
762 }
763
764 static int
765 iwn_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
766 {
767 int instance;
768
769 struct iwn_softc *sc;
770 struct ieee80211com *ic;
771 char strbuf[32];
772 wifi_data_t wd = { 0 };
773 mac_register_t *macp;
774 uint32_t reg;
775 int i, error;
776
777 switch (cmd) {
778 case DDI_ATTACH:
779 break;
780
781 case DDI_RESUME:
782 instance = ddi_get_instance(dip);
783 sc = ddi_get_soft_state(iwn_state,
784 instance);
785 ASSERT(sc != NULL);
786
787 if (sc->sc_flags & IWN_FLAG_RUNNING) {
788 (void) iwn_init(sc);
789 }
790
791 sc->sc_flags &= ~IWN_FLAG_SUSPEND;
792
793 return (DDI_SUCCESS);
794 default:
795 return (DDI_FAILURE);
796 }
797
798 instance = ddi_get_instance(dip);
799
800 if (ddi_soft_state_zalloc(iwn_state, instance) != DDI_SUCCESS) {
801 dev_err(dip, CE_WARN, "!ddi_soft_state_zalloc() failed");
802 return (DDI_FAILURE);
803 }
804
805 sc = ddi_get_soft_state(iwn_state, instance);
806 ddi_set_driver_private(dip, (caddr_t)sc);
807
808 ic = &sc->sc_ic;
809
810 sc->sc_dip = dip;
811
812 iwn_kstat_create(sc, "hw_state", sizeof (struct iwn_ks_misc),
813 &sc->sc_ks_misc, (void **)&sc->sc_misc);
814 iwn_kstat_create(sc, "antennas", sizeof (struct iwn_ks_ant),
815 &sc->sc_ks_ant, (void **)&sc->sc_ant);
816 iwn_kstat_create(sc, "sensitivity", sizeof (struct iwn_ks_sens),
817 &sc->sc_ks_sens, (void **)&sc->sc_sens);
818 iwn_kstat_create(sc, "timing", sizeof (struct iwn_ks_timing),
819 &sc->sc_ks_timing, (void **)&sc->sc_timing);
820 iwn_kstat_create(sc, "edca", sizeof (struct iwn_ks_edca),
821 &sc->sc_ks_edca, (void **)&sc->sc_edca);
822
823 if (pci_config_setup(dip, &sc->sc_pcih) != DDI_SUCCESS) {
824 dev_err(sc->sc_dip, CE_WARN, "!pci_config_setup() failed");
825 goto fail_pci_config;
826 }
827
828 /*
829 * Get the offset of the PCI Express Capability Structure in PCI
830 * Configuration Space.
831 */
832 error = iwn_pci_get_capability(sc->sc_pcih, PCI_CAP_ID_PCI_E,
833 &sc->sc_cap_off);
834 if (error != DDI_SUCCESS) {
835 dev_err(sc->sc_dip, CE_WARN,
836 "!PCIe capability structure not found!");
837 goto fail_pci_capab;
838 }
839
840 /* Clear device-specific "PCI retry timeout" register (41h). */
841 reg = pci_config_get8(sc->sc_pcih, 0x41);
842 if (reg)
843 pci_config_put8(sc->sc_pcih, 0x41, 0);
844
845 error = ddi_regs_map_setup(dip, 1, &sc->sc_base, 0, 0, &iwn_reg_accattr,
846 &sc->sc_regh);
847 if (error != DDI_SUCCESS) {
848 dev_err(sc->sc_dip, CE_WARN, "!ddi_regs_map_setup() failed");
849 goto fail_regs_map;
850 }
851
852 /* Clear pending interrupts. */
853 IWN_WRITE(sc, IWN_INT, 0xffffffff);
854
855 /* Disable all interrupts. */
856 IWN_WRITE(sc, IWN_INT_MASK, 0);
857
858 /* Install interrupt handler. */
859 if (iwn_intr_setup(sc) != DDI_SUCCESS)
860 goto fail_intr;
861
862 mutex_init(&sc->sc_mtx, NULL, MUTEX_DRIVER,
863 DDI_INTR_PRI(sc->sc_intr_pri));
864 mutex_init(&sc->sc_tx_mtx, NULL, MUTEX_DRIVER,
865 DDI_INTR_PRI(sc->sc_intr_pri));
866 mutex_init(&sc->sc_mt_mtx, NULL, MUTEX_DRIVER,
867 DDI_INTR_PRI(sc->sc_intr_pri));
868
869 cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
870 cv_init(&sc->sc_scan_cv, NULL, CV_DRIVER, NULL);
871 cv_init(&sc->sc_fhdma_cv, NULL, CV_DRIVER, NULL);
872 cv_init(&sc->sc_alive_cv, NULL, CV_DRIVER, NULL);
873 cv_init(&sc->sc_calib_cv, NULL, CV_DRIVER, NULL);
874
875 iwn_kstat_init(sc);
876
877 /* Read hardware revision and attach. */
878 sc->hw_type =
879 (IWN_READ(sc, IWN_HW_REV) & IWN_HW_REV_TYPE_MASK)
880 >> IWN_HW_REV_TYPE_SHIFT;
881 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
882 error = iwn4965_attach(sc);
883 else
884 error = iwn5000_attach(sc, sc->sc_devid);
885 if (error != 0) {
886 dev_err(sc->sc_dip, CE_WARN, "!could not attach device");
887 goto fail_hw;
888 }
889
890 if ((error = iwn_hw_prepare(sc)) != 0) {
891 dev_err(sc->sc_dip, CE_WARN, "!hardware not ready");
892 goto fail_hw;
893 }
894
895 /* Read MAC address, channels, etc from EEPROM. */
896 if ((error = iwn_read_eeprom(sc)) != 0) {
897 dev_err(sc->sc_dip, CE_WARN, "!could not read EEPROM");
898 goto fail_hw;
899 }
900
901 /* Allocate DMA memory for firmware transfers. */
902 if ((error = iwn_alloc_fwmem(sc)) != 0) {
903 dev_err(sc->sc_dip, CE_WARN,
904 "!could not allocate memory for firmware");
905 goto fail_fwmem;
906 }
907
908 /* Allocate "Keep Warm" page. */
909 if ((error = iwn_alloc_kw(sc)) != 0) {
910 dev_err(sc->sc_dip, CE_WARN,
911 "!could not allocate keep warm page");
912 goto fail_kw;
913 }
914
915 /* Allocate ICT table for 5000 Series. */
916 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
917 (error = iwn_alloc_ict(sc)) != 0) {
918 dev_err(sc->sc_dip, CE_WARN, "!could not allocate ICT table");
919 goto fail_ict;
920 }
921
922 /* Allocate TX scheduler "rings". */
923 if ((error = iwn_alloc_sched(sc)) != 0) {
924 dev_err(sc->sc_dip, CE_WARN,
925 "!could not allocate TX scheduler rings");
926 goto fail_sched;
927 }
928
929 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
930 for (i = 0; i < sc->ntxqs; i++) {
931 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
932 dev_err(sc->sc_dip, CE_WARN,
933 "!could not allocate TX ring %d", i);
934 while (--i >= 0)
935 iwn_free_tx_ring(sc, &sc->txq[i]);
936 goto fail_txring;
937 }
938 }
939
940 /* Allocate RX ring. */
941 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
942 dev_err(sc->sc_dip, CE_WARN, "!could not allocate RX ring");
943 goto fail_rxring;
944 }
945
946 /* Clear pending interrupts. */
947 IWN_WRITE(sc, IWN_INT, 0xffffffff);
948
949 /* Count the number of available chains. */
950 sc->ntxchains =
951 ((sc->txchainmask >> 2) & 1) +
952 ((sc->txchainmask >> 1) & 1) +
953 ((sc->txchainmask >> 0) & 1);
954 sc->nrxchains =
955 ((sc->rxchainmask >> 2) & 1) +
956 ((sc->rxchainmask >> 1) & 1) +
957 ((sc->rxchainmask >> 0) & 1);
958 dev_err(sc->sc_dip, CE_CONT, "!MIMO %dT%dR, %s, address %s",
959 sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
960 ieee80211_macaddr_sprintf(ic->ic_macaddr));
961
962 sc->sc_ant->tx_ant.value.ul = sc->txchainmask;
963 sc->sc_ant->rx_ant.value.ul = sc->rxchainmask;
964
965 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
966 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
967 ic->ic_state = IEEE80211_S_INIT;
968
969 /* Set device capabilities. */
970 /* XXX OpenBSD has IEEE80211_C_WEP, IEEE80211_C_RSN,
971 * and IEEE80211_C_PMGT too. */
972 ic->ic_caps =
973 IEEE80211_C_IBSS | /* IBSS mode support */
974 IEEE80211_C_WPA | /* 802.11i */
975 IEEE80211_C_MONITOR | /* monitor mode supported */
976 IEEE80211_C_TXPMGT | /* tx power management */
977 IEEE80211_C_SHSLOT | /* short slot time supported */
978 IEEE80211_C_SHPREAMBLE | /* short preamble supported */
979 IEEE80211_C_WME; /* 802.11e */
980
981 #ifndef IEEE80211_NO_HT
982 if (sc->sc_flags & IWN_FLAG_HAS_11N) {
983 /* Set HT capabilities. */
984 ic->ic_htcaps =
985 #if IWN_RBUF_SIZE == 8192
986 IEEE80211_HTCAP_AMSDU7935 |
987 #endif
988 IEEE80211_HTCAP_CBW20_40 |
989 IEEE80211_HTCAP_SGI20 |
990 IEEE80211_HTCAP_SGI40;
991 if (sc->hw_type != IWN_HW_REV_TYPE_4965)
992 ic->ic_htcaps |= IEEE80211_HTCAP_GF;
993 if (sc->hw_type == IWN_HW_REV_TYPE_6050)
994 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
995 else
996 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
997 }
998 #endif /* !IEEE80211_NO_HT */
999
1000 /* Set supported legacy rates. */
1001 ic->ic_sup_rates[IEEE80211_MODE_11B] = iwn_rateset_11b;
1002 ic->ic_sup_rates[IEEE80211_MODE_11G] = iwn_rateset_11g;
1003 if (sc->sc_flags & IWN_FLAG_HAS_5GHZ) {
1004 ic->ic_sup_rates[IEEE80211_MODE_11A] = iwn_rateset_11a;
1005 }
1006 #ifndef IEEE80211_NO_HT
1007 if (sc->sc_flags & IWN_FLAG_HAS_11N) {
1008 /* Set supported HT rates. */
1009 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */
1010 if (sc->nrxchains > 1)
1011 ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
1012 if (sc->nrxchains > 2)
1013 ic->ic_sup_mcs[2] = 0xff; /* MCS 16-23 */
1014 }
1015 #endif
1016
1017 /* IBSS channel undefined for now. */
1018 ic->ic_ibss_chan = &ic->ic_sup_channels[0];
1019
1020 ic->ic_node_newassoc = iwn_newassoc;
1021 ic->ic_xmit = iwn_send;
1022 #ifdef IWN_HWCRYPTO
1023 ic->ic_crypto.cs_key_set = iwn_set_key;
1024 ic->ic_crypto.cs_key_delete = iwn_delete_key;
1025 #endif
1026 ic->ic_wme.wme_update = iwn_wme_update;
1027 #ifndef IEEE80211_NO_HT
1028 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
1029 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
1030 ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
1031 ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
1032 #endif
1033 /*
1034 * attach to 802.11 module
1035 */
1036 ieee80211_attach(ic);
1037
1038 ieee80211_register_door(ic, ddi_driver_name(dip), ddi_get_instance(dip));
1039
1040 /* Override 802.11 state transition machine. */
1041 sc->sc_newstate = ic->ic_newstate;
1042 ic->ic_newstate = iwn_newstate;
1043 ic->ic_watchdog = iwn_watchdog;
1044
1045 ic->ic_node_alloc = iwn_node_alloc;
1046 ic->ic_node_free = iwn_node_free;
1047
1048 ieee80211_media_init(ic);
1049
1050 /*
1051 * initialize default tx key
1052 */
1053 ic->ic_def_txkey = 0;
1054
1055 sc->amrr.amrr_min_success_threshold = 1;
1056 sc->amrr.amrr_max_success_threshold = 15;
1057
1058 /*
1059 * Initialize pointer to device specific functions
1060 */
1061 wd.wd_secalloc = WIFI_SEC_NONE;
1062 wd.wd_opmode = ic->ic_opmode;
1063 IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
1064
1065 /*
1066 * create relation to GLD
1067 */
1068 macp = mac_alloc(MAC_VERSION);
1069 if (NULL == macp) {
1070 dev_err(sc->sc_dip, CE_WARN, "!mac_alloc() failed");
1071 goto fail_mac_alloc;
1072 }
1073
1074 macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
1075 macp->m_driver = sc;
1076 macp->m_dip = dip;
1077 macp->m_src_addr = ic->ic_macaddr;
1078 macp->m_callbacks = &iwn_m_callbacks;
1079 macp->m_min_sdu = 0;
1080 macp->m_max_sdu = IEEE80211_MTU;
1081 macp->m_pdata = &wd;
1082 macp->m_pdata_size = sizeof (wd);
1083
1084 /*
1085 * Register the macp to mac
1086 */
1087 error = mac_register(macp, &ic->ic_mach);
1088 mac_free(macp);
1089 if (error != DDI_SUCCESS) {
1090 dev_err(sc->sc_dip, CE_WARN, "!mac_register() failed");
1091 goto fail_mac_alloc;
1092 }
1093
1094 /*
1095 * Create minor node of type DDI_NT_NET_WIFI
1096 */
1097 (void) snprintf(strbuf, sizeof (strbuf), "iwn%d", instance);
1098 error = ddi_create_minor_node(dip, strbuf, S_IFCHR,
1099 instance + 1, DDI_NT_NET_WIFI, 0);
1100 if (error != DDI_SUCCESS) {
1101 dev_err(sc->sc_dip, CE_WARN, "!ddi_create_minor_node() failed");
1102 goto fail_minor;
1103 }
1104
1105 /*
1106 * Notify link is down now
1107 */
1108 mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
1109
1110 sc->sc_periodic = ddi_periodic_add(iwn_periodic, sc,
1111 iwn_periodic_interval * MICROSEC, 0);
1112
1113 if (sc->sc_ks_misc)
1114 kstat_install(sc->sc_ks_misc);
1115 if (sc->sc_ks_ant)
1116 kstat_install(sc->sc_ks_ant);
1117 if (sc->sc_ks_sens)
1118 kstat_install(sc->sc_ks_sens);
1119 if (sc->sc_ks_timing)
1120 kstat_install(sc->sc_ks_timing);
1121 if (sc->sc_ks_edca)
1122 kstat_install(sc->sc_ks_edca);
1123 if (sc->sc_ks_txpower)
1124 kstat_install(sc->sc_ks_txpower);
1125 if (sc->sc_ks_toff)
1126 kstat_install(sc->sc_ks_toff);
1127
1128 sc->sc_flags |= IWN_FLAG_ATTACHED;
1129
1130 return (DDI_SUCCESS);
1131
1132 /* Free allocated memory if something failed during attachment. */
1133 fail_minor:
1134 mac_unregister(ic->ic_mach);
1135
1136 fail_mac_alloc:
1137 ieee80211_detach(ic);
1138 iwn_free_rx_ring(sc, &sc->rxq);
1139
1140 fail_rxring:
1141 for (i = 0; i < sc->ntxqs; i++)
1142 iwn_free_tx_ring(sc, &sc->txq[i]);
1143
1144 fail_txring:
1145 iwn_free_sched(sc);
1146
1147 fail_sched:
1148 if (sc->ict != NULL)
1149 iwn_free_ict(sc);
1150
1151 fail_ict:
1152 iwn_free_kw(sc);
1153
1154 fail_kw:
1155 iwn_free_fwmem(sc);
1156
1157 fail_fwmem:
1158 fail_hw:
1159 iwn_intr_teardown(sc);
1160
1161 iwn_kstat_free(sc->sc_ks_txpower, sc->sc_txpower,
1162 sizeof (struct iwn_ks_txpower));
1163
1164 if (sc->hw_type == IWN_HW_REV_TYPE_6005)
1165 iwn_kstat_free(sc->sc_ks_toff, sc->sc_toff.t6000,
1166 sizeof (struct iwn_ks_toff_6000));
1167 else
1168 iwn_kstat_free(sc->sc_ks_toff, sc->sc_toff.t2000,
1169 sizeof (struct iwn_ks_toff_2000));
1170
1171 fail_intr:
1172 ddi_regs_map_free(&sc->sc_regh);
1173
1174 fail_regs_map:
1175 fail_pci_capab:
1176 pci_config_teardown(&sc->sc_pcih);
1177
1178 fail_pci_config:
1179 iwn_kstat_free(sc->sc_ks_misc, sc->sc_misc,
1180 sizeof (struct iwn_ks_misc));
1181 iwn_kstat_free(sc->sc_ks_ant, sc->sc_ant,
1182 sizeof (struct iwn_ks_ant));
1183 iwn_kstat_free(sc->sc_ks_sens, sc->sc_sens,
1184 sizeof (struct iwn_ks_sens));
1185 iwn_kstat_free(sc->sc_ks_timing, sc->sc_timing,
1186 sizeof (struct iwn_ks_timing));
1187 iwn_kstat_free(sc->sc_ks_edca, sc->sc_edca,
1188 sizeof (struct iwn_ks_edca));
1189
1190 ddi_soft_state_free(iwn_state, instance);
1191
1192 return (DDI_FAILURE);
1193 }
1194
1195 int
1196 iwn4965_attach(struct iwn_softc *sc)
1197 {
1198 struct iwn_ops *ops = &sc->ops;
1199
1200 ops->load_firmware = iwn4965_load_firmware;
1201 ops->read_eeprom = iwn4965_read_eeprom;
1202 ops->post_alive = iwn4965_post_alive;
1203 ops->nic_config = iwn4965_nic_config;
1204 ops->config_bt_coex = iwn_config_bt_coex_bluetooth;
1205 ops->update_sched = iwn4965_update_sched;
1206 ops->get_temperature = iwn4965_get_temperature;
1207 ops->get_rssi = iwn4965_get_rssi;
1208 ops->set_txpower = iwn4965_set_txpower;
1209 ops->init_gains = iwn4965_init_gains;
1210 ops->set_gains = iwn4965_set_gains;
1211 ops->add_node = iwn4965_add_node;
1212 ops->tx_done = iwn4965_tx_done;
1213 #ifndef IEEE80211_NO_HT
1214 ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
1215 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
1216 #endif
1217 sc->ntxqs = IWN4965_NTXQUEUES;
1218 sc->ndmachnls = IWN4965_NDMACHNLS;
1219 sc->broadcast_id = IWN4965_ID_BROADCAST;
1220 sc->rxonsz = IWN4965_RXONSZ;
1221 sc->schedsz = IWN4965_SCHEDSZ;
1222 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
1223 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
1224 sc->fwsz = IWN4965_FWSZ;
1225 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
1226 sc->limits = &iwn4965_sensitivity_limits;
1227 sc->fwname = "iwlwifi-4965-2.ucode";
1228 /* Override chains masks, ROM is known to be broken. */
1229 sc->txchainmask = IWN_ANT_AB;
1230 sc->rxchainmask = IWN_ANT_ABC;
1231
1232 iwn_kstat_create(sc, "txpower", sizeof (struct iwn_ks_txpower),
1233 &sc->sc_ks_txpower, (void **)&sc->sc_txpower);
1234 iwn_kstat_init_4965(sc);
1235
1236 return 0;
1237 }
1238
1239 int
1240 iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
1241 {
1242 struct iwn_ops *ops = &sc->ops;
1243
1244 ops->load_firmware = iwn5000_load_firmware;
1245 ops->read_eeprom = iwn5000_read_eeprom;
1246 ops->post_alive = iwn5000_post_alive;
1247 ops->nic_config = iwn5000_nic_config;
1248 ops->config_bt_coex = iwn_config_bt_coex_bluetooth;
1249 ops->update_sched = iwn5000_update_sched;
1250 ops->get_temperature = iwn5000_get_temperature;
1251 ops->get_rssi = iwn5000_get_rssi;
1252 ops->set_txpower = iwn5000_set_txpower;
1253 ops->init_gains = iwn5000_init_gains;
1254 ops->set_gains = iwn5000_set_gains;
1255 ops->add_node = iwn5000_add_node;
1256 ops->tx_done = iwn5000_tx_done;
1257 #ifndef IEEE80211_NO_HT
1258 ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
1259 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
1260 #endif
1261 sc->ntxqs = IWN5000_NTXQUEUES;
1262 sc->ndmachnls = IWN5000_NDMACHNLS;
1263 sc->broadcast_id = IWN5000_ID_BROADCAST;
1264 sc->rxonsz = IWN5000_RXONSZ;
1265 sc->schedsz = IWN5000_SCHEDSZ;
1266 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
1267 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
1268 sc->fwsz = IWN5000_FWSZ;
1269 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
1270
1271 switch (sc->hw_type) {
1272 case IWN_HW_REV_TYPE_5100:
1273 sc->limits = &iwn5000_sensitivity_limits;
1274 sc->fwname = "iwlwifi-5000-2.ucode";
1275 /* Override chains masks, ROM is known to be broken. */
1276 sc->txchainmask = IWN_ANT_B;
1277 sc->rxchainmask = IWN_ANT_AB;
1278 break;
1279 case IWN_HW_REV_TYPE_5150:
1280 sc->limits = &iwn5150_sensitivity_limits;
1281 sc->fwname = "iwlwifi-5150-2.ucode";
1282 break;
1283 case IWN_HW_REV_TYPE_5300:
1284 case IWN_HW_REV_TYPE_5350:
1285 sc->limits = &iwn5000_sensitivity_limits;
1286 sc->fwname = "iwlwifi-5000-2.ucode";
1287 break;
1288 case IWN_HW_REV_TYPE_1000:
1289 sc->limits = &iwn1000_sensitivity_limits;
1290 if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_100_1 ||
1291 pid == PCI_PRODUCT_INTEL_WIFI_LINK_100_2)
1292 sc->fwname = "iwlwifi-100-5.ucode";
1293 else
1294 sc->fwname = "iwlwifi-1000-3.ucode";
1295 break;
1296 case IWN_HW_REV_TYPE_6000:
1297 sc->limits = &iwn6000_sensitivity_limits;
1298 sc->fwname = "iwlwifi-6000-4.ucode";
1299 if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_1 ||
1300 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_2) {
1301 sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
1302 /* Override chains masks, ROM is known to be broken. */
1303 sc->txchainmask = IWN_ANT_BC;
1304 sc->rxchainmask = IWN_ANT_BC;
1305 }
1306 break;
1307 case IWN_HW_REV_TYPE_6050:
1308 sc->limits = &iwn6000_sensitivity_limits;
1309 sc->fwname = "iwlwifi-6050-5.ucode";
1310 break;
1311 case IWN_HW_REV_TYPE_6005:
1312 sc->limits = &iwn6000_sensitivity_limits;
1313 /* Type 6030 cards return IWN_HW_REV_TYPE_6005 */
1314 if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_1 ||
1315 pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_2 ||
1316 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_1 ||
1317 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_2 ||
1318 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6235 ||
1319 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6235_2) {
1320 sc->fwname = "iwlwifi-6000g2b-6.ucode";
1321 ops->config_bt_coex = iwn_config_bt_coex_adv1;
1322 }
1323 else
1324 sc->fwname = "iwlwifi-6000g2a-6.ucode";
1325
1326 iwn_kstat_create(sc, "temp_offset",
1327 sizeof (struct iwn_ks_toff_6000),
1328 &sc->sc_ks_toff, (void **)&sc->sc_toff.t6000);
1329 iwn_kstat_init_6000(sc);
1330 break;
1331 case IWN_HW_REV_TYPE_2030:
1332 sc->limits = &iwn2000_sensitivity_limits;
1333 sc->fwname = "iwlwifi-2030-6.ucode";
1334 ops->config_bt_coex = iwn_config_bt_coex_adv2;
1335
1336 iwn_kstat_create(sc, "temp_offset",
1337 sizeof (struct iwn_ks_toff_2000),
1338 &sc->sc_ks_toff, (void **)&sc->sc_toff.t2000);
1339 iwn_kstat_init_2000(sc);
1340 break;
1341 case IWN_HW_REV_TYPE_2000:
1342 sc->limits = &iwn2000_sensitivity_limits;
1343 sc->fwname = "iwlwifi-2000-6.ucode";
1344
1345 iwn_kstat_create(sc, "temp_offset",
1346 sizeof (struct iwn_ks_toff_2000),
1347 &sc->sc_ks_toff, (void **)&sc->sc_toff.t2000);
1348 iwn_kstat_init_2000(sc);
1349 break;
1350 case IWN_HW_REV_TYPE_135:
1351 sc->limits = &iwn2000_sensitivity_limits;
1352 sc->fwname = "iwlwifi-135-6.ucode";
1353 ops->config_bt_coex = iwn_config_bt_coex_adv2;
1354
1355 iwn_kstat_create(sc, "temp_offset",
1356 sizeof (struct iwn_ks_toff_2000),
1357 &sc->sc_ks_toff, (void **)&sc->sc_toff.t2000);
1358 iwn_kstat_init_2000(sc);
1359 break;
1360 case IWN_HW_REV_TYPE_105:
1361 sc->limits = &iwn2000_sensitivity_limits;
1362 sc->fwname = "iwlwifi-105-6.ucode";
1363
1364 iwn_kstat_create(sc, "temp_offset",
1365 sizeof (struct iwn_ks_toff_2000),
1366 &sc->sc_ks_toff, (void **)&sc->sc_toff.t2000);
1367 iwn_kstat_init_2000(sc);
1368 break;
1369 default:
1370 dev_err(sc->sc_dip, CE_WARN, "!adapter type %d not supported",
1371 sc->hw_type);
1372 return ENOTSUP;
1373 }
1374 return 0;
1375 }
1376
1377 static int
1378 iwn_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1379 {
1380 struct iwn_softc *sc = ddi_get_driver_private(dip);
1381 ieee80211com_t *ic = &sc->sc_ic;
1382 int qid, error;
1383
1384 switch (cmd) {
1385 case DDI_DETACH:
1386 break;
1387 case DDI_SUSPEND:
1388 sc->sc_flags &= ~IWN_FLAG_HW_ERR_RECOVER;
1389 sc->sc_flags &= ~IWN_FLAG_RATE_AUTO_CTL;
1390
1391 sc->sc_flags |= IWN_FLAG_SUSPEND;
1392
1393 if (sc->sc_flags & IWN_FLAG_RUNNING) {
1394 iwn_hw_stop(sc, B_TRUE);
1395 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1396
1397 }
1398
1399 return (DDI_SUCCESS);
1400 default:
1401 return (DDI_FAILURE);
1402 }
1403
1404 if (!(sc->sc_flags & IWN_FLAG_ATTACHED)) {
1405 return (DDI_FAILURE);
1406 }
1407
1408 error = mac_disable(ic->ic_mach);
1409 if (error != DDI_SUCCESS)
1410 return (error);
1411
1412 mutex_enter(&sc->sc_mtx);
1413 sc->sc_flags |= IWN_FLAG_STOP_CALIB_TO;
1414 mutex_exit(&sc->sc_mtx);
1415
1416 if (sc->calib_to != 0)
1417 (void) untimeout(sc->calib_to);
1418 sc->calib_to = 0;
1419
1420 if (sc->scan_to != 0)
1421 (void) untimeout(sc->scan_to);
1422 sc->scan_to = 0;
1423
1424 ddi_periodic_delete(sc->sc_periodic);
1425
1426 /*
1427 * stop chipset
1428 */
1429 iwn_hw_stop(sc, B_TRUE);
1430
1431 /*
1432 * Unregister from GLD
1433 */
1434 (void) mac_unregister(ic->ic_mach);
1435 ieee80211_detach(ic);
1436
1437 /* Uninstall interrupt handler. */
1438 iwn_intr_teardown(sc);
1439
1440 /* Free DMA resources. */
1441 mutex_enter(&sc->sc_mtx);
1442 iwn_free_rx_ring(sc, &sc->rxq);
1443 for (qid = 0; qid < sc->ntxqs; qid++)
1444 iwn_free_tx_ring(sc, &sc->txq[qid]);
1445 iwn_free_sched(sc);
1446 iwn_free_kw(sc);
1447 if (sc->ict != NULL)
1448 iwn_free_ict(sc);
1449 iwn_free_fwmem(sc);
1450 mutex_exit(&sc->sc_mtx);
1451
1452 iwn_kstat_free(sc->sc_ks_misc, sc->sc_misc,
1453 sizeof (struct iwn_ks_misc));
1454 iwn_kstat_free(sc->sc_ks_ant, sc->sc_ant,
1455 sizeof (struct iwn_ks_ant));
1456 iwn_kstat_free(sc->sc_ks_sens, sc->sc_sens,
1457 sizeof (struct iwn_ks_sens));
1458 iwn_kstat_free(sc->sc_ks_timing, sc->sc_timing,
1459 sizeof (struct iwn_ks_timing));
1460 iwn_kstat_free(sc->sc_ks_edca, sc->sc_edca,
1461 sizeof (struct iwn_ks_edca));
1462 iwn_kstat_free(sc->sc_ks_txpower, sc->sc_txpower,
1463 sizeof (struct iwn_ks_txpower));
1464
1465 if (sc->hw_type == IWN_HW_REV_TYPE_6005)
1466 iwn_kstat_free(sc->sc_ks_toff, sc->sc_toff.t6000,
1467 sizeof (struct iwn_ks_toff_6000));
1468 else
1469 iwn_kstat_free(sc->sc_ks_toff, sc->sc_toff.t2000,
1470 sizeof (struct iwn_ks_toff_2000));
1471
1472 ddi_regs_map_free(&sc->sc_regh);
1473 pci_config_teardown(&sc->sc_pcih);
1474 ddi_remove_minor_node(dip, NULL);
1475 ddi_soft_state_free(iwn_state, ddi_get_instance(dip));
1476
1477 return 0;
1478 }
1479
1480 static int
1481 iwn_quiesce(dev_info_t *dip)
1482 {
1483 struct iwn_softc *sc;
1484
1485 sc = ddi_get_soft_state(iwn_state, ddi_get_instance(dip));
1486 if (sc == NULL)
1487 return (DDI_FAILURE);
1488
1489 #ifdef IWN_DEBUG
1490 /* bypass any messages */
1491 iwn_dbg_print = 0;
1492 #endif
1493
1494 /*
1495 * No more blocking is allowed while we are in the
1496 * quiesce(9E) entry point.
1497 */
1498 sc->sc_flags |= IWN_FLAG_QUIESCED;
1499
1500 /*
1501 * Disable and mask all interrupts.
1502 */
1503 iwn_hw_stop(sc, B_FALSE);
1504
1505 return (DDI_SUCCESS);
1506 }
1507
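/*
 * Request exclusive ("MAC access") ownership of the NIC from the
 * firmware: set the access-request bit in GP_CNTRL and poll until the
 * access-enable bit is granted with the sleep bit clear.
 */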
1508 static int
1509 iwn_nic_lock(struct iwn_softc *sc)
1510 {
1511 int ntries;
1512
1513 /* Request exclusive access to NIC. */
1514 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1515
1516 /* Spin until we actually get the lock. */
1517 for (ntries = 0; ntries < 1000; ntries++) {
1518 if ((IWN_READ(sc, IWN_GP_CNTRL) &
1519 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
1520 IWN_GP_CNTRL_MAC_ACCESS_ENA)
1521 return 0;
1522 DELAY(10);
1523 }
1524 return ETIMEDOUT;
1525 }
1526
1527 static __inline void
1528 iwn_nic_unlock(struct iwn_softc *sc)
1529 {
1530 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1531 }
1532
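/*
 * Indirect access to periphery registers and internal memory: the target
 * address is latched through a window register (PRPH_RADDR/WADDR,
 * MEM_RADDR/WADDR) and data is then transferred through the matching
 * data register.  Callers are generally expected to hold the NIC lock.
 */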
1533 static __inline uint32_t
1534 iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
1535 {
1536 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
1537 IWN_BARRIER_READ_WRITE(sc);
1538 return IWN_READ(sc, IWN_PRPH_RDATA);
1539 }
1540
1541 static __inline void
1542 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1543 {
1544 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
1545 IWN_BARRIER_WRITE(sc);
1546 IWN_WRITE(sc, IWN_PRPH_WDATA, data);
1547 }
1548
1549 static __inline void
1550 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1551 {
1552 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
1553 }
1554
1555 static __inline void
1556 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1557 {
1558 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
1559 }
1560
1561 static __inline void
1562 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
1563 const uint32_t *data, int count)
1564 {
1565 for (; count > 0; count--, data++, addr += 4)
1566 iwn_prph_write(sc, addr, *data);
1567 }
1568
1569 static __inline uint32_t
1570 iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
1571 {
1572 IWN_WRITE(sc, IWN_MEM_RADDR, addr);
1573 IWN_BARRIER_READ_WRITE(sc);
1574 return IWN_READ(sc, IWN_MEM_RDATA);
1575 }
1576
1577 static __inline void
1578 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1579 {
1580 IWN_WRITE(sc, IWN_MEM_WADDR, addr);
1581 IWN_BARRIER_WRITE(sc);
1582 IWN_WRITE(sc, IWN_MEM_WDATA, data);
1583 }
1584
1585 #ifndef IEEE80211_NO_HT
1586 static __inline void
1587 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
1588 {
1589 uint32_t tmp;
1590
1591 tmp = iwn_mem_read(sc, addr & ~3);
1592 if (addr & 3)
1593 tmp = (tmp & 0x0000ffff) | data << 16;
1594 else
1595 tmp = (tmp & 0xffff0000) | data;
1596 iwn_mem_write(sc, addr & ~3, tmp);
1597 }
1598 #endif
1599
1600 static __inline void
1601 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
1602 int count)
1603 {
1604 for (; count > 0; count--, addr += 4)
1605 *data++ = iwn_mem_read(sc, addr);
1606 }
1607
1608 static __inline void
1609 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
1610 int count)
1611 {
1612 for (; count > 0; count--, addr += 4)
1613 iwn_mem_write(sc, addr, val);
1614 }
1615
1616 static int
1617 iwn_eeprom_lock(struct iwn_softc *sc)
1618 {
1619 int i, ntries;
1620
1621 for (i = 0; i < 100; i++) {
1622 /* Request exclusive access to EEPROM. */
1623 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1624 IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1625
1626 /* Spin until we actually get the lock. */
1627 for (ntries = 0; ntries < 100; ntries++) {
1628 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1629 IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1630 return 0;
1631 DELAY(10);
1632 }
1633 }
1634 return ETIMEDOUT;
1635 }
1636
1637 static __inline void
1638 iwn_eeprom_unlock(struct iwn_softc *sc)
1639 {
1640 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1641 }
1642
1643 /*
1644 * Initialize access by host to One Time Programmable ROM.
1645 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1646 */
1647 static int
1648 iwn_init_otprom(struct iwn_softc *sc)
1649 {
1650 uint16_t prev = 0, base, next;
1651 int count, error;
1652
1653 /* Wait for clock stabilization before accessing prph. */
1654 if ((error = iwn_clock_wait(sc)) != 0)
1655 return error;
1656
1657 if ((error = iwn_nic_lock(sc)) != 0)
1658 return error;
1659 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1660 DELAY(5);
1661 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1662 iwn_nic_unlock(sc);
1663
1664 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1665 if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
1666 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1667 IWN_RESET_LINK_PWR_MGMT_DIS);
1668 }
1669 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1670 /* Clear ECC status. */
1671 IWN_SETBITS(sc, IWN_OTP_GP,
1672 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1673
1674 /*
1675 * Find the block before last block (contains the EEPROM image)
1676 * for HW without OTP shadow RAM.
1677 */
1678 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
1679 /* Switch to absolute addressing mode. */
1680 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1681 base = 0;
1682 for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
1683 error = iwn_read_prom_data(sc, base, &next, 2);
1684 if (error != 0)
1685 return error;
1686 if (next == 0) /* End of linked-list. */
1687 break;
1688 prev = base;
1689 base = le16toh(next);
1690 }
1691 if (count == 0 || count == IWN1000_OTP_NBLOCKS)
1692 return EIO;
1693 /* Skip "next" word. */
1694 sc->prom_base = prev + 1;
1695 }
1696 return 0;
1697 }
1698
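/*
 * Read 16-bit words from the EEPROM/OTPROM: latch the word address in
 * the EEPROM register, poll for the READ_VALID bit and extract the data
 * from the upper half of the register.  For OTPROM parts, ECC status is
 * checked on every read.
 */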
1699 static int
1700 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1701 {
1702 uint8_t *out = data;
1703 uint32_t val, tmp;
1704 int ntries;
1705
1706 addr += sc->prom_base;
1707 for (; count > 0; count -= 2, addr++) {
1708 IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1709 for (ntries = 0; ntries < 10; ntries++) {
1710 val = IWN_READ(sc, IWN_EEPROM);
1711 if (val & IWN_EEPROM_READ_VALID)
1712 break;
1713 DELAY(5);
1714 }
1715 if (ntries == 10) {
1716 dev_err(sc->sc_dip, CE_WARN,
1717 "!timeout reading ROM at 0x%x", addr);
1718 return ETIMEDOUT;
1719 }
1720 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1721 /* OTPROM, check for ECC errors. */
1722 tmp = IWN_READ(sc, IWN_OTP_GP);
1723 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1724 dev_err(sc->sc_dip, CE_WARN,
1725 "!OTPROM ECC error at 0x%x", addr);
1726 return EIO;
1727 }
1728 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1729 /* Correctable ECC error, clear bit. */
1730 IWN_SETBITS(sc, IWN_OTP_GP,
1731 IWN_OTP_GP_ECC_CORR_STTS);
1732 }
1733 }
1734 *out++ = val >> 16;
1735 if (count > 1)
1736 *out++ = val >> 24;
1737 }
1738 return 0;
1739 }
1740
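/*
 * Allocate a single physically contiguous DMA buffer using the usual
 * three-step DDI sequence: ddi_dma_alloc_handle(), ddi_dma_mem_alloc() and
 * ddi_dma_addr_bind_handle().  dma_attr_sgllen is set to 1 so the bind
 * yields exactly one cookie; the buffer's physical address is stored in
 * dma->paddr and the whole mapping is torn down by iwn_dma_contig_free().
 */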
1741 static int
1742 iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1743 uint_t size, uint_t flags, void **kvap, ddi_device_acc_attr_t *acc_attr,
1744 uint_t align)
1745 {
1746 ddi_dma_attr_t dma_attr = {
1747 .dma_attr_version = DMA_ATTR_V0,
1748 .dma_attr_addr_lo = 0,
1749 .dma_attr_addr_hi = 0xfffffffffULL,
1750 .dma_attr_count_max = 0xfffffffffULL,
1751 .dma_attr_align = align,
1752 .dma_attr_burstsizes = 0x7ff,
1753 .dma_attr_minxfer = 1,
1754 .dma_attr_maxxfer = 0xfffffffffULL,
1755 .dma_attr_seg = 0xfffffffffULL,
1756 .dma_attr_sgllen = 1,
1757 .dma_attr_granular = 1,
1758 .dma_attr_flags = 0,
1759 };
1760 int error;
1761
1762 error = ddi_dma_alloc_handle(sc->sc_dip, &dma_attr, DDI_DMA_SLEEP, NULL,
1763 &dma->dma_hdl);
1764 if (error != DDI_SUCCESS) {
1765 dev_err(sc->sc_dip, CE_WARN,
1766 "ddi_dma_alloc_handle() failed, error = %d", error);
1767 goto fail;
1768 }
1769
1770 error = ddi_dma_mem_alloc(dma->dma_hdl, size, acc_attr,
1771 flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING), DDI_DMA_SLEEP, 0,
1772 &dma->vaddr, &dma->length, &dma->acc_hdl);
1773 if (error != DDI_SUCCESS) {
1774 dev_err(sc->sc_dip, CE_WARN,
1775 "ddi_dma_mem_alloc() failed, error = %d", error);
1776 goto fail2;
1777 }
1778
1779 bzero(dma->vaddr, dma->length);
1780
1781 error = ddi_dma_addr_bind_handle(dma->dma_hdl, NULL, dma->vaddr,
1782 dma->length, flags, DDI_DMA_SLEEP, NULL, &dma->cookie,
1783 &dma->ncookies);
1784 if (error != DDI_DMA_MAPPED) {
1785 dma->ncookies = 0;
1786 dev_err(sc->sc_dip, CE_WARN,
1787 "ddi_dma_addr_bind_handle() failed, error = %d", error);
1788 goto fail3;
1789 }
1790
1791 dma->size = size;
1792 dma->paddr = dma->cookie.dmac_laddress;
1793
1794 if (kvap != NULL)
1795 *kvap = (void *)dma->vaddr;
1796
1797 return (DDI_SUCCESS);
1798
1799 fail3:
1800 ddi_dma_mem_free(&dma->acc_hdl);
1801 fail2:
1802 ddi_dma_free_handle(&dma->dma_hdl);
1803 fail:
1804 bzero(dma, sizeof (struct iwn_dma_info));
1805 return (DDI_FAILURE);
1806 }
1807
1808 static void
1809 iwn_dma_contig_free(struct iwn_dma_info *dma)
1810 {
1811 if (dma->dma_hdl != NULL) {
1812 if (dma->ncookies)
1813 (void) ddi_dma_unbind_handle(dma->dma_hdl);
1814 ddi_dma_free_handle(&dma->dma_hdl);
1815 }
1816
1817 if (dma->acc_hdl != NULL)
1818 ddi_dma_mem_free(&dma->acc_hdl);
1819
1820 bzero(dma, sizeof (struct iwn_dma_info));
1821 }
1822
1823 static int
1824 iwn_alloc_sched(struct iwn_softc *sc)
1825 {
1826 /* TX scheduler rings must be aligned on a 1KB boundary. */
1827
1828 return iwn_dma_contig_alloc(sc, &sc->sched_dma, sc->schedsz,
1829 DDI_DMA_CONSISTENT | DDI_DMA_RDWR, (void **)&sc->sched,
1830 &iwn_dma_accattr, 1024);
1831 }
1832
1833 static void
1834 iwn_free_sched(struct iwn_softc *sc)
1835 {
1836 iwn_dma_contig_free(&sc->sched_dma);
1837 }
1838
1839 static int
1840 iwn_alloc_kw(struct iwn_softc *sc)
1841 {
1842 /* "Keep Warm" page must be aligned on a 4KB boundary. */
1843
1844 return iwn_dma_contig_alloc(sc, &sc->kw_dma, IWN_KW_SIZE,
1845 DDI_DMA_CONSISTENT | DDI_DMA_RDWR, NULL, &iwn_dma_accattr, 4096);
1846 }
1847
1848 static void
1849 iwn_free_kw(struct iwn_softc *sc)
1850 {
1851 iwn_dma_contig_free(&sc->kw_dma);
1852 }
1853
1854 static int
1855 iwn_alloc_ict(struct iwn_softc *sc)
1856 {
1857 /* ICT table must be aligned on a 4KB boundary. */
1858
1859 return iwn_dma_contig_alloc(sc, &sc->ict_dma, IWN_ICT_SIZE,
1860 DDI_DMA_CONSISTENT | DDI_DMA_RDWR, (void **)&sc->ict,
1861 &iwn_dma_descattr, 4096);
1862 }
1863
1864 static void
1865 iwn_free_ict(struct iwn_softc *sc)
1866 {
1867 iwn_dma_contig_free(&sc->ict_dma);
1868 }
1869
1870 static int
1871 iwn_alloc_fwmem(struct iwn_softc *sc)
1872 {
1873 /* Must be aligned on a 16-byte boundary. */
1874 return iwn_dma_contig_alloc(sc, &sc->fw_dma, sc->fwsz,
1875 DDI_DMA_CONSISTENT | DDI_DMA_RDWR, NULL, &iwn_dma_accattr, 16);
1876 }
1877
1878 static void
1879 iwn_free_fwmem(struct iwn_softc *sc)
1880 {
1881 iwn_dma_contig_free(&sc->fw_dma);
1882 }
1883
1884 static int
1885 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1886 {
1887 size_t size;
1888 int i, error;
1889
1890 ring->cur = 0;
1891
1892 /* Allocate RX descriptors (256-byte aligned). */
1893 size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1894 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, size,
1895 DDI_DMA_CONSISTENT | DDI_DMA_RDWR, (void **)&ring->desc,
1896 &iwn_dma_descattr, 256);
1897 if (error != DDI_SUCCESS) {
1898 dev_err(sc->sc_dip, CE_WARN,
1899 "!could not allocate RX ring DMA memory");
1900 goto fail;
1901 }
1902
1903 /* Allocate RX status area (16-byte aligned). */
1904 error = iwn_dma_contig_alloc(sc, &ring->stat_dma,
1905 sizeof (struct iwn_rx_status), DDI_DMA_CONSISTENT | DDI_DMA_RDWR,
1906 (void **)&ring->stat, &iwn_dma_descattr, 16);
1907 if (error != DDI_SUCCESS) {
1908 dev_err(sc->sc_dip, CE_WARN,
1909 "!could not allocate RX status DMA memory");
1910 goto fail;
1911 }
1912
1913 /*
1914 * Allocate and map RX buffers.
1915 */
1916 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1917 struct iwn_rx_data *data = &ring->data[i];
1918
1919 error = iwn_dma_contig_alloc(sc, &data->dma_data, IWN_RBUF_SIZE,
1920 DDI_DMA_CONSISTENT | DDI_DMA_READ, NULL, &iwn_dma_accattr,
1921 256);
1922 if (error != DDI_SUCCESS) {
1923 dev_err(sc->sc_dip, CE_WARN,
1924 "!could not create RX buf DMA map");
1925 goto fail;
1926 }
1927
1928 /* Set physical address of RX buffer (256-byte aligned). */
1929 ring->desc[i] = htole32(data->dma_data.paddr >> 8);
1930 }
1931
1932 (void) ddi_dma_sync(ring->desc_dma.dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
1933
1934 return 0;
1935
1936 fail: iwn_free_rx_ring(sc, ring);
1937 return error;
1938 }
1939
1940 static void
1941 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1942 {
1943 int ntries;
1944
1945 if (iwn_nic_lock(sc) == 0) {
1946 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1947 for (ntries = 0; ntries < 1000; ntries++) {
1948 if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1949 IWN_FH_RX_STATUS_IDLE)
1950 break;
1951 DELAY(10);
1952 }
1953 iwn_nic_unlock(sc);
1954 }
1955 ring->cur = 0;
1956 sc->last_rx_valid = 0;
1957 }
1958
1959 static void
1960 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1961 {
1962 _NOTE(ARGUNUSED(sc));
1963 int i;
1964
1965 iwn_dma_contig_free(&ring->desc_dma);
1966 iwn_dma_contig_free(&ring->stat_dma);
1967
1968 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1969 struct iwn_rx_data *data = &ring->data[i];
1970
1971 if (data->dma_data.dma_hdl)
1972 iwn_dma_contig_free(&data->dma_data);
1973 }
1974 }
1975
1976 static int
1977 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1978 {
1979 uintptr_t paddr;
1980 size_t size;
1981 int i, error;
1982
1983 ring->qid = qid;
1984 ring->queued = 0;
1985 ring->cur = 0;
1986
1987 /* Allocate TX descriptors (256-byte aligned). */
1988 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1989 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, size,
1990 DDI_DMA_CONSISTENT | DDI_DMA_WRITE, (void **)&ring->desc,
1991 &iwn_dma_descattr, 256);
1992 if (error != DDI_SUCCESS) {
1993 dev_err(sc->sc_dip, CE_WARN,
1994 "!could not allocate TX ring DMA memory");
1995 goto fail;
1996 }
1997 /*
1998 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
1999 	 * to allocate command space for other rings.
2000 * XXX Do we really need to allocate descriptors for other rings?
2001 */
2002 if (qid > 4)
2003 return 0;
2004
2005 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
2006 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, size,
2007 DDI_DMA_CONSISTENT | DDI_DMA_WRITE, (void **)&ring->cmd,
2008 &iwn_dma_accattr, 4);
2009 if (error != DDI_SUCCESS) {
2010 dev_err(sc->sc_dip, CE_WARN,
2011 "!could not allocate TX cmd DMA memory");
2012 goto fail;
2013 }
2014
2015 paddr = ring->cmd_dma.paddr;
2016 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2017 struct iwn_tx_data *data = &ring->data[i];
2018
2019 data->cmd_paddr = paddr;
2020 data->scratch_paddr = paddr + 12;
2021 paddr += sizeof (struct iwn_tx_cmd);
2022
2023 error = iwn_dma_contig_alloc(sc, &data->dma_data, IWN_TBUF_SIZE,
2024 DDI_DMA_CONSISTENT | DDI_DMA_WRITE, NULL, &iwn_dma_accattr,
2025 256);
2026 if (error != DDI_SUCCESS) {
2027 dev_err(sc->sc_dip, CE_WARN,
2028 "!could not create TX buf DMA map");
2029 goto fail;
2030 }
2031 }
2032 return 0;
2033
2034 fail: iwn_free_tx_ring(sc, ring);
2035 return error;
2036 }
2037
2038 static void
2039 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
2040 {
2041 int i;
2042
2043 if (ring->qid < 4)
2044 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2045 struct iwn_tx_data *data = &ring->data[i];
2046
2047 (void) ddi_dma_sync(data->dma_data.dma_hdl, 0, 0,
2048 DDI_DMA_SYNC_FORDEV);
2049 }
2050
2051 /* Clear TX descriptors. */
2052 memset(ring->desc, 0, ring->desc_dma.size);
2053 (void) ddi_dma_sync(ring->desc_dma.dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
2054 sc->qfullmsk &= ~(1 << ring->qid);
2055 ring->queued = 0;
2056 ring->cur = 0;
2057 }
2058
2059 static void
2060 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
2061 {
2062 _NOTE(ARGUNUSED(sc));
2063 int i;
2064
2065 iwn_dma_contig_free(&ring->desc_dma);
2066 iwn_dma_contig_free(&ring->cmd_dma);
2067
2068 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2069 struct iwn_tx_data *data = &ring->data[i];
2070
2071 if (data->dma_data.dma_hdl)
2072 iwn_dma_contig_free(&data->dma_data);
2073 }
2074 }
2075
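/*
 * Reset and re-enable the interrupt cause table (ICT) used by 5000 Series
 * and later devices.  In ICT mode the device DMA-writes interrupt causes
 * into this 4KB table in host memory rather than requiring PIO reads of
 * the interrupt registers; iwn_intr() drains the entries starting at
 * sc->ict_cur.
 */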
2076 static void
2077 iwn5000_ict_reset(struct iwn_softc *sc)
2078 {
2079 /* Disable interrupts. */
2080 IWN_WRITE(sc, IWN_INT_MASK, 0);
2081
2082 /* Reset ICT table. */
2083 memset(sc->ict, 0, IWN_ICT_SIZE);
2084 sc->ict_cur = 0;
2085
2086 /* Set physical address of ICT table (4KB aligned). */
2087 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
2088 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
2089
2090 /* Enable periodic RX interrupt. */
2091 sc->int_mask |= IWN_INT_RX_PERIODIC;
2092 /* Switch to ICT interrupt mode in driver. */
2093 sc->sc_flags |= IWN_FLAG_USE_ICT;
2094
2095 /* Re-enable interrupts. */
2096 IWN_WRITE(sc, IWN_INT, 0xffffffff);
2097 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2098 }
2099
2100 static int
2101 iwn_read_eeprom(struct iwn_softc *sc)
2102 {
2103 struct iwn_ops *ops = &sc->ops;
2104 struct ieee80211com *ic = &sc->sc_ic;
2105 uint16_t val;
2106 int error;
2107
2108 /* Check whether adapter has an EEPROM or an OTPROM. */
2109 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
2110 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
2111 sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
2112 IWN_DBG("%s found",
2113 (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
2114
2115 /* Adapter has to be powered on for EEPROM access to work. */
2116 if ((error = iwn_apm_init(sc)) != 0) {
2117 dev_err(sc->sc_dip, CE_WARN,
2118 "!could not power ON adapter");
2119 return error;
2120 }
2121
2122 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
2123 dev_err(sc->sc_dip, CE_WARN,
2124 "!bad ROM signature");
2125 return EIO;
2126 }
2127 if ((error = iwn_eeprom_lock(sc)) != 0) {
2128 dev_err(sc->sc_dip, CE_WARN,
2129 "!could not lock ROM (error=%d)", error);
2130 return error;
2131 }
2132 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
2133 if ((error = iwn_init_otprom(sc)) != 0) {
2134 dev_err(sc->sc_dip, CE_WARN,
2135 "!could not initialize OTPROM");
2136 return error;
2137 }
2138 }
2139
2140 iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
2141 IWN_DBG("SKU capabilities=0x%04x", le16toh(val));
2142 /* Check if HT support is bonded out. */
2143 if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
2144 sc->sc_flags |= IWN_FLAG_HAS_11N;
2145
2146 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
2147 sc->rfcfg = le16toh(val);
2148 IWN_DBG("radio config=0x%04x", sc->rfcfg);
2149 /* Read Tx/Rx chains from ROM unless it's known to be broken. */
2150 if (sc->txchainmask == 0)
2151 sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
2152 if (sc->rxchainmask == 0)
2153 sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
2154
2155 /* Read MAC address. */
2156 iwn_read_prom_data(sc, IWN_EEPROM_MAC, ic->ic_macaddr, 6);
2157
2158 /* Read adapter-specific information from EEPROM. */
2159 ops->read_eeprom(sc);
2160
2161 iwn_apm_stop(sc); /* Power OFF adapter. */
2162
2163 iwn_eeprom_unlock(sc);
2164 return 0;
2165 }
2166
2167 static void
2168 iwn4965_read_eeprom(struct iwn_softc *sc)
2169 {
2170 uint32_t addr;
2171 uint16_t val;
2172 int i;
2173
2174 /* Read regulatory domain (4 ASCII characters). */
2175 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
2176
2177 /* Read the list of authorized channels (20MHz ones only). */
2178 for (i = 0; i < 5; i++) {
2179 addr = iwn4965_regulatory_bands[i];
2180 iwn_read_eeprom_channels(sc, i, addr);
2181 }
2182
2183 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */
2184 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
2185 sc->maxpwr2GHz = val & 0xff;
2186 sc->maxpwr5GHz = val >> 8;
2187 /* Check that EEPROM values are within valid range. */
2188 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
2189 sc->maxpwr5GHz = 38;
2190 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
2191 sc->maxpwr2GHz = 38;
2192 IWN_DBG("maxpwr 2GHz=%d 5GHz=%d", sc->maxpwr2GHz, sc->maxpwr5GHz);
2193
2194 /* Read samples for each TX power group. */
2195 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
2196 sizeof sc->bands);
2197
2198 /* Read voltage at which samples were taken. */
2199 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
2200 sc->eeprom_voltage = (int16_t)le16toh(val);
2201 IWN_DBG("voltage=%d (in 0.3V)", sc->eeprom_voltage);
2202
2203 #ifdef IWN_DEBUG
2204 /* Print samples. */
2205 if (iwn_dbg_print != 0) {
2206 for (i = 0; i < IWN_NBANDS; i++)
2207 iwn4965_print_power_group(sc, i);
2208 }
2209 #endif
2210 }
2211
2212 #ifdef IWN_DEBUG
2213 static void
2214 iwn4965_print_power_group(struct iwn_softc *sc, int i)
2215 {
2216 struct iwn4965_eeprom_band *band = &sc->bands[i];
2217 struct iwn4965_eeprom_chan_samples *chans = band->chans;
2218 int j, c;
2219
2220 dev_err(sc->sc_dip, CE_CONT, "!===band %d===", i);
2221 dev_err(sc->sc_dip, CE_CONT, "!chan lo=%d, chan hi=%d", band->lo,
2222 band->hi);
2223 dev_err(sc->sc_dip, CE_CONT, "!chan1 num=%d", chans[0].num);
2224 for (c = 0; c < 2; c++) {
2225 for (j = 0; j < IWN_NSAMPLES; j++) {
2226 dev_err(sc->sc_dip, CE_CONT, "!chain %d, sample %d: "
2227 "temp=%d gain=%d power=%d pa_det=%d", c, j,
2228 chans[0].samples[c][j].temp,
2229 chans[0].samples[c][j].gain,
2230 chans[0].samples[c][j].power,
2231 chans[0].samples[c][j].pa_det);
2232 }
2233 }
2234 dev_err(sc->sc_dip, CE_CONT, "!chan2 num=%d", chans[1].num);
2235 for (c = 0; c < 2; c++) {
2236 for (j = 0; j < IWN_NSAMPLES; j++) {
2237 dev_err(sc->sc_dip, CE_CONT, "!chain %d, sample %d: "
2238 "temp=%d gain=%d power=%d pa_det=%d", c, j,
2239 chans[1].samples[c][j].temp,
2240 chans[1].samples[c][j].gain,
2241 chans[1].samples[c][j].power,
2242 chans[1].samples[c][j].pa_det);
2243 }
2244 }
2245 }
2246 #endif
2247
2248 static void
2249 iwn5000_read_eeprom(struct iwn_softc *sc)
2250 {
2251 struct iwn5000_eeprom_calib_hdr hdr;
2252 int32_t volt;
2253 uint32_t base, addr;
2254 uint16_t val;
2255 int i;
2256
2257 /* Read regulatory domain (4 ASCII characters). */
2258 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2259 base = le16toh(val);
2260 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
2261 sc->eeprom_domain, 4);
2262
2263 /* Read the list of authorized channels (20MHz ones only). */
2264 for (i = 0; i < 5; i++) {
2265 addr = base + iwn5000_regulatory_bands[i];
2266 iwn_read_eeprom_channels(sc, i, addr);
2267 }
2268
2269 /* Read enhanced TX power information for 6000 Series. */
2270 if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
2271 iwn_read_eeprom_enhinfo(sc);
2272
2273 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
2274 base = le16toh(val);
2275 iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
2276 IWN_DBG("calib version=%u pa type=%u voltage=%u",
2277 hdr.version, hdr.pa_type, le16toh(hdr.volt));
2278 sc->calib_ver = hdr.version;
2279
2280 if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
2281 sc->hw_type == IWN_HW_REV_TYPE_2000 ||
2282 sc->hw_type == IWN_HW_REV_TYPE_135 ||
2283 sc->hw_type == IWN_HW_REV_TYPE_105) {
2284 sc->eeprom_voltage = le16toh(hdr.volt);
2285 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
2286 sc->eeprom_temp = le16toh(val);
2287 iwn_read_prom_data(sc, base + IWN2000_EEPROM_RAWTEMP, &val, 2);
2288 sc->eeprom_rawtemp = le16toh(val);
2289 }
2290
2291 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
2292 /* Compute temperature offset. */
2293 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
2294 sc->eeprom_temp = le16toh(val);
2295 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
2296 volt = le16toh(val);
2297 sc->temp_off = sc->eeprom_temp - (volt / -5);
2298 IWN_DBG("temp=%d volt=%d offset=%dK",
2299 sc->eeprom_temp, volt, sc->temp_off);
2300 } else {
2301 /* Read crystal calibration. */
2302 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
2303 &sc->eeprom_crystal, sizeof (uint32_t));
2304 IWN_DBG("crystal calibration 0x%08x",
2305 le32toh(sc->eeprom_crystal));
2306 }
2307 }
2308
2309 static void
2310 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
2311 {
2312 struct ieee80211com *ic = &sc->sc_ic;
2313 const struct iwn_chan_band *band = &iwn_bands[n];
2314 struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND];
2315 uint8_t chan;
2316 int i;
2317
2318 iwn_read_prom_data(sc, addr, channels,
2319 band->nchan * sizeof (struct iwn_eeprom_chan));
2320
2321 for (i = 0; i < band->nchan; i++) {
2322 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID))
2323 continue;
2324
2325 chan = band->chan[i];
2326
2327 if (n == 0) { /* 2GHz band */
2328 ic->ic_sup_channels[chan].ich_freq =
2329 ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ);
2330 ic->ic_sup_channels[chan].ich_flags =
2331 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
2332 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
2333
2334 } else { /* 5GHz band */
2335 /*
2336 * Some adapters support channels 7, 8, 11 and 12
2337 * both in the 2GHz and 4.9GHz bands.
2338 * Because of limitations in our net80211 layer,
2339 * we don't support them in the 4.9GHz band.
2340 */
2341 if (chan <= 14)
2342 continue;
2343
2344 ic->ic_sup_channels[chan].ich_freq =
2345 ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ);
2346 ic->ic_sup_channels[chan].ich_flags =
2347 IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM;
2348 /* We have at least one valid 5GHz channel. */
2349 sc->sc_flags |= IWN_FLAG_HAS_5GHZ;
2350 }
2351
2352 /* Is active scan allowed on this channel? */
2353 if (!(channels[i].flags & IWN_EEPROM_CHAN_ACTIVE)) {
2354 ic->ic_sup_channels[chan].ich_flags |=
2355 IEEE80211_CHAN_PASSIVE;
2356 }
2357
2358 /* Save maximum allowed TX power for this channel. */
2359 sc->maxpwr[chan] = channels[i].maxpwr;
2360
2361 IWN_DBG("adding chan %d flags=0x%x maxpwr=%d",
2362 chan, channels[i].flags, sc->maxpwr[chan]);
2363 }
2364 }
2365
2366 static void
2367 iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
2368 {
2369 struct iwn_eeprom_enhinfo enhinfo[35];
2370 uint16_t val, base;
2371 int8_t maxpwr;
2372 int i;
2373
2374 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2375 base = le16toh(val);
2376 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
2377 enhinfo, sizeof enhinfo);
2378
2379 memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
2380 for (i = 0; i < __arraycount(enhinfo); i++) {
2381 if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0)
2382 continue; /* Skip invalid entries. */
2383
2384 maxpwr = 0;
2385 if (sc->txchainmask & IWN_ANT_A)
2386 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
2387 if (sc->txchainmask & IWN_ANT_B)
2388 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
2389 if (sc->txchainmask & IWN_ANT_C)
2390 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
2391 if (sc->ntxchains == 2)
2392 maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
2393 else if (sc->ntxchains == 3)
2394 maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
2395 maxpwr /= 2; /* Convert half-dBm to dBm. */
2396
2397 IWN_DBG("enhinfo %d, maxpwr=%d", i, maxpwr);
2398 sc->enh_maxpwr[i] = maxpwr;
2399 }
2400 }
2401
2402 static struct ieee80211_node *
2403 iwn_node_alloc(ieee80211com_t *ic)
2404 {
2405 _NOTE(ARGUNUSED(ic));
2406 return (kmem_zalloc(sizeof (struct iwn_node), KM_NOSLEEP));
2407 }
2408
2409 static void
2410 iwn_node_free(ieee80211_node_t *in)
2411 {
2412 ASSERT(in != NULL);
2413 ASSERT(in->in_ic != NULL);
2414
2415 if (in->in_wpa_ie != NULL)
2416 ieee80211_free(in->in_wpa_ie);
2417
2418 if (in->in_wme_ie != NULL)
2419 ieee80211_free(in->in_wme_ie);
2420
2421 if (in->in_htcap_ie != NULL)
2422 ieee80211_free(in->in_htcap_ie);
2423
2424 kmem_free(in, sizeof (struct iwn_node));
2425 }
2426
2427 static void
2428 iwn_newassoc(struct ieee80211_node *ni, int isnew)
2429 {
2430 _NOTE(ARGUNUSED(isnew));
2431 struct iwn_softc *sc = (struct iwn_softc *)&ni->in_ic;
2432 struct iwn_node *wn = (void *)ni;
2433 uint8_t rate, ridx;
2434 int i;
2435
2436 ieee80211_amrr_node_init(&sc->amrr, &wn->amn);
2437 /*
2438 * Select a medium rate and depend on AMRR to raise/lower it.
2439 */
2440 ni->in_txrate = ni->in_rates.ir_nrates / 2;
2441
2442 for (i = 0; i < ni->in_rates.ir_nrates; i++) {
2443 rate = ni->in_rates.ir_rates[i] & IEEE80211_RATE_VAL;
2444 /* Map 802.11 rate to HW rate index. */
2445 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++)
2446 if (iwn_rates[ridx].rate == rate)
2447 break;
2448 wn->ridx[i] = ridx;
2449 }
2450 }
2451
2452 static int
2453 iwn_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
2454 {
2455 struct iwn_softc *sc = (struct iwn_softc *)ic;
2456 enum ieee80211_state ostate;
2457 int error;
2458
2459 mutex_enter(&sc->sc_mtx);
2460 sc->sc_flags |= IWN_FLAG_STOP_CALIB_TO;
2461 mutex_exit(&sc->sc_mtx);
2462
2463 (void) untimeout(sc->calib_to);
2464 sc->calib_to = 0;
2465
2466 mutex_enter(&sc->sc_mtx);
2467 ostate = ic->ic_state;
2468
2469 DTRACE_PROBE5(new__state, int, sc->sc_flags,
2470 enum ieee80211_state, ostate,
2471 const char *, ieee80211_state_name[ostate],
2472 enum ieee80211_state, nstate,
2473 const char *, ieee80211_state_name[nstate]);
2474
2475 if ((sc->sc_flags & IWN_FLAG_RADIO_OFF) && nstate != IEEE80211_S_INIT) {
2476 mutex_exit(&sc->sc_mtx);
2477 return (IWN_FAIL);
2478 }
2479
2480 if (!(sc->sc_flags & IWN_FLAG_HW_INITED) &&
2481 nstate != IEEE80211_S_INIT) {
2482 mutex_exit(&sc->sc_mtx);
2483 return (IWN_FAIL);
2484 }
2485
2486 switch (nstate) {
2487 case IEEE80211_S_SCAN:
2488 /* XXX Do not abort a running scan. */
2489 if (sc->sc_flags & IWN_FLAG_SCANNING) {
2490 if (ostate != nstate)
2491 dev_err(sc->sc_dip, CE_WARN, "!scan request(%d)"
2492 " while scanning(%d) ignored", nstate,
2493 ostate);
2494 mutex_exit(&sc->sc_mtx);
2495 return (0);
2496 }
2497
2498 bcopy(&sc->rxon, &sc->rxon_save, sizeof (sc->rxon));
2499 sc->sc_ostate = ostate;
2500
2501 /* XXX Not sure if call and flags are needed. */
2502 ieee80211_node_table_reset(&ic->ic_scan);
2503 ic->ic_flags |= IEEE80211_F_SCAN | IEEE80211_F_ASCAN;
2504 sc->sc_flags |= IWN_FLAG_SCANNING_2GHZ;
2505
2506 /* Make the link LED blink while we're scanning. */
2507 iwn_set_led(sc, IWN_LED_LINK, 10, 10);
2508
2509 ic->ic_state = nstate;
2510
2511 error = iwn_scan(sc, IEEE80211_CHAN_2GHZ);
2512 if (error != 0) {
2513 dev_err(sc->sc_dip, CE_WARN,
2514 "!could not initiate scan");
2515 sc->sc_flags &= ~IWN_FLAG_SCANNING;
2516 mutex_exit(&sc->sc_mtx);
2517 return (error);
2518 }
2519
2520 mutex_exit(&sc->sc_mtx);
2521 sc->scan_to = timeout(iwn_abort_scan, sc, iwn_scan_timeout *
2522 drv_usectohz(MICROSEC));
2523 return (error);
2524
2525 case IEEE80211_S_ASSOC:
2526 if (ostate != IEEE80211_S_RUN) {
2527 mutex_exit(&sc->sc_mtx);
2528 break;
2529 }
2530 /* FALLTHROUGH */
2531 case IEEE80211_S_AUTH:
2532 /* Reset state to handle reassociations correctly. */
2533 sc->rxon.associd = 0;
2534 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
2535 sc->calib.state = IWN_CALIB_STATE_INIT;
2536
2537 if ((error = iwn_auth(sc)) != 0) {
2538 mutex_exit(&sc->sc_mtx);
2539 dev_err(sc->sc_dip, CE_WARN,
2540 "!could not move to auth state");
2541 return error;
2542 }
2543 mutex_exit(&sc->sc_mtx);
2544 break;
2545
2546 case IEEE80211_S_RUN:
2547 if ((error = iwn_run(sc)) != 0) {
2548 mutex_exit(&sc->sc_mtx);
2549 dev_err(sc->sc_dip, CE_WARN,
2550 "!could not move to run state");
2551 return error;
2552 }
2553 mutex_exit(&sc->sc_mtx);
2554 break;
2555
2556 case IEEE80211_S_INIT:
2557 sc->sc_flags &= ~IWN_FLAG_SCANNING;
2558 sc->calib.state = IWN_CALIB_STATE_INIT;
2559
2560 /*
2561 * set LED off after init
2562 */
2563 iwn_set_led(sc, IWN_LED_LINK, 1, 0);
2564
2565 cv_signal(&sc->sc_scan_cv);
2566 mutex_exit(&sc->sc_mtx);
2567 if (sc->scan_to != 0)
2568 (void) untimeout(sc->scan_to);
2569 sc->scan_to = 0;
2570 break;
2571 }
2572
2573 error = sc->sc_newstate(ic, nstate, arg);
2574
2575 if (nstate == IEEE80211_S_RUN)
2576 ieee80211_start_watchdog(ic, 1);
2577
2578 return (error);
2579 }
2580
2581 static void
2582 iwn_iter_func(void *arg, struct ieee80211_node *ni)
2583 {
2584 struct iwn_softc *sc = arg;
2585 struct iwn_node *wn = (struct iwn_node *)ni;
2586
2587 ieee80211_amrr_choose(&sc->amrr, ni, &wn->amn);
2588 }
2589
2590 static void
2591 iwn_calib_timeout(void *arg)
2592 {
2593 struct iwn_softc *sc = arg;
2594 struct ieee80211com *ic = &sc->sc_ic;
2595
2596 mutex_enter(&sc->sc_mtx);
2597
2598 if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
2599 if (ic->ic_opmode == IEEE80211_M_STA)
2600 iwn_iter_func(sc, ic->ic_bss);
2601 else
2602 ieee80211_iterate_nodes(&ic->ic_sta, iwn_iter_func, sc);
2603 }
2604 /* Force automatic TX power calibration every 60 secs. */
2605 if (++sc->calib_cnt >= 120) {
2606 uint32_t flags = 0;
2607
2608 DTRACE_PROBE(get__statistics);
2609 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2610 sizeof flags, 1);
2611 sc->calib_cnt = 0;
2612 }
2613
2614 /* Automatic rate control triggered every 500ms. */
2615 if ((sc->sc_flags & IWN_FLAG_STOP_CALIB_TO) == 0)
2616 sc->calib_to = timeout(iwn_calib_timeout, sc,
2617 drv_usectohz(500000));
2618
2619 mutex_exit(&sc->sc_mtx);
2620 }
2621
2622 /*
2623 * Process an RX_PHY firmware notification. This is usually immediately
2624 * followed by an MPDU_RX_DONE notification.
2625 */
2626 static void
2627 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2628 struct iwn_rx_data *data)
2629 {
2630 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
2631
2632 (void) ddi_dma_sync(data->dma_data.dma_hdl, sizeof (*desc),
2633 sizeof (*stat), DDI_DMA_SYNC_FORKERNEL);
2634
2635 DTRACE_PROBE1(rx__phy, struct iwn_rx_stat *, stat);
2636
2637 /* Save RX statistics, they will be used on MPDU_RX_DONE. */
2638 memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
2639 sc->last_rx_valid = 1;
2640 }
2641
2642 /*
2643 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2644 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2645 */
2646 static void
2647 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2648 struct iwn_rx_data *data)
2649 {
2650 struct iwn_ops *ops = &sc->ops;
2651 struct ieee80211com *ic = &sc->sc_ic;
2652 struct iwn_rx_ring *ring = &sc->rxq;
2653 struct ieee80211_frame *wh;
2654 struct ieee80211_node *ni;
2655 mblk_t *m;
2656 struct iwn_rx_stat *stat;
2657 char *head;
2658 uint32_t flags;
2659 int len, rssi;
2660
2661 if (desc->type == IWN_MPDU_RX_DONE) {
2662 /* Check for prior RX_PHY notification. */
2663 if (!sc->last_rx_valid) {
2664 dev_err(sc->sc_dip, CE_WARN,
2665 			    "!missing RX_PHY notification");
2666 return;
2667 }
2668 sc->last_rx_valid = 0;
2669 stat = &sc->last_rx_stat;
2670 } else
2671 stat = (struct iwn_rx_stat *)(desc + 1);
2672
2673 (void) ddi_dma_sync(data->dma_data.dma_hdl, 0, 0,
2674 DDI_DMA_SYNC_FORKERNEL);
2675
2676 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2677 dev_err(sc->sc_dip, CE_WARN,
2678 "!invalid RX statistic header");
2679 return;
2680 }
2681 if (desc->type == IWN_MPDU_RX_DONE) {
2682 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2683 head = (char *)(mpdu + 1);
2684 len = le16toh(mpdu->len);
2685 } else {
2686 head = (char *)(stat + 1) + stat->cfg_phy_len;
2687 len = le16toh(stat->len);
2688 }
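	/*
	 * The firmware appends a 32-bit RX status word immediately after the
	 * frame body; read it here so frames with errors can be dropped below.
	 */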
2689 /*LINTED: E_PTR_BAD_CAST_ALIGN*/
2690 flags = le32toh(*(uint32_t *)(head + len));
2691
2692 /* Discard frames with a bad FCS early. */
2693 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2694 sc->sc_rx_err++;
2695 ic->ic_stats.is_fcs_errors++;
2696 return;
2697 }
2698 /* Discard frames that are too short. */
2699 if (len < sizeof (*wh)) {
2700 sc->sc_rx_err++;
2701 return;
2702 }
2703
2704 m = allocb(len, BPRI_MED);
2705 if (m == NULL) {
2706 sc->sc_rx_nobuf++;
2707 return;
2708 }
2709
2710 /* Update RX descriptor. */
2711 ring->desc[ring->cur] =
2712 htole32(data->dma_data.paddr >> 8);
2713 (void) ddi_dma_sync(ring->desc_dma.dma_hdl,
2714 ring->cur * sizeof (uint32_t), sizeof (uint32_t),
2715 DDI_DMA_SYNC_FORDEV);
2716
2717 /* Grab a reference to the source node. */
2718 wh = (struct ieee80211_frame*)head;
2719 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame *)wh);
2720
2721 /* XXX OpenBSD adds decryption here (see also comments in iwn_tx). */
2722 /* NetBSD does decryption in ieee80211_input. */
2723
2724 rssi = ops->get_rssi(stat);
2725
2726 /*
2727 * convert dBm to percentage
2728 */
2729 rssi = (100 * 75 * 75 - (-20 - rssi) * (15 * 75 + 62 * (-20 - rssi)))
2730 / (75 * 75);
2731 if (rssi > 100)
2732 rssi = 100;
2733 else if (rssi < 1)
2734 rssi = 1;
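	/*
	 * Worked example for the mapping above (illustrative values only):
	 * an RSSI of -60 dBm yields (562500 - 40 * (1125 + 62 * 40)) / 5625
	 * = 74, i.e. roughly 74%.  Signals at or above -20 dBm map to 100,
	 * and anything weaker than about -107 dBm is clamped to 1.
	 */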
2735
2736 bcopy(wh, m->b_wptr, len);
2737 m->b_wptr += len;
2738
2739 /* XXX Added for NetBSD: scans never stop without it */
2740 if (ic->ic_state == IEEE80211_S_SCAN)
2741 iwn_fix_channel(sc, m, stat);
2742
2743 /* Send the frame to the 802.11 layer. */
2744 ieee80211_input(ic, m, ni, rssi, 0);
2745
2746 /* Node is no longer needed. */
2747 ieee80211_free_node(ni);
2748 }
2749
2750 #ifndef IEEE80211_NO_HT
2751 /* Process an incoming Compressed BlockAck. */
2752 static void
2753 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2754 struct iwn_rx_data *data)
2755 {
2756 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
2757 struct iwn_tx_ring *txq;
2758
2759 (void) ddi_dma_sync(data->dma_data.dma_hdl, sizeof (*desc),
2760 sizeof (*ba), DDI_DMA_SYNC_FORKERNEL);
2761
2762 txq = &sc->txq[le16toh(ba->qid)];
2763 /* XXX TBD */
2764 }
2765 #endif
2766
2767 /*
2768 * Process a CALIBRATION_RESULT notification sent by the initialization
2769  * firmware in response to a CMD_CALIB_CONFIG command (5000 only).
2770 */
2771 static void
2772 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2773 struct iwn_rx_data *data)
2774 {
2775 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
2776 int len, idx = -1;
2777
2778 /* Runtime firmware should not send such a notification. */
2779 if (sc->sc_flags & IWN_FLAG_CALIB_DONE)
2780 return;
2781
2782 len = (le32toh(desc->len) & 0x3fff) - 4;
2783 (void) ddi_dma_sync(data->dma_data.dma_hdl, sizeof (*desc), len,
2784 DDI_DMA_SYNC_FORKERNEL);
2785
2786 switch (calib->code) {
2787 case IWN5000_PHY_CALIB_DC:
2788 if (sc->hw_type == IWN_HW_REV_TYPE_5150 ||
2789 sc->hw_type == IWN_HW_REV_TYPE_2030 ||
2790 sc->hw_type == IWN_HW_REV_TYPE_2000 ||
2791 sc->hw_type == IWN_HW_REV_TYPE_135 ||
2792 sc->hw_type == IWN_HW_REV_TYPE_105)
2793 idx = 0;
2794 break;
2795 case IWN5000_PHY_CALIB_LO:
2796 idx = 1;
2797 break;
2798 case IWN5000_PHY_CALIB_TX_IQ:
2799 idx = 2;
2800 break;
2801 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
2802 if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
2803 sc->hw_type != IWN_HW_REV_TYPE_5150)
2804 idx = 3;
2805 break;
2806 case IWN5000_PHY_CALIB_BASE_BAND:
2807 idx = 4;
2808 break;
2809 }
2810 if (idx == -1) /* Ignore other results. */
2811 return;
2812
2813 /* Save calibration result. */
2814 if (sc->calibcmd[idx].buf != NULL)
2815 kmem_free(sc->calibcmd[idx].buf, sc->calibcmd[idx].len);
2816 sc->calibcmd[idx].buf = kmem_zalloc(len, KM_NOSLEEP);
2817 if (sc->calibcmd[idx].buf == NULL) {
2818 return;
2819 }
2820 sc->calibcmd[idx].len = len;
2821 memcpy(sc->calibcmd[idx].buf, calib, len);
2822 }
2823
2824 /*
2825 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2826 * The latter is sent by the firmware after each received beacon.
2827 */
2828 static void
2829 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2830 struct iwn_rx_data *data)
2831 {
2832 struct iwn_ops *ops = &sc->ops;
2833 struct ieee80211com *ic = &sc->sc_ic;
2834 struct iwn_calib_state *calib = &sc->calib;
2835 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2836 int temp = 0;
2837
2838 /* Ignore statistics received during a scan. */
2839 if (ic->ic_state != IEEE80211_S_RUN)
2840 return;
2841
2842 (void) ddi_dma_sync(data->dma_data.dma_hdl, sizeof (*desc),
2843 sizeof (*stats), DDI_DMA_SYNC_FORKERNEL);
2844
2845 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */
2846
2847 /* Test if temperature has changed. */
2848 if (stats->general.temp != sc->rawtemp) {
2849 /* Convert "raw" temperature to degC. */
2850 sc->rawtemp = stats->general.temp;
2851 temp = ops->get_temperature(sc);
2852 sc->sc_misc->temp.value.ul = temp;
2853
2854 /* Update TX power if need be (4965AGN only). */
2855 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2856 iwn4965_power_calibration(sc, temp);
2857 }
2858
2859 DTRACE_PROBE2(rx__statistics, struct iwn_stats *, stats, int, temp);
2860
2861 if (desc->type != IWN_BEACON_STATISTICS)
2862 return; /* Reply to a statistics request. */
2863
2864 sc->noise = iwn_get_noise(&stats->rx.general);
2865 sc->sc_misc->noise.value.l = sc->noise;
2866
2867 /* Test that RSSI and noise are present in stats report. */
2868 if (le32toh(stats->rx.general.flags) != 1) {
2869 return;
2870 }
2871
2872 /*
2873 * XXX Differential gain calibration makes the 6005 firmware
2874 * crap out, so skip it for now. This effectively disables
2875 * sensitivity tuning as well.
2876 */
2877 if (sc->hw_type == IWN_HW_REV_TYPE_6005)
2878 return;
2879
2880 if (calib->state == IWN_CALIB_STATE_ASSOC)
2881 iwn_collect_noise(sc, &stats->rx.general);
2882 else if (calib->state == IWN_CALIB_STATE_RUN)
2883 iwn_tune_sensitivity(sc, &stats->rx);
2884 }
2885
2886 /*
2887 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN
2888 * and 5000 adapters have different incompatible TX status formats.
2889 */
2890 static void
2891 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2892 struct iwn_rx_data *data)
2893 {
2894 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2895
2896 (void) ddi_dma_sync(data->dma_data.dma_hdl, sizeof (*desc),
2897 sizeof (*stat), DDI_DMA_SYNC_FORKERNEL);
2898 iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff);
2899 }
2900
2901 static void
2902 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2903 struct iwn_rx_data *data)
2904 {
2905 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2906
2907 #ifdef notyet
2908 /* Reset TX scheduler slot. */
2909 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2910 #endif
2911
2912 (void) ddi_dma_sync(data->dma_data.dma_hdl, sizeof (*desc),
2913 sizeof (*stat), DDI_DMA_SYNC_FORKERNEL);
2914 iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff);
2915 }
2916
2917 /*
2918 * Adapter-independent backend for TX_DONE firmware notifications.
2919 */
2920 static void
2921 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
2922 uint8_t status)
2923 {
2924 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2925 struct iwn_tx_data *data = &ring->data[desc->idx];
2926 struct iwn_node *wn = (struct iwn_node *)data->ni;
2927
2928 /* Update rate control statistics. */
2929 wn->amn.amn_txcnt++;
2930 if (ackfailcnt > 0)
2931 wn->amn.amn_retrycnt++;
2932
2933 if (status != 1 && status != 2)
2934 sc->sc_tx_err++;
2935 else
2936 sc->sc_ic.ic_stats.is_tx_frags++;
2937
2938 ieee80211_free_node(data->ni);
2939 data->ni = NULL;
2940
2941 mutex_enter(&sc->sc_tx_mtx);
2942 sc->sc_tx_timer = 0;
2943 if (--ring->queued < IWN_TX_RING_LOMARK) {
2944 sc->qfullmsk &= ~(1 << ring->qid);
2945 }
2946 mac_tx_update(sc->sc_ic.ic_mach);
2947 mutex_exit(&sc->sc_tx_mtx);
2948 }
2949
2950 /*
2951  * Process a "command done" firmware notification. This is where we wake up
2952 * processes waiting for a synchronous command completion.
2953 */
2954 static void
2955 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2956 {
2957 struct iwn_tx_ring *ring = &sc->txq[IWN_CMD_QUEUE_NUM];
2958 struct iwn_tx_data *data;
2959
2960 if ((desc->qid & 0xf) != IWN_CMD_QUEUE_NUM)
2961 return; /* Not a command ack. */
2962
2963 data = &ring->data[desc->idx];
2964
2965 (void) ddi_dma_sync(data->dma_data.dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
2966
2967 /* If the command was mapped in an extra buffer, free it. */
2968 if (data->cmd_dma.dma_hdl) {
2969 (void) ddi_dma_sync(data->cmd_dma.dma_hdl, 0, 0,
2970 DDI_DMA_SYNC_FORDEV);
2971 iwn_dma_contig_free(&data->cmd_dma);
2972 }
2973
2974 mutex_enter(&sc->sc_mtx);
2975 sc->sc_cmd_flag = SC_CMD_FLG_DONE;
2976 cv_signal(&sc->sc_cmd_cv);
2977 mutex_exit(&sc->sc_mtx);
2978 }
2979
2980 /*
2981 * Process an INT_FH_RX or INT_SW_RX interrupt.
2982 */
2983 static void
2984 iwn_notif_intr(struct iwn_softc *sc)
2985 {
2986 struct iwn_ops *ops = &sc->ops;
2987 struct ieee80211com *ic = &sc->sc_ic;
2988 uint16_t hw;
2989
2990 ASSERT(sc != NULL);
2991
2992 (void) ddi_dma_sync(sc->rxq.stat_dma.dma_hdl, 0, 0,
2993 DDI_DMA_SYNC_FORKERNEL);
2994
2995 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
2996 while (sc->rxq.cur != hw) {
2997 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2998 struct iwn_rx_desc *desc;
2999
3000 (void) ddi_dma_sync(data->dma_data.dma_hdl, 0, sizeof (*desc),
3001 DDI_DMA_SYNC_FORKERNEL);
3002 desc = (struct iwn_rx_desc *)data->dma_data.vaddr;
3003
3004 DTRACE_PROBE1(notification__intr, struct iwn_rx_desc *, desc);
3005
3006 if (!(desc->qid & 0x80)) /* Reply to a command. */
3007 iwn_cmd_done(sc, desc);
3008
3009 switch (desc->type) {
3010 case IWN_RX_PHY:
3011 iwn_rx_phy(sc, desc, data);
3012 break;
3013
3014 case IWN_RX_DONE: /* 4965AGN only. */
3015 case IWN_MPDU_RX_DONE:
3016 /* An 802.11 frame has been received. */
3017 iwn_rx_done(sc, desc, data);
3018 break;
3019 #ifndef IEEE80211_NO_HT
3020 case IWN_RX_COMPRESSED_BA:
3021 /* A Compressed BlockAck has been received. */
3022 iwn_rx_compressed_ba(sc, desc, data);
3023 break;
3024 #endif
3025 case IWN_TX_DONE:
3026 /* An 802.11 frame has been transmitted. */
3027 ops->tx_done(sc, desc, data);
3028 break;
3029
3030 case IWN_RX_STATISTICS:
3031 case IWN_BEACON_STATISTICS:
3032 mutex_enter(&sc->sc_mtx);
3033 iwn_rx_statistics(sc, desc, data);
3034 mutex_exit(&sc->sc_mtx);
3035 break;
3036
3037 case IWN_BEACON_MISSED:
3038 {
3039 struct iwn_beacon_missed *miss =
3040 (struct iwn_beacon_missed *)(desc + 1);
3041
3042 (void) ddi_dma_sync(data->dma_data.dma_hdl,
3043 sizeof (*desc), sizeof (*miss),
3044 DDI_DMA_SYNC_FORKERNEL);
3045 /*
3046 * If more than iwn_beacons_missed_disconnect
3047 * consecutive beacons are missed, we've probably lost
3048 * our connection.
3049 * If more than iwn_beacons_missed_sensitivity
3050 * consecutive beacons are missed, reinitialize the
3051 * sensitivity state machine.
3052 */
3053 DTRACE_PROBE1(beacons__missed,
3054 struct iwn_beacon_missed *, miss);
3055 if (ic->ic_state == IEEE80211_S_RUN) {
3056 if (le32toh(miss->consecutive)
3057 > iwn_beacons_missed_disconnect) {
3058 dev_err(sc->sc_dip, CE_WARN,
3059 "!iwn_notif_intr(): %d consecutive "
3060 "beacons missed, disconnecting",
3061 le32toh(miss->consecutive));
3062 ieee80211_new_state(ic,
3063 IEEE80211_S_INIT, -1);
3064 } else if (le32toh(miss->consecutive)
3065 > iwn_beacons_missed_sensitivity) {
3066 mutex_enter(&sc->sc_mtx);
3067 (void)iwn_init_sensitivity(sc);
3068 mutex_exit(&sc->sc_mtx);
3069 }
3070 }
3071 break;
3072 }
3073 case IWN_UC_READY:
3074 {
3075 struct iwn_ucode_info *uc =
3076 (struct iwn_ucode_info *)(desc + 1);
3077
3078 /* The microcontroller is ready. */
3079 (void) ddi_dma_sync(data->dma_data.dma_hdl,
3080 sizeof (*desc), sizeof (*uc),
3081 DDI_DMA_SYNC_FORKERNEL);
3082 DTRACE_PROBE1(uc__ready, struct iwn_ucode_info *, uc)
3083
3084 if (le32toh(uc->valid) != 1) {
3085 dev_err(sc->sc_dip, CE_WARN,
3086 "!microcontroller initialization failed");
3087 break;
3088 }
3089 if (uc->subtype == IWN_UCODE_INIT) {
3090 /* Save microcontroller report. */
3091 memcpy(&sc->ucode_info, uc, sizeof (*uc));
3092 }
3093 /* Save the address of the error log in SRAM. */
3094 sc->errptr = le32toh(uc->errptr);
3095 break;
3096 }
3097 case IWN_STATE_CHANGED:
3098 {
3099 /*LINTED: E_PTR_BAD_CAST_ALIGN*/
3100 uint32_t *status = (uint32_t *)(desc + 1);
3101
3102 /* Enabled/disabled notification. */
3103 (void) ddi_dma_sync(data->dma_data.dma_hdl,
3104 sizeof (*desc), sizeof (*status),
3105 DDI_DMA_SYNC_FORKERNEL);
3106 DTRACE_PROBE1(state__changed, uint32_t, *status);
3107
3108 if (le32toh(*status) & 1) {
3109 				/* Radio disabled by the hardware RF kill switch. */
3110 dev_err(sc->sc_dip, CE_WARN,
3111 "!Radio transmitter is off");
3112 /* Turn the interface down. */
3113 mutex_enter(&sc->sc_mtx);
3114 sc->sc_flags |=
3115 IWN_FLAG_HW_ERR_RECOVER |
3116 IWN_FLAG_RADIO_OFF;
3117 mutex_exit(&sc->sc_mtx);
3118 ieee80211_new_state(&sc->sc_ic,
3119 IEEE80211_S_INIT, -1);
3120
3121 return; /* No further processing. */
3122 }
3123 break;
3124 }
3125 case IWN_START_SCAN:
3126 {
3127 struct iwn_start_scan *scan =
3128 (struct iwn_start_scan *)(desc + 1);
3129
3130 (void) ddi_dma_sync(data->dma_data.dma_hdl,
3131 sizeof (*desc), sizeof (*scan),
3132 DDI_DMA_SYNC_FORKERNEL);
3133 DTRACE_PROBE2(start__scan, uint8_t, scan->chan,
3134 uint32_t, le32toh(scan->status));
3135
3136 /* Fix current channel. */
3137 ic->ic_curchan = ic->ic_bss->in_chan =
3138 &ic->ic_sup_channels[scan->chan];
3139 break;
3140 }
3141 case IWN_STOP_SCAN:
3142 {
3143 struct iwn_stop_scan *scan =
3144 (struct iwn_stop_scan *)(desc + 1);
3145
3146 (void) ddi_dma_sync(data->dma_data.dma_hdl,
3147 sizeof (*desc), sizeof (*scan),
3148 DDI_DMA_SYNC_FORKERNEL);
3149 DTRACE_PROBE3(stop__scan, uint8_t, scan->chan,
3150 uint32_t, le32toh(scan->status),
3151 uint8_t, scan->nchan);
3152
3153 if (iwn_enable_5ghz != 0 &&
3154 (sc->sc_flags & IWN_FLAG_SCANNING_2GHZ) &&
3155 (sc->sc_flags & IWN_FLAG_HAS_5GHZ)) {
3156 /*
3157 * We just finished scanning 2GHz channels,
3158 * start scanning 5GHz ones.
3159 */
3160 mutex_enter(&sc->sc_mtx);
3161 sc->sc_flags |= IWN_FLAG_SCANNING_5GHZ;
3162 sc->sc_flags &= ~IWN_FLAG_SCANNING_2GHZ;
3163 if (iwn_scan(sc, IEEE80211_CHAN_5GHZ) == 0) {
3164 mutex_exit(&sc->sc_mtx);
3165 break;
3166 }
3167 mutex_exit(&sc->sc_mtx);
3168 }
3169 ieee80211_end_scan(ic);
3170 mutex_enter(&sc->sc_mtx);
3171 sc->sc_flags &= ~IWN_FLAG_SCANNING;
3172 cv_signal(&sc->sc_scan_cv);
3173 mutex_exit(&sc->sc_mtx);
3174 (void) untimeout(sc->scan_to);
3175 sc->scan_to = 0;
3176 break;
3177 }
3178 case IWN5000_CALIBRATION_RESULT:
3179 iwn5000_rx_calib_results(sc, desc, data);
3180 break;
3181
3182 case IWN5000_CALIBRATION_DONE:
3183 mutex_enter(&sc->sc_mtx);
3184 sc->sc_flags |= IWN_FLAG_CALIB_DONE;
3185 cv_signal(&sc->sc_calib_cv);
3186 mutex_exit(&sc->sc_mtx);
3187 break;
3188 }
3189
3190 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
3191 }
3192
3193 /* Tell the firmware what we have processed. */
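	/*
	 * The hardware write pointer trails the last processed entry by one
	 * and, as in the other iwn ports, is assumed to require 8-entry
	 * alignment, hence the "& ~7" below.
	 */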
3194 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
3195 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
3196 }
3197
3198 /*
3199 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
3200 * from power-down sleep mode.
3201 */
3202 static void
3203 iwn_wakeup_intr(struct iwn_softc *sc)
3204 {
3205 int qid;
3206
3207 DTRACE_PROBE(wakeup__intr);
3208
3209 /* Wakeup RX and TX rings. */
3210 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
3211 for (qid = 0; qid < sc->ntxqs; qid++) {
3212 struct iwn_tx_ring *ring = &sc->txq[qid];
3213 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
3214 }
3215 }
3216
3217 /*
3218 * Dump the error log of the firmware when a firmware panic occurs. Although
3219 * we can't debug the firmware because it is neither open source nor free, it
3220 * can help us to identify certain classes of problems.
3221 */
3222 static void
3223 iwn_fatal_intr(struct iwn_softc *sc)
3224 {
3225 struct iwn_fw_dump *dump;
3226 uint32_t buf[P2ROUNDUP(sizeof (*dump), sizeof (uint32_t)) /
3227 sizeof (uint32_t)];
3228 int i;
3229
3230 /* Force a complete recalibration on next init. */
3231 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
3232
3233 /* Check that the error log address is valid. */
3234 if (sc->errptr < IWN_FW_DATA_BASE ||
3235 sc->errptr + sizeof (*dump) >
3236 IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
3237 dev_err(sc->sc_dip, CE_WARN,
3238 "!bad firmware error log address 0x%08x", sc->errptr);
3239 return;
3240 }
3241 if (iwn_nic_lock(sc) != 0) {
3242 dev_err(sc->sc_dip, CE_WARN,
3243 "!could not read firmware error log");
3244 return;
3245 }
3246 /* Read firmware error log from SRAM. */
3247 dump = (struct iwn_fw_dump *)buf;
3248 iwn_mem_read_region_4(sc, sc->errptr, buf, ARRAY_SIZE(buf));
3249 iwn_nic_unlock(sc);
3250
3251 if (dump->valid == 0) {
3252 dev_err(sc->sc_dip, CE_WARN,
3253 "!firmware error log is empty");
3254 return;
3255 }
3256 dev_err(sc->sc_dip, CE_WARN, "!firmware error log:");
3257 dev_err(sc->sc_dip, CE_CONT, "! error type = \"%s\" (0x%08X)",
3258 (dump->id < __arraycount(iwn_fw_errmsg)) ?
3259 iwn_fw_errmsg[dump->id] : "UNKNOWN",
3260 dump->id);
3261 dev_err(sc->sc_dip, CE_CONT, "! program counter = 0x%08X", dump->pc);
3262 dev_err(sc->sc_dip, CE_CONT, "! source line = 0x%08X",
3263 dump->src_line);
3264 dev_err(sc->sc_dip, CE_CONT, "! error data = 0x%08X%08X",
3265 dump->error_data[0], dump->error_data[1]);
3266 dev_err(sc->sc_dip, CE_CONT, "! branch link = 0x%08X%08X",
3267 dump->branch_link[0], dump->branch_link[1]);
3268 dev_err(sc->sc_dip, CE_CONT, "! interrupt link = 0x%08X%08X",
3269 dump->interrupt_link[0], dump->interrupt_link[1]);
3270 dev_err(sc->sc_dip, CE_CONT, "! time = %u", dump->time[0]);
3271
3272 /* Dump driver status (TX and RX rings) while we're here. */
3273 dev_err(sc->sc_dip, CE_WARN, "!driver status:");
3274 for (i = 0; i < sc->ntxqs; i++) {
3275 struct iwn_tx_ring *ring = &sc->txq[i];
3276 dev_err(sc->sc_dip, CE_WARN,
3277 "! tx ring %2d: qid=%2d cur=%3d queued=%3d",
3278 i, ring->qid, ring->cur, ring->queued);
3279 }
3280 dev_err(sc->sc_dip, CE_WARN, "! rx ring: cur=%d", sc->rxq.cur);
3281 dev_err(sc->sc_dip, CE_WARN, "! 802.11 state %d", sc->sc_ic.ic_state);
3282 }
3283
3284 /*ARGSUSED1*/
3285 static uint_t
3286 iwn_intr(caddr_t arg, caddr_t unused)
3287 {
3288 _NOTE(ARGUNUSED(unused));
3289 /*LINTED: E_PTR_BAD_CAST_ALIGN*/
3290 struct iwn_softc *sc = (struct iwn_softc *)arg;
3291 uint32_t r1, r2, tmp;
3292
3293 if (sc == NULL)
3294 return (DDI_INTR_UNCLAIMED);
3295
3296 /* Disable interrupts. */
3297 IWN_WRITE(sc, IWN_INT_MASK, 0);
3298
3299 /* Read interrupts from ICT (fast) or from registers (slow). */
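	/*
	 * In ICT mode each pending table entry holds a compressed form of the
	 * interrupt cause register: the loop below ORs all pending entries
	 * together, then r1 is rebuilt by moving bits 8-15 of the accumulated
	 * value into bits 24-31 while keeping bits 0-7 in place.  The 0x8000
	 * fixup applied when bits in 0xc0000 are set works around a hardware
	 * quirk inherited from the other iwn ports.
	 */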
3300 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3301 (void) ddi_dma_sync(sc->ict_dma.dma_hdl, 0, 0,
3302 DDI_DMA_SYNC_FORKERNEL);
3303 tmp = 0;
3304 while (sc->ict[sc->ict_cur] != 0) {
3305 tmp |= sc->ict[sc->ict_cur];
3306 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */
3307 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
3308 }
3309 (void) ddi_dma_sync(sc->ict_dma.dma_hdl, 0, 0,
3310 DDI_DMA_SYNC_FORDEV);
3311 tmp = le32toh(tmp);
3312 if (tmp == 0xffffffff) /* Shouldn't happen. */
3313 tmp = 0;
3314 else if (tmp & 0xc0000) /* Workaround a HW bug. */
3315 tmp |= 0x8000;
3316 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
3317 r2 = 0; /* Unused. */
3318 } else {
3319 r1 = IWN_READ(sc, IWN_INT);
3320 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
3321 return (DDI_INTR_UNCLAIMED); /* Hardware gone! */
3322 r2 = IWN_READ(sc, IWN_FH_INT);
3323 }
3324 if (r1 == 0 && r2 == 0) {
3325 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
3326 return (DDI_INTR_UNCLAIMED); /* Interrupt not for us. */
3327 }
3328
3329 /* Acknowledge interrupts. */
3330 IWN_WRITE(sc, IWN_INT, r1);
3331 if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
3332 IWN_WRITE(sc, IWN_FH_INT, r2);
3333
3334 if (r1 & IWN_INT_RF_TOGGLED) {
3335 tmp = IWN_READ(sc, IWN_GP_CNTRL);
3336 dev_err(sc->sc_dip, CE_NOTE,
3337 "!RF switch: radio %s",
3338 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
3339 }
3340 if (r1 & IWN_INT_CT_REACHED) {
3341 dev_err(sc->sc_dip, CE_WARN,
3342 "!critical temperature reached!");
3343 }
3344 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
3345 dev_err(sc->sc_dip, CE_WARN,
3346 "!fatal firmware error");
3347 /* Dump firmware error log and stop. */
3348 iwn_fatal_intr(sc);
3349 iwn_hw_stop(sc, B_TRUE);
3350 if (!IWN_CHK_FAST_RECOVER(sc))
3351 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
3352 mutex_enter(&sc->sc_mtx);
3353 sc->sc_flags |= IWN_FLAG_HW_ERR_RECOVER;
3354 mutex_exit(&sc->sc_mtx);
3355
3356 return (DDI_INTR_CLAIMED);
3357 }
3358 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
3359 (r2 & IWN_FH_INT_RX)) {
3360 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3361 int ena = (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX));
3362
3363 if (ena)
3364 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
3365 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3366 IWN_INT_PERIODIC_DIS);
3367 iwn_notif_intr(sc);
3368 if (ena)
3369 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3370 IWN_INT_PERIODIC_ENA);
3371 } else {
3372 iwn_notif_intr(sc);
3373 }
3374 }
3375
3376 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
3377 if (sc->sc_flags & IWN_FLAG_USE_ICT)
3378 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
3379 mutex_enter(&sc->sc_mtx);
3380 sc->sc_flags |= IWN_FLAG_FW_DMA;
3381 cv_signal(&sc->sc_fhdma_cv);
3382 mutex_exit(&sc->sc_mtx);
3383 }
3384
3385 if (r1 & IWN_INT_ALIVE) {
3386 mutex_enter(&sc->sc_mtx);
3387 sc->sc_flags |= IWN_FLAG_FW_ALIVE;
3388 cv_signal(&sc->sc_alive_cv);
3389 mutex_exit(&sc->sc_mtx);
3390 }
3391
3392 if (r1 & IWN_INT_WAKEUP)
3393 iwn_wakeup_intr(sc);
3394
3395 /* Re-enable interrupts. */
3396 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
3397 return (DDI_INTR_CLAIMED);
3398 }
3399
3400 /*
3401 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
3402 * 5000 adapters use a slightly different format).
3403 */
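/*
 * Each queue has one byte-count table entry per TX descriptor; the entry
 * stores the frame length plus 8 bytes of overhead (and, on 5000 Series,
 * the station id in the upper nibble of the 16-bit word).  The first
 * IWN_SCHED_WINSZ entries are mirrored past the end of the ring,
 * presumably so the scheduler can read a full window without wrapping.
 */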
3404 static void
3405 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3406 uint16_t len)
3407 {
3408 _NOTE(ARGUNUSED(id));
3409 int w_idx = qid * IWN4965_SCHED_COUNT + idx;
3410 uint16_t *w = &sc->sched[w_idx];
3411
3412 *w = htole16(len + 8);
3413 (void) ddi_dma_sync(sc->sched_dma.dma_hdl, w_idx * sizeof (uint16_t),
3414 sizeof (uint16_t), DDI_DMA_SYNC_FORDEV);
3415 if (idx < IWN_SCHED_WINSZ) {
3416 *(w + IWN_TX_RING_COUNT) = *w;
3417 (void) ddi_dma_sync(sc->sched_dma.dma_hdl,
3418 (w_idx + IWN_TX_RING_COUNT) * sizeof (uint16_t),
3419 sizeof (uint16_t), DDI_DMA_SYNC_FORDEV);
3420 }
3421 }
3422
3423 static void
3424 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3425 uint16_t len)
3426 {
3427 int w_idx = qid * IWN5000_SCHED_COUNT + idx;
3428 uint16_t *w = &sc->sched[w_idx];
3429
3430 *w = htole16(id << 12 | (len + 8));
3431 (void) ddi_dma_sync(sc->sched_dma.dma_hdl, w_idx * sizeof (uint16_t),
3432 sizeof (uint16_t), DDI_DMA_SYNC_FORDEV);
3433 if (idx < IWN_SCHED_WINSZ) {
3434 *(w + IWN_TX_RING_COUNT) = *w;
3435 (void) ddi_dma_sync(sc->sched_dma.dma_hdl,
3436 (w_idx + IWN_TX_RING_COUNT) * sizeof (uint16_t),
3437 sizeof (uint16_t), DDI_DMA_SYNC_FORDEV);
3438 }
3439 }
3440
3441 #ifdef notyet
3442 static void
3443 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
3444 {
3445 int w_idx = qid * IWN5000_SCHED_COUNT + idx;
3446 uint16_t *w = &sc->sched[w_idx];
3447
3448 *w = (*w & htole16(0xf000)) | htole16(1);
3449 (void) ddi_dma_sync(sc->sched_dma.dma_hdl, w_idx * sizeof (uint16_t),
3450 sizeof (uint16_t), DDI_DMA_SYNC_FORDEV);
3451 if (idx < IWN_SCHED_WINSZ) {
3452 *(w + IWN_TX_RING_COUNT) = *w;
3453 (void) ddi_dma_sync(sc->sched_dma.dma_hdl,
3454 (w_idx + IWN_TX_RING_COUNT) * sizeof (uint16_t),
3455 sizeof (uint16_t), DDI_DMA_SYNC_FORDEV);
3456 }
3457 }
3458 #endif
3459
3460 /*
3461  * This function exists only for compatibility with the net80211 module;
3462  * iwn_qosparam_to_hw() is the function that actually updates the EDCA
3463  * parameters in the hardware.
3464 */
3465 static int
3466 iwn_wme_update(struct ieee80211com *ic)
3467 {
3468 _NOTE(ARGUNUSED(ic));
3469 return (0);
3470 }
3471
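/*
 * Map a net80211 WME access category onto the firmware's QoS AC index.
 * BE and BK end up swapped: the firmware orders its EDCA/FIFO entries
 * background, best effort, video, voice (see iwn_qosparam_to_hw()), while
 * net80211 enumerates best effort first.
 */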
3472 static int
3473 iwn_wme_to_qos_ac(struct iwn_softc *sc, int wme_ac)
3474 {
3475 int qos_ac;
3476
3477 switch (wme_ac) {
3478 case WME_AC_BE:
3479 qos_ac = QOS_AC_BK;
3480 break;
3481 case WME_AC_BK:
3482 qos_ac = QOS_AC_BE;
3483 break;
3484 case WME_AC_VI:
3485 qos_ac = QOS_AC_VI;
3486 break;
3487 case WME_AC_VO:
3488 qos_ac = QOS_AC_VO;
3489 break;
3490 default:
3491 dev_err(sc->sc_dip, CE_WARN, "!iwn_wme_to_qos_ac(): "
3492 		    "WME AC index is not in a suitable range.\n");
3493 qos_ac = QOS_AC_INVALID;
3494 break;
3495 }
3496
3497 return (qos_ac);
3498 }
3499
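/*
 * Expand a contention-window exponent into the actual window size,
 * i.e. cw = 2^cw_e - 1; for example cw_e = 4 yields a CW of 15 slots.
 */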
3500 static uint16_t
3501 iwn_cw_e_to_cw(uint8_t cw_e)
3502 {
3503 uint16_t cw = 1;
3504
3505 while (cw_e > 0) {
3506 cw <<= 1;
3507 cw_e--;
3508 }
3509
3510 cw -= 1;
3511 return (cw);
3512 }
3513
3514 static int
3515 iwn_wmeparam_check(struct iwn_softc *sc, struct wmeParams *wmeparam)
3516 {
3517 int i;
3518
3519 for (i = 0; i < WME_NUM_AC; i++) {
3520
3521 if ((wmeparam[i].wmep_logcwmax > QOS_CW_RANGE_MAX) ||
3522 (wmeparam[i].wmep_logcwmin >= wmeparam[i].wmep_logcwmax)) {
3523 cmn_err(CE_WARN, "iwn_wmeparam_check(): "
3524 "Contention window is not in suitable range.\n");
3525 return (IWN_FAIL);
3526 }
3527
3528 if ((wmeparam[i].wmep_aifsn < QOS_AIFSN_MIN) ||
3529 (wmeparam[i].wmep_aifsn > QOS_AIFSN_MAX)) {
3530 dev_err(sc->sc_dip, CE_WARN, "!iwn_wmeparam_check(): "
3531 "Arbitration interframe space number"
3532 "is not in suitable range.\n");
3533 return (IWN_FAIL);
3534 }
3535 }
3536
3537 return (IWN_SUCCESS);
3538 }
3539
3540 /*
3541 * This function writes the EDCA parameters to the hardware.
3542 * FIFO0-background, FIFO1-best effort, FIFO2-video, FIFO3-voice.
3543 */
3544 static int
3545 iwn_qosparam_to_hw(struct iwn_softc *sc, int async)
3546 {
3547 ieee80211com_t *ic = &sc->sc_ic;
3548 ieee80211_node_t *in = ic->ic_bss;
3549 struct wmeParams *wmeparam;
3550 struct iwn_edca_params edcaparam;
3551 int i, j;
3552 int err = IWN_FAIL;
3553
3554 if ((in->in_flags & IEEE80211_NODE_QOS) &&
3555 (IEEE80211_M_STA == ic->ic_opmode)) {
3556 wmeparam = ic->ic_wme.wme_chanParams.cap_wmeParams;
3557 } else {
3558 return (IWN_SUCCESS);
3559 }
3560
3561 (void) memset(&edcaparam, 0, sizeof (edcaparam));
3562
3563 err = iwn_wmeparam_check(sc, wmeparam);
3564 if (err != IWN_SUCCESS) {
3565 return (err);
3566 }
3567
3568 if (in->in_flags & IEEE80211_NODE_QOS) {
3569 edcaparam.flags |= QOS_PARAM_FLG_UPDATE_EDCA;
3570 }
3571
3572 if (in->in_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)) {
3573 edcaparam.flags |= QOS_PARAM_FLG_TGN;
3574 }
3575
3576 for (i = 0; i < WME_NUM_AC; i++) {
3577
3578 j = iwn_wme_to_qos_ac(sc, i);
3579 if (j < QOS_AC_BK || j > QOS_AC_VO) {
3580 return (IWN_FAIL);
3581 }
3582
3583 sc->sc_edca->ac[j].cwmin.value.ul = edcaparam.ac[j].cwmin =
3584 iwn_cw_e_to_cw(wmeparam[i].wmep_logcwmin);
3585 sc->sc_edca->ac[j].cwmax.value.ul = edcaparam.ac[j].cwmax =
3586 iwn_cw_e_to_cw(wmeparam[i].wmep_logcwmax);
3587 sc->sc_edca->ac[j].aifsn.value.ul = edcaparam.ac[j].aifsn =
3588 wmeparam[i].wmep_aifsn;
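		/*
		 * wmep_txopLimit is expressed in 32-microsecond units
		 * (as in the WME parameter set), so multiplying by 32
		 * below presumably gives the firmware the TXOP limit in
		 * microseconds.
		 */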
3589 sc->sc_edca->ac[j].txop.value.ul = edcaparam.ac[j].txoplimit =
3590 (uint16_t)(wmeparam[i].wmep_txopLimit * 32);
3591 }
3592
3593 err = iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &edcaparam,
3594 sizeof (edcaparam), async);
3595 if (err != IWN_SUCCESS) {
3596 dev_err(sc->sc_dip, CE_WARN, "!iwn_qosparam_to_hw(): "
3597 "failed to update QoS parameters into hardware.");
3598 return (err);
3599 }
3600
3601 return (err);
3602 }
3603
3604 static inline int
3605 iwn_wme_tid_qos_ac(int tid)
3606 {
3607 switch (tid) {
3608 case 1:
3609 case 2:
3610 return (QOS_AC_BK);
3611 case 0:
3612 case 3:
3613 return (QOS_AC_BE);
3614 case 4:
3615 case 5:
3616 return (QOS_AC_VI);
3617 case 6:
3618 case 7:
3619 return (QOS_AC_VO);
3620 }
3621
3622 return (QOS_AC_BE);
3623 }
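/*
 * The mapping above follows the usual 802.1D user-priority to access
 * category convention: TIDs 1,2 -> background, 0,3 -> best effort,
 * 4,5 -> video, 6,7 -> voice; anything unexpected falls back to best
 * effort.
 */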
3624
3625 static inline int
3626 iwn_qos_ac_to_txq(int qos_ac)
3627 {
3628 switch (qos_ac) {
3629 case QOS_AC_BK:
3630 return (QOS_AC_BK_TO_TXQ);
3631 case QOS_AC_BE:
3632 return (QOS_AC_BE_TO_TXQ);
3633 case QOS_AC_VI:
3634 return (QOS_AC_VI_TO_TXQ);
3635 case QOS_AC_VO:
3636 return (QOS_AC_VO_TO_TXQ);
3637 }
3638
3639 return (QOS_AC_BE_TO_TXQ);
3640 }
3641
3642 static int
3643 iwn_wme_tid_to_txq(struct iwn_softc *sc, int tid)
3644 {
3645 int queue_n = TXQ_FOR_AC_INVALID;
3646 int qos_ac;
3647
3648 if (tid < WME_TID_MIN ||
3649 tid > WME_TID_MAX) {
3650 dev_err(sc->sc_dip, CE_WARN, "!wme_tid_to_txq(): "
3651 "TID is not in suitable range.");
3652 return (queue_n);
3653 }
3654
3655 qos_ac = iwn_wme_tid_qos_ac(tid);
3656 queue_n = iwn_qos_ac_to_txq(qos_ac);
3657
3658 return (queue_n);
3659 }
3660
3661 static int
3662 iwn_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
3663 {
3664 struct iwn_softc *sc = (struct iwn_softc *)ic;
3665 struct iwn_node *wn;
3666 struct iwn_tx_ring *ring;
3667 struct iwn_tx_desc *desc;
3668 struct iwn_tx_data *data;
3669 struct iwn_tx_cmd *cmd;
3670 struct iwn_cmd_data *tx;
3671 ieee80211_node_t *in;
3672 const struct iwn_rate *rinfo;
3673 struct ieee80211_frame *wh;
3674 struct ieee80211_key *k = NULL;
3675 uint32_t flags;
3676 uint_t hdrlen;
3677 uint8_t ridx, txant;
3678 int i, totlen, seglen, pad;
3679 int txq_id = NON_QOS_TXQ;
3680 struct ieee80211_qosframe *qwh = NULL;
3681 uint8_t tid = WME_TID_INVALID;
3682 ddi_dma_cookie_t cookie;
3683 mblk_t *m0, *m;
3684 int mblen, off;
3685
3686 int noack = 0;
3687
3688 if (ic == NULL)
3689 return (EIO);
3690
3691 if ((mp == NULL) || (MBLKL(mp) <= 0))
3692 return (EIO);
3693
3694 if (sc->sc_flags & IWN_FLAG_SUSPEND) {
3695 freemsg(mp);
3696 sc->sc_tx_err++;
3697 return (EIO);
3698 }
3699
3700 wh = (struct ieee80211_frame *)mp->b_rptr;
3701
3702 hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);
3703
3704 /*
3705 * Determine which AP, or which peer station in IBSS mode, to send to.
3706 */
3707 in = ieee80211_find_txnode(ic, wh->i_addr1);
3708 if (in == NULL) {
3709 dev_err(sc->sc_dip, CE_WARN, "!iwn_send(): "
3710 "failed to find tx node");
3711 freemsg(mp);
3712 sc->sc_tx_err++;
3713 return (EIO);
3714 }
3715
3716 wn = (struct iwn_node *)in;
3717
3718 /*
3719 * Determine TX queue according to traffic ID in frame
3720 * if working in QoS mode.
3721 */
3722 if (in->in_flags & IEEE80211_NODE_QOS) {
3723 if ((type & IEEE80211_FC0_TYPE_MASK) ==
3724 IEEE80211_FC0_TYPE_DATA) {
3725 if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
3726 qwh = (struct ieee80211_qosframe *)wh;
3727
3728 tid = qwh->i_qos[0] & IEEE80211_QOS_TID;
3729 txq_id = iwn_wme_tid_to_txq(sc, tid);
3730
3731 if (txq_id < TXQ_FOR_AC_MIN ||
3732 (txq_id > TXQ_FOR_AC_MAX)) {
3733 freemsg(mp);
3734 sc->sc_tx_err++;
3735 return (EIO);
3736 }
3737 } else {
3738 txq_id = NON_QOS_TXQ;
3739 }
3740 } else if ((type & IEEE80211_FC0_TYPE_MASK) ==
3741 IEEE80211_FC0_TYPE_MGT) {
3742 txq_id = QOS_TXQ_FOR_MGT;
3743 } else {
3744 txq_id = NON_QOS_TXQ;
3745 }
3746 } else {
3747 txq_id = NON_QOS_TXQ;
3748 }
3749
3750 if (sc->qfullmsk & (1 << txq_id)) {
3751 sc->sc_tx_err++;
3752 /* net80211-initiated send */
3753 if ((type & IEEE80211_FC0_TYPE_MASK) !=
3754 IEEE80211_FC0_TYPE_DATA)
3755 freemsg(mp);
3756 return (EAGAIN);
3757 }
3758
3759 /* Choose a TX rate index. */
3760 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3761 type != IEEE80211_FC0_TYPE_DATA) {
3762 ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ?
3763 IWN_RIDX_OFDM6 : IWN_RIDX_CCK1;
3764 } else if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
3765 ridx = sc->fixed_ridx;
3766 } else
3767 ridx = wn->ridx[in->in_txrate];
3768 rinfo = &iwn_rates[ridx];
3769
3770 m = allocb(msgdsize(mp) + 32, BPRI_MED);
3771 if (m) {
3772 for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
3773 mblen = MBLKL(m0);
3774 bcopy(m0->b_rptr, m->b_rptr + off, mblen);
3775 off += mblen;
3776 }
3777
3778 m->b_wptr += off;
3779
3780 freemsg(mp);
3781 mp = m;
3782
3783 wh = (struct ieee80211_frame *)mp->b_rptr;
3784 } else {
3785 dev_err(sc->sc_dip, CE_WARN, "!iwn_send(): can't copy");
3786 /* net80211-initiated send */
3787 if ((type & IEEE80211_FC0_TYPE_MASK) !=
3788 IEEE80211_FC0_TYPE_DATA)
3789 freemsg(mp);
3790 return (EAGAIN);
3791 }
3792
3793
3794 /*
3795 * The net80211 module encapsulates outbound data frames and
3796 * fills in some of the 802.11 frame fields.
3797 */
3798 if ((type & IEEE80211_FC0_TYPE_MASK) ==
3799 IEEE80211_FC0_TYPE_DATA)
3800 (void) ieee80211_encap(ic, mp, in);
3801
3802 /* Encrypt the frame if need be. */
3803 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3804 k = ieee80211_crypto_encap(ic, mp);
3805 if (k == NULL) {
3806 freemsg(mp);
3807 return (EIO);
3808 }
3809 /* Packet header may have moved, reset our local pointer. */
3810 wh = (struct ieee80211_frame *)mp->b_rptr;
3811 }
3812 totlen = msgdsize(mp);
3813
3814 mutex_enter(&sc->sc_tx_mtx);
3815 ring = &sc->txq[txq_id];
3816 desc = &ring->desc[ring->cur];
3817 data = &ring->data[ring->cur];
3818
3819 /* Prepare TX firmware command. */
3820 cmd = &ring->cmd[ring->cur];
3821 cmd->code = IWN_CMD_TX_DATA;
3822 cmd->flags = 0;
3823 cmd->qid = ring->qid;
3824 cmd->idx = ring->cur;
3825
3826 tx = (struct iwn_cmd_data *)cmd->data;
3827 /* NB: No need to clear tx, all fields are reinitialized here. */
3828 tx->scratch = 0; /* clear "scratch" area */
3829
3830 flags = 0;
3831 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3832 /* Unicast frame, check if an ACK is expected. */
3833 if (!noack)
3834 flags |= IWN_TX_NEED_ACK;
3835 }
3836
3837 if ((wh->i_fc[0] &
3838 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
3839 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
3840 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */
3841
3842 ASSERT((flags & IWN_TX_IMM_BA) == 0);
3843
3844 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
3845 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */
3846
3847 ASSERT((flags & IWN_TX_MORE_FRAG) == 0);
3848
3849 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */
3850 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3851 /* NB: Group frames are sent using CCK in 802.11b/g. */
3852 if (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) {
3853 flags |= IWN_TX_NEED_RTS;
3854 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
3855 ridx >= IWN_RIDX_OFDM6) {
3856 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
3857 flags |= IWN_TX_NEED_CTS;
3858 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
3859 flags |= IWN_TX_NEED_RTS;
3860 }
3861 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
3862 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3863 /* 5000 autoselects RTS/CTS or CTS-to-self. */
3864 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
3865 flags |= IWN_TX_NEED_PROTECTION;
3866 } else
3867 flags |= IWN_TX_FULL_TXOP;
3868 }
3869 }
3870
3871 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3872 type != IEEE80211_FC0_TYPE_DATA)
3873 tx->id = sc->broadcast_id;
3874 else
3875 tx->id = wn->id;
3876
3877 if (type == IEEE80211_FC0_TYPE_MGT) {
3878 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3879
3880 #ifndef IEEE80211_STA_ONLY
3881 /* Tell HW to set timestamp in probe responses. */
3882 /* XXX NetBSD rev 1.11 added probe requests here but */
3883 /* probe requests do not take timestamps (from Bergamini). */
3884 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3885 flags |= IWN_TX_INSERT_TSTAMP;
3886 #endif
3887 /* XXX NetBSD rev 1.11 and 1.20 added AUTH/DAUTH and RTS/CTS */
3888 /* changes here. These are not needed (from Bergamini). */
3889 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3890 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3891 tx->timeout = htole16(3);
3892 else
3893 tx->timeout = htole16(2);
3894 } else
3895 tx->timeout = htole16(0);
3896
3897 if (hdrlen & 3) {
3898 /* First segment length must be a multiple of 4. */
3899 flags |= IWN_TX_NEED_PADDING;
3900 pad = 4 - (hdrlen & 3);
3901 } else
3902 pad = 0;
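	/*
	 * Example (assuming ieee80211_hdrspace() returned the raw header
	 * length): a QoS data frame has a 26-byte header, so 26 & 3 == 2
	 * and pad == 2, aligning the header copied into the TX command to
	 * a 4-byte boundary; a plain 24-byte header needs no padding.
	 */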
3903
3904 if (tid != WME_TID_INVALID) {
3905 flags &= ~IWN_TX_AUTO_SEQ;
3906 } else {
3907 flags |= IWN_TX_AUTO_SEQ;
3908 tid = 0;
3909 }
3910
3911 tx->len = htole16(totlen);
3912 tx->tid = tid;
3913 tx->rts_ntries = 60;
3914 tx->data_ntries = 15;
3915 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3916 tx->plcp = rinfo->plcp;
3917 tx->rflags = rinfo->flags;
3918 if (tx->id == sc->broadcast_id) {
3919 /* Group or management frame. */
3920 tx->linkq = 0;
3921 /* XXX Alternate between antenna A and B? */
3922 txant = IWN_LSB(sc->txchainmask);
3923 tx->rflags |= IWN_RFLAG_ANT(txant);
3924 } else {
3925 tx->linkq = in->in_rates.ir_nrates - in->in_txrate - 1;
3926 flags |= IWN_TX_LINKQ; /* enable MRR */
3927 }
3928 /* Set physical address of "scratch area". */
3929 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3930 tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3931
3932 /* Copy 802.11 header in TX command. */
3933 /* XXX NetBSD changed this in rev 1.20 */
3934 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3935 mp->b_rptr += hdrlen;
3936
3937 bcopy(mp->b_rptr, data->dma_data.vaddr, totlen - hdrlen);
3938 tx->security = 0;
3939 tx->flags = htole32(flags);
3940
3941 data->ni = in;
3942
3943 DTRACE_PROBE4(tx, int, ring->qid, int, ring->cur, size_t, MBLKL(mp),
3944 int, data->dma_data.ncookies);
3945
3946 /* Fill TX descriptor. */
3947 desc->nsegs = 1 + data->dma_data.ncookies;
3948 /* First DMA segment is used by the TX command. */
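	/*
	 * The length field packs two things: the low 4 bits carry the
	 * upper bits of the DMA address (IWN_HIADDR) and the remaining
	 * bits hold the segment length shifted left by 4.
	 */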
3949 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3950 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
3951 (4 + sizeof (*tx) + hdrlen + pad) << 4);
3952
3953 /* Other DMA segments are for data payload. */
3954 cookie = data->dma_data.cookie;
3955 for (i = 1, seglen = totlen - hdrlen;
3956 i <= data->dma_data.ncookies;
3957 i++, seglen -= cookie.dmac_size) {
3958 desc->segs[i].addr = htole32(IWN_LOADDR(cookie.dmac_laddress));
3959 desc->segs[i].len = htole16(IWN_HIADDR(cookie.dmac_laddress) |
3960 seglen << 4);
3961 if (i < data->dma_data.ncookies)
3962 ddi_dma_nextcookie(data->dma_data.dma_hdl, &cookie);
3963 }
3964
3965 (void) ddi_dma_sync(data->dma_data.dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
3966 (void) ddi_dma_sync(ring->cmd_dma.dma_hdl, ring->cur * sizeof (*cmd),
3967 sizeof (*cmd), DDI_DMA_SYNC_FORDEV);
3968 (void) ddi_dma_sync(ring->desc_dma.dma_hdl, ring->cur * sizeof (*desc),
3969 sizeof (*desc), DDI_DMA_SYNC_FORDEV);
3970
3971 /* Update TX scheduler. */
3972 sc->ops.update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3973
3974 /* Kick TX ring. */
3975 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3976 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3977
3978 /* Mark TX ring as full if we reach a certain threshold. */
3979 if (++ring->queued > IWN_TX_RING_HIMARK)
3980 sc->qfullmsk |= 1 << ring->qid;
3981 mutex_exit(&sc->sc_tx_mtx);
3982 freemsg(mp);
3983
3984 ic->ic_stats.is_tx_bytes += totlen;
3985
3986 mutex_enter(&sc->sc_mt_mtx);
3987 if (sc->sc_tx_timer == 0)
3988 sc->sc_tx_timer = 5;
3989 mutex_exit(&sc->sc_mt_mtx);
3990
3991 return 0;
3992 }
3993
3994 static mblk_t *
3995 iwn_m_tx(void *arg, mblk_t *mp)
3996 {
3997 struct iwn_softc *sc;
3998 ieee80211com_t *ic;
3999 mblk_t *next;
4000
4001 sc = (struct iwn_softc *)arg;
4002 ASSERT(sc != NULL);
4003 ic = &sc->sc_ic;
4004
4005 if (sc->sc_flags & IWN_FLAG_SUSPEND) {
4006 freemsgchain(mp);
4007 return (NULL);
4008 }
4009
4010 if (ic->ic_state != IEEE80211_S_RUN) {
4011 freemsgchain(mp);
4012 return (NULL);
4013 }
4014
4015 if ((sc->sc_flags & IWN_FLAG_HW_ERR_RECOVER)) {
4016 freemsgchain(mp);
4017 return (NULL);
4018 }
4019
4020 while (mp != NULL) {
4021 next = mp->b_next;
4022 mp->b_next = NULL;
4023 if (iwn_send(ic, mp, IEEE80211_FC0_TYPE_DATA) == EAGAIN) {
4024 mp->b_next = next;
4025 break;
4026 }
4027 mp = next;
4028 }
4029
4030 return (mp);
4031 }
4032
4033 static void
4034 iwn_watchdog(void *arg)
4035 {
4036 struct iwn_softc *sc = (struct iwn_softc *)arg;
4037 ieee80211com_t *ic = &sc->sc_ic;
4038 timeout_id_t timeout_id = ic->ic_watchdog_timer;
4039
4040 ieee80211_stop_watchdog(ic);
4041
4042 mutex_enter(&sc->sc_mt_mtx);
4043 if (sc->sc_tx_timer > 0) {
4044 if (--sc->sc_tx_timer == 0) {
4045 dev_err(sc->sc_dip, CE_WARN, "!device timeout");
4046 sc->sc_flags |= IWN_FLAG_HW_ERR_RECOVER;
4047 sc->sc_ostate = IEEE80211_S_RUN;
4048 DTRACE_PROBE(recover__send__fail);
4049 }
4050 }
4051 mutex_exit(&sc->sc_mt_mtx);
4052
4053 if ((ic->ic_state != IEEE80211_S_AUTH) &&
4054 (ic->ic_state != IEEE80211_S_ASSOC))
4055 return;
4056
4057 if (ic->ic_bss->in_fails > 10) {
4058 DTRACE_PROBE2(watchdog__reset, timeout_id_t, timeout_id,
4059 struct ieee80211node *, ic->ic_bss);
4060 dev_err(sc->sc_dip, CE_WARN, "!iwn_watchdog reset");
4061 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
4062 } else {
4063 ic->ic_bss->in_fails++;
4064
4065 DTRACE_PROBE2(watchdog__timeout, timeout_id_t, timeout_id,
4066 struct ieee80211node *, ic->ic_bss);
4067
4068 ieee80211_watchdog(ic);
4069 }
4070 }
4071
4072 static void
4073 iwn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
4074 {
4075 struct iwn_softc *sc;
4076 struct ieee80211com *ic;
4077 int error = 0;
4078
4079 sc = (struct iwn_softc *)arg;
4080 ASSERT(sc != NULL);
4081 ic = &sc->sc_ic;
4082
4083 mutex_enter(&sc->sc_mtx);
4084 while (sc->sc_flags & IWN_FLAG_SCANNING)
4085 cv_wait(&sc->sc_scan_cv, &sc->sc_mtx);
4086 mutex_exit(&sc->sc_mtx);
4087
4088 error = ieee80211_ioctl(ic, wq, mp);
4089 if (error == ENETRESET) {
4090 /*
4091 * This is special-cased for hidden AP connections.
4092 * We must make sure the driver issues only one scan
4093 * for a 'connect' CLI command, so when connecting to
4094 * a hidden AP the scan is only sent out over the air
4095 * once we know the desired ESSID of the AP we want
4096 * to connect to.
4097 */
4098 if (ic->ic_des_esslen) {
4099 if (sc->sc_flags & IWN_FLAG_RUNNING) {
4100 DTRACE_PROBE(netreset);
4101 iwn_m_stop(sc);
4102 (void) iwn_m_start(sc);
4103 (void) ieee80211_new_state(ic,
4104 IEEE80211_S_SCAN, -1);
4105 }
4106 }
4107 }
4108 }
4109
4110 /*
4111 * Callback functions for get/set property
4112 */
4113 static int
4114 iwn_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
4115 uint_t wldp_length, void *wldp_buf)
4116 {
4117 struct iwn_softc *sc;
4118
4119 sc = (struct iwn_softc *)arg;
4120 ASSERT(sc != NULL);
4121
4122 return (ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
4123 wldp_length, wldp_buf));
4124 }
4125
4126 static void
4127 iwn_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
4128 mac_prop_info_handle_t prh)
4129 {
4130 struct iwn_softc *sc;
4131
4132 sc = (struct iwn_softc *)arg;
4133 ASSERT(sc != NULL);
4134
4135 ieee80211_propinfo(&sc->sc_ic, pr_name, wldp_pr_num, prh);
4136 }
4137
4138 static int
4139 iwn_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
4140 uint_t wldp_length, const void *wldp_buf)
4141 {
4142 struct iwn_softc *sc;
4143 ieee80211com_t *ic;
4144 int err = EINVAL;
4145
4146 sc = (struct iwn_softc *)arg;
4147 ASSERT(sc != NULL);
4148 ic = &sc->sc_ic;
4149
4150 mutex_enter(&sc->sc_mtx);
4151 while (sc->sc_flags & IWN_FLAG_SCANNING)
4152 cv_wait(&sc->sc_scan_cv, &sc->sc_mtx);
4153 mutex_exit(&sc->sc_mtx);
4154
4155 err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
4156 wldp_buf);
4157
4158 if (err == ENETRESET) {
4159 if (ic->ic_des_esslen) {
4160 if (sc->sc_flags & IWN_FLAG_RUNNING) {
4161 DTRACE_PROBE(netreset);
4162 iwn_m_stop(sc);
4163 (void) iwn_m_start(sc);
4164 (void) ieee80211_new_state(ic,
4165 IEEE80211_S_SCAN, -1);
4166 }
4167 }
4168 err = 0;
4169 }
4170
4171 return (err);
4172 }
4173
4174 /*
4175 * Invoked by GLD to get statistics from the NIC and the driver.
4176 */
4177 static int
4178 iwn_m_stat(void *arg, uint_t stat, uint64_t *val)
4179 {
4180 struct iwn_softc *sc;
4181 ieee80211com_t *ic;
4182 ieee80211_node_t *in;
4183
4184 sc = (struct iwn_softc *)arg;
4185 ASSERT(sc != NULL);
4186 ic = &sc->sc_ic;
4187
4188 mutex_enter(&sc->sc_mtx);
4189
4190 switch (stat) {
4191 case MAC_STAT_IFSPEED:
4192 in = ic->ic_bss;
4193 *val = ((IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) ?
4194 IEEE80211_RATE(in->in_txrate) :
4195 ic->ic_fixed_rate) / 2 * 1000000;
4196 break;
4197 case MAC_STAT_NOXMTBUF:
4198 *val = sc->sc_tx_nobuf;
4199 break;
4200 case MAC_STAT_NORCVBUF:
4201 *val = sc->sc_rx_nobuf;
4202 break;
4203 case MAC_STAT_IERRORS:
4204 *val = sc->sc_rx_err;
4205 break;
4206 case MAC_STAT_RBYTES:
4207 *val = ic->ic_stats.is_rx_bytes;
4208 break;
4209 case MAC_STAT_IPACKETS:
4210 *val = ic->ic_stats.is_rx_frags;
4211 break;
4212 case MAC_STAT_OBYTES:
4213 *val = ic->ic_stats.is_tx_bytes;
4214 break;
4215 case MAC_STAT_OPACKETS:
4216 *val = ic->ic_stats.is_tx_frags;
4217 break;
4218 case MAC_STAT_OERRORS:
4219 case WIFI_STAT_TX_FAILED:
4220 *val = sc->sc_tx_err;
4221 break;
4222 case WIFI_STAT_TX_RETRANS:
4223 *val = sc->sc_tx_retries;
4224 break;
4225 case WIFI_STAT_FCS_ERRORS:
4226 case WIFI_STAT_WEP_ERRORS:
4227 case WIFI_STAT_TX_FRAGS:
4228 case WIFI_STAT_MCAST_TX:
4229 case WIFI_STAT_RTS_SUCCESS:
4230 case WIFI_STAT_RTS_FAILURE:
4231 case WIFI_STAT_ACK_FAILURE:
4232 case WIFI_STAT_RX_FRAGS:
4233 case WIFI_STAT_MCAST_RX:
4234 case WIFI_STAT_RX_DUPS:
4235 mutex_exit(&sc->sc_mtx);
4236 return (ieee80211_stat(ic, stat, val));
4237 default:
4238 mutex_exit(&sc->sc_mtx);
4239 return (ENOTSUP);
4240 }
4241
4242 mutex_exit(&sc->sc_mtx);
4243
4244 return (0);
4245
4246 }
4247
4248 /*
4249 * Invoked by GLD to configure the NIC.
4250 */
4251 static int
4252 iwn_m_unicst(void *arg, const uint8_t *macaddr)
4253 {
4254 struct iwn_softc *sc;
4255 ieee80211com_t *ic;
4256 int err = IWN_SUCCESS;
4257
4258 sc = (struct iwn_softc *)arg;
4259 ASSERT(sc != NULL);
4260 ic = &sc->sc_ic;
4261
4262 if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
4263 mutex_enter(&sc->sc_mtx);
4264 IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
4265 err = iwn_config(sc);
4266 mutex_exit(&sc->sc_mtx);
4267 if (err != IWN_SUCCESS) {
4268 dev_err(sc->sc_dip, CE_WARN, "!iwn_m_unicst(): "
4269 "failed to configure device");
4270 goto fail;
4271 }
4272 }
4273
4274 return (err);
4275
4276 fail:
4277 return (err);
4278 }
4279
4280 /*ARGSUSED*/
4281 static int
4282 iwn_m_multicst(void *arg, boolean_t add, const uint8_t *m)
4283 {
4284 return (IWN_SUCCESS);
4285 }
4286
4287 /*ARGSUSED*/
4288 static int
4289 iwn_m_promisc(void *arg, boolean_t on)
4290 {
4291 _NOTE(ARGUNUSED(on));
4292
4293 return (IWN_SUCCESS);
4294 }
4295
4296 static void
4297 iwn_abort_scan(void *arg)
4298 {
4299 struct iwn_softc *sc = (struct iwn_softc *)arg;
4300 ieee80211com_t *ic = &sc->sc_ic;
4301
4302 mutex_enter(&sc->sc_mtx);
4303 if ((sc->sc_flags & IWN_FLAG_SCANNING) == 0) {
4304 mutex_exit(&sc->sc_mtx);
4305 return;
4306 }
4307
4308 dev_err(sc->sc_dip, CE_WARN,
4309 "!aborting scan, flags = %x, state = %s",
4310 sc->sc_flags, ieee80211_state_name[ic->ic_state]);
4311 sc->sc_flags &= ~IWN_FLAG_SCANNING;
4312 iwn_hw_stop(sc, B_FALSE);
4313 mutex_exit(&sc->sc_mtx);
4314
4315 sc->scan_to = 0;
4316 (void) iwn_init(sc);
4317 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
4318 }
4319
4320 /*
4321 * Periodic function handling the RF kill switch and HW error recovery.
4322 */
4323 static void
4324 iwn_periodic(void *arg)
4325 {
4326 struct iwn_softc *sc = (struct iwn_softc *)arg;
4327 ieee80211com_t *ic = &sc->sc_ic;
4328 int err;
4329 uint32_t tmp;
4330
4331 mutex_enter(&sc->sc_mtx);
4332 tmp = IWN_READ(sc, IWN_GP_CNTRL);
4333 if (tmp & IWN_GP_CNTRL_RFKILL) {
4334 sc->sc_flags &= ~IWN_FLAG_RADIO_OFF;
4335 } else {
4336 sc->sc_flags |= IWN_FLAG_RADIO_OFF;
4337 }
4338
4339 /*
4340 * If the RF is OFF, do nothing.
4341 */
4342 if (sc->sc_flags & IWN_FLAG_RADIO_OFF) {
4343 mutex_exit(&sc->sc_mtx);
4344 return;
4345 }
4346
4347 mutex_exit(&sc->sc_mtx);
4348
4349 /*
4350 * Recover from a fatal error.
4351 */
4352 if (ic->ic_mach &&
4353 (sc->sc_flags & IWN_FLAG_HW_ERR_RECOVER)) {
4354 dev_err(sc->sc_dip, CE_WARN,
4355 "!trying to restore previous state");
4356
4357 mutex_enter(&sc->sc_mtx);
4358 sc->sc_flags |= IWN_FLAG_STOP_CALIB_TO;
4359 mutex_exit(&sc->sc_mtx);
4360
4361 if (sc->calib_to != 0)
4362 (void) untimeout(sc->calib_to);
4363 sc->calib_to = 0;
4364
4365 if (sc->scan_to != 0)
4366 (void) untimeout(sc->scan_to);
4367 sc->scan_to = 0;
4368
4369 iwn_hw_stop(sc, B_TRUE);
4370
4371 if (IWN_CHK_FAST_RECOVER(sc)) {
4372 /* save runtime configuration */
4373 bcopy(&sc->rxon, &sc->rxon_save, sizeof (sc->rxon));
4374 } else {
4375 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
4376 }
4377
4378 err = iwn_init(sc);
4379 if (err != IWN_SUCCESS)
4380 return;
4381
4382 mutex_enter(&sc->sc_mtx);
4383 sc->sc_flags |= IWN_FLAG_RUNNING;
4384 mutex_exit(&sc->sc_mtx);
4385
4386 if (!IWN_CHK_FAST_RECOVER(sc) ||
4387 iwn_fast_recover(sc) != IWN_SUCCESS) {
4388 mutex_enter(&sc->sc_mtx);
4389 sc->sc_flags &= ~IWN_FLAG_HW_ERR_RECOVER;
4390 mutex_exit(&sc->sc_mtx);
4391 if (sc->sc_ostate != IEEE80211_S_INIT) {
4392 ieee80211_new_state(ic, IEEE80211_S_SCAN, 0);
4393 }
4394 }
4395 }
4396 }
4397
4398 /*
4399 * Send a command to the firmware.
4400 */
4401 static int
4402 iwn_cmd(struct iwn_softc *sc, uint8_t code, void *buf, int size, int async)
4403 {
4404 struct iwn_tx_ring *ring = &sc->txq[IWN_CMD_QUEUE_NUM];
4405 struct iwn_tx_desc *desc;
4406 struct iwn_tx_data *data;
4407 struct iwn_tx_cmd *cmd;
4408 clock_t clk;
4409 uintptr_t paddr;
4410 int totlen, ret;
4411
4412 ASSERT(mutex_owned(&sc->sc_mtx));
4413
4414 desc = &ring->desc[ring->cur];
4415 data = &ring->data[ring->cur];
4416 totlen = 4 + size;
4417
4418 if (size > sizeof (cmd->data)) {
4419 /* Command is too large to fit in a descriptor. */
4420 if (iwn_dma_contig_alloc(sc, &data->cmd_dma, totlen,
4421 DDI_DMA_CONSISTENT | DDI_DMA_RDWR, (void **)&cmd,
4422 &iwn_dma_accattr, 1) != DDI_SUCCESS)
4423 return ENOBUFS;
4424 paddr = data->cmd_dma.paddr;
4425 } else {
4426 cmd = &ring->cmd[ring->cur];
4427 paddr = data->cmd_paddr;
4428 }
4429
4430 cmd->code = code;
4431 cmd->flags = 0;
4432 cmd->qid = ring->qid;
4433 cmd->idx = ring->cur;
4434 bzero(cmd->data, size);
4435 memcpy(cmd->data, buf, size);
4436
4437 bzero(desc, sizeof(*desc));
4438 desc->nsegs = 1;
4439 desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
4440 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4);
4441
4442 if (size > sizeof cmd->data) {
4443 (void) ddi_dma_sync(data->cmd_dma.dma_hdl, 0, totlen,
4444 DDI_DMA_SYNC_FORDEV);
4445 } else {
4446 (void) ddi_dma_sync(ring->cmd_dma.dma_hdl,
4447 ring->cur * sizeof (*cmd),
4448 totlen, DDI_DMA_SYNC_FORDEV);
4449 }
4450 (void) ddi_dma_sync(ring->desc_dma.dma_hdl,
4451 ring->cur * sizeof (*desc),
4452 sizeof (*desc), DDI_DMA_SYNC_FORDEV);
4453
4454 /* Update TX scheduler. */
4455 sc->ops.update_sched(sc, ring->qid, ring->cur, 0, 0);
4456
4457 /* Kick command ring. */
4458 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4459 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4460
4461 if (async)
4462 return (IWN_SUCCESS);
4463
4464 sc->sc_cmd_flag = SC_CMD_FLG_NONE;
4465 clk = ddi_get_lbolt() + drv_usectohz(2000000);
4466 while (sc->sc_cmd_flag != SC_CMD_FLG_DONE)
4467 if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_mtx, clk) < 0)
4468 break;
4469
4470 ret = (sc->sc_cmd_flag == SC_CMD_FLG_DONE) ? IWN_SUCCESS : IWN_FAIL;
4471 sc->sc_cmd_flag = SC_CMD_FLG_NONE;
4472
4473 return (ret);
4474 }
4475
4476 static int
4477 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
4478 {
4479 struct iwn4965_node_info hnode;
4480 char *src, *dst;
4481
4482 /*
4483 * We use the node structure for 5000 Series internally (it is
4484 * a superset of the one for 4965AGN). We thus copy the common
4485 * fields before sending the command.
4486 */
4487 src = (char *)node;
4488 dst = (char *)&hnode;
4489 memcpy(dst, src, 48);
4490 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */
4491 memcpy(dst + 48, src + 72, 20);
4492 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
4493 }
4494
4495 static int
4496 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
4497 {
4498 /* Direct mapping. */
4499 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
4500 }
4501
4502 static int
4503 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
4504 {
4505 struct iwn_node *wn = (void *)ni;
4506 struct ieee80211_rateset *rs = &ni->in_rates;
4507 struct iwn_cmd_link_quality linkq;
4508 const struct iwn_rate *rinfo;
4509 uint8_t txant;
4510 int i, txrate;
4511
4512 /* Use the first valid TX antenna. */
4513 txant = IWN_LSB(sc->txchainmask);
4514
4515 memset(&linkq, 0, sizeof linkq);
4516 linkq.id = wn->id;
4517 linkq.antmsk_1stream = txant;
4518 linkq.antmsk_2stream = IWN_ANT_AB;
4519 linkq.ampdu_max = 31;
4520 linkq.ampdu_threshold = 3;
4521 linkq.ampdu_limit = htole16(4000); /* 4ms */
4522
4523 /* Start at highest available bit-rate. */
4524 txrate = rs->ir_nrates - 1;
4525 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
4526 rinfo = &iwn_rates[wn->ridx[txrate]];
4527 linkq.retry[i].plcp = rinfo->plcp;
4528 linkq.retry[i].rflags = rinfo->flags;
4529 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
4530 /* Next retry at the next lower bit-rate. */
4531 if (txrate > 0)
4532 txrate--;
4533 }
4534 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
4535 }
4536
4537 /*
4538 * Broadcast node is used to send group-addressed and management frames.
4539 */
4540 static int
4541 iwn_add_broadcast_node(struct iwn_softc *sc, int async)
4542 {
4543 struct iwn_ops *ops = &sc->ops;
4544 struct iwn_node_info node;
4545 struct iwn_cmd_link_quality linkq;
4546 const struct iwn_rate *rinfo;
4547 uint8_t txant;
4548 int i, error;
4549
4550 memset(&node, 0, sizeof node);
4551 IEEE80211_ADDR_COPY(node.macaddr, etherbroadcastaddr);
4552 node.id = sc->broadcast_id;
4553 DTRACE_PROBE(add__broadcast__node);
4554 if ((error = ops->add_node(sc, &node, async)) != 0)
4555 return error;
4556
4557 /* Use the first valid TX antenna. */
4558 txant = IWN_LSB(sc->txchainmask);
4559
4560 memset(&linkq, 0, sizeof linkq);
4561 linkq.id = sc->broadcast_id;
4562 linkq.antmsk_1stream = txant;
4563 linkq.antmsk_2stream = IWN_ANT_AB;
4564 linkq.ampdu_max = 64;
4565 linkq.ampdu_threshold = 3;
4566 linkq.ampdu_limit = htole16(4000); /* 4ms */
4567
4568 /* Use lowest mandatory bit-rate. */
4569 rinfo = (sc->sc_ic.ic_curmode != IEEE80211_MODE_11A) ?
4570 &iwn_rates[IWN_RIDX_CCK1] : &iwn_rates[IWN_RIDX_OFDM6];
4571 linkq.retry[0].plcp = rinfo->plcp;
4572 linkq.retry[0].rflags = rinfo->flags;
4573 linkq.retry[0].rflags |= IWN_RFLAG_ANT(txant);
4574 /* Use same bit-rate for all TX retries. */
4575 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
4576 linkq.retry[i].plcp = linkq.retry[0].plcp;
4577 linkq.retry[i].rflags = linkq.retry[0].rflags;
4578 }
4579 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
4580 }
4581
4582 static void
4583 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
4584 {
4585 struct iwn_cmd_led led;
4586
4587 /* Clear microcode LED ownership. */
4588 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
4589
4590 led.which = which;
4591 led.unit = htole32(10000); /* on/off in unit of 100ms */
4592 led.off = off;
4593 led.on = on;
4594 DTRACE_PROBE1(led__change, const char *,
4595 (off != 0 && on != 0) ? "blinking" :
4596 (off != 0) ? "off" : "on");
4597 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
4598 }
4599
4600 /*
4601 * Set the critical temperature at which the firmware will stop the radio
4602 * and notify us.
4603 */
4604 static int
4605 iwn_set_critical_temp(struct iwn_softc *sc)
4606 {
4607 struct iwn_critical_temp crit;
4608 int32_t temp;
4609
4610 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
4611
4612 if (sc->hw_type == IWN_HW_REV_TYPE_5150)
4613 temp = (IWN_CTOK(110) - sc->temp_off) * -5;
4614 else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
4615 temp = IWN_CTOK(110);
4616 else
4617 temp = 110;
4618
4619 sc->sc_misc->crit_temp.value.ul = temp;
4620
4621 memset(&crit, 0, sizeof crit);
4622 crit.tempR = htole32(temp);
4623 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
4624 }
4625
4626 static int
4627 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
4628 {
4629 struct iwn_cmd_timing cmd;
4630 uint64_t val, mod;
4631
4632 memset(&cmd, 0, sizeof cmd);
4633 memcpy(&cmd.tstamp, ni->in_tstamp.data, sizeof (uint64_t));
4634 cmd.bintval = htole16(ni->in_intval);
4635 cmd.lintval = htole16(10);
4636
4637 /* Compute remaining time until next beacon. */
4638 val = (uint64_t)ni->in_intval * 1024; /* msecs -> usecs */
4639 mod = le64toh(cmd.tstamp) % val;
4640 cmd.binitval = htole32((uint32_t)(val - mod));
4641
4642 sc->sc_timing->bintval.value.ul = ni->in_intval;
4643 sc->sc_timing->tstamp.value.ul = ni->in_tstamp.tsf;
4644 sc->sc_timing->init.value.ul = (uint32_t)(val - mod);
4645
4646 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
4647 }
4648
4649 static void
4650 iwn4965_power_calibration(struct iwn_softc *sc, int temp)
4651 {
4652 /* Adjust TX power if need be (delta >= 3 degC). */
4653 IWN_DBG("temperature %d->%d", sc->temp, temp);
4654 if (abs(temp - sc->temp) >= 3) {
4655 /* Record temperature of last calibration. */
4656 sc->temp = temp;
4657 (void)iwn4965_set_txpower(sc, 1);
4658 }
4659 }
4660
4661 /*
4662 * Set TX power for current channel (each rate has its own power settings).
4663 * This function takes into account the regulatory information from EEPROM,
4664 * the current temperature and the current voltage.
4665 */
4666 static int
4667 iwn4965_set_txpower(struct iwn_softc *sc, int async)
4668 {
4669 /* Fixed-point arithmetic division using a n-bit fractional part. */
4670 #define fdivround(a, b, n) \
4671 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
4672 /* Linear interpolation. */
4673 #define interpolate(x, x1, y1, x2, y2, n) \
4674 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
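/*
 * For instance, interpolate(6, 1, 10, 11, 20, 1) computes
 * 10 + fdivround((6 - 1) * (20 - 10), 11 - 1, 1)
 *    = 10 + ((2 * 50) / 10 + 1) / 2 = 10 + 5 = 15,
 * i.e. integer linear interpolation rounded to the nearest value using a
 * 1-bit fractional part.
 */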
4675
4676 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
4677 struct ieee80211com *ic = &sc->sc_ic;
4678 struct iwn_ucode_info *uc = &sc->ucode_info;
4679 struct ieee80211_channel *ch;
4680 struct iwn4965_cmd_txpower cmd;
4681 struct iwn4965_eeprom_chan_samples *chans;
4682 const uint8_t *rf_gain, *dsp_gain;
4683 int32_t vdiff, tdiff;
4684 int i, c, grp, maxpwr;
4685 uint8_t chan;
4686
4687 /* Retrieve current channel from last RXON. */
4688 chan = sc->rxon.chan;
4689 sc->sc_txpower->chan.value.l = chan;
4690 ch = &ic->ic_sup_channels[chan];
4691
4692 memset(&cmd, 0, sizeof cmd);
4693 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
4694 cmd.chan = chan;
4695
4696 if (IEEE80211_IS_CHAN_5GHZ(ch)) {
4697 maxpwr = sc->maxpwr5GHz;
4698 rf_gain = iwn4965_rf_gain_5ghz;
4699 dsp_gain = iwn4965_dsp_gain_5ghz;
4700 } else {
4701 maxpwr = sc->maxpwr2GHz;
4702 rf_gain = iwn4965_rf_gain_2ghz;
4703 dsp_gain = iwn4965_dsp_gain_2ghz;
4704 }
4705
4706 /* Compute voltage compensation. */
4707 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
4708 if (vdiff > 0)
4709 vdiff *= 2;
4710 if (abs(vdiff) > 2)
4711 vdiff = 0;
4712 sc->sc_txpower->vdiff.value.l = vdiff;
4713
4714 /* Get channel attenuation group. */
4715 if (chan <= 20) /* 1-20 */
4716 grp = 4;
4717 else if (chan <= 43) /* 34-43 */
4718 grp = 0;
4719 else if (chan <= 70) /* 44-70 */
4720 grp = 1;
4721 else if (chan <= 124) /* 71-124 */
4722 grp = 2;
4723 else /* 125-200 */
4724 grp = 3;
4725 sc->sc_txpower->group.value.l = grp;
4726
4727 /* Get channel sub-band. */
4728 for (i = 0; i < IWN_NBANDS; i++)
4729 if (sc->bands[i].lo != 0 &&
4730 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
4731 break;
4732 if (i == IWN_NBANDS) /* Can't happen in real-life. */
4733 return EINVAL;
4734 chans = sc->bands[i].chans;
4735 sc->sc_txpower->subband.value.l = i;
4736
4737 for (c = 0; c < 2; c++) {
4738 uint8_t power, gain, temp;
4739 int maxchpwr, pwr, ridx, idx;
4740
4741 power = interpolate(chan,
4742 chans[0].num, chans[0].samples[c][1].power,
4743 chans[1].num, chans[1].samples[c][1].power, 1);
4744 gain = interpolate(chan,
4745 chans[0].num, chans[0].samples[c][1].gain,
4746 chans[1].num, chans[1].samples[c][1].gain, 1);
4747 temp = interpolate(chan,
4748 chans[0].num, chans[0].samples[c][1].temp,
4749 chans[1].num, chans[1].samples[c][1].temp, 1);
4750 sc->sc_txpower->txchain[c].power.value.l = power;
4751 sc->sc_txpower->txchain[c].gain.value.l = gain;
4752 sc->sc_txpower->txchain[c].temp.value.l = temp;
4753
4754 /* Compute temperature compensation. */
4755 tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
4756 sc->sc_txpower->txchain[c].tcomp.value.l = tdiff;
4757
4758 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
4759 /* Convert dBm to half-dBm. */
4760 maxchpwr = sc->maxpwr[chan] * 2;
4761 if ((ridx / 8) & 1)
4762 maxchpwr -= 6; /* MIMO 2T: -3dB */
4763
4764 pwr = maxpwr;
4765
4766 /* Adjust TX power based on rate. */
4767 if ((ridx % 8) == 5)
4768 pwr -= 15; /* OFDM48: -7.5dB */
4769 else if ((ridx % 8) == 6)
4770 pwr -= 17; /* OFDM54: -8.5dB */
4771 else if ((ridx % 8) == 7)
4772 pwr -= 20; /* OFDM60: -10dB */
4773 else
4774 pwr -= 10; /* Others: -5dB */
4775
4776 /* Do not exceed channel max TX power. */
4777 if (pwr > maxchpwr)
4778 pwr = maxchpwr;
4779
4780 idx = gain - (pwr - power) - tdiff - vdiff;
4781 if ((ridx / 8) & 1) /* MIMO */
4782 idx += (int32_t)le32toh(uc->atten[grp][c]);
4783
4784 if (cmd.band == 0)
4785 idx += 9; /* 5GHz */
4786 if (ridx == IWN_RIDX_MAX)
4787 idx += 5; /* CCK */
4788
4789 /* Make sure idx stays in a valid range. */
4790 if (idx < 0)
4791 idx = 0;
4792 else if (idx > IWN4965_MAX_PWR_INDEX)
4793 idx = IWN4965_MAX_PWR_INDEX;
4794
4795 sc->sc_txpower->txchain[c].rate[ridx].rf_gain.value.l =
4796 cmd.power[ridx].rf_gain[c] = rf_gain[idx];
4797 sc->sc_txpower->txchain[c].rate[ridx].dsp_gain.value.l =
4798 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
4799 }
4800 }
4801
4802 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
4803
4804 #undef interpolate
4805 #undef fdivround
4806 }
4807
4808 static int
4809 iwn5000_set_txpower(struct iwn_softc *sc, int async)
4810 {
4811 struct iwn5000_cmd_txpower cmd;
4812
4813 /*
4814 * TX power calibration is handled automatically by the firmware
4815 * for 5000 Series.
4816 */
4817 memset(&cmd, 0, sizeof cmd);
4818 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */
4819 cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
4820 cmd.srv_limit = IWN5000_TXPOWER_AUTO;
4821 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
4822 }
4823
4824 /*
4825 * Retrieve the maximum RSSI (in dBm) among receivers.
4826 */
4827 static int
4828 iwn4965_get_rssi(const struct iwn_rx_stat *stat)
4829 {
4830 const struct iwn4965_rx_phystat *phy = (const void *)stat->phybuf;
4831 uint8_t mask, agc;
4832 int rssi;
4833
4834 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
4835 agc = (le16toh(phy->agc) >> 7) & 0x7f;
4836
4837 rssi = 0;
4838 if (mask & IWN_ANT_A)
4839 rssi = MAX(rssi, phy->rssi[0]);
4840 if (mask & IWN_ANT_B)
4841 rssi = MAX(rssi, phy->rssi[2]);
4842 if (mask & IWN_ANT_C)
4843 rssi = MAX(rssi, phy->rssi[4]);
4844
4845 return rssi - agc - IWN_RSSI_TO_DBM;
4846 }
4847
4848 static int
4849 iwn5000_get_rssi(const struct iwn_rx_stat *stat)
4850 {
4851 const struct iwn5000_rx_phystat *phy = (const void *)stat->phybuf;
4852 uint8_t agc;
4853 int rssi;
4854
4855 agc = (le32toh(phy->agc) >> 9) & 0x7f;
4856
4857 rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
4858 le16toh(phy->rssi[1]) & 0xff);
4859 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
4860
4861 return rssi - agc - IWN_RSSI_TO_DBM;
4862 }
4863
4864 /*
4865 * Retrieve the average noise (in dBm) among receivers.
4866 */
4867 static int
4868 iwn_get_noise(const struct iwn_rx_general_stats *stats)
4869 {
4870 int i, total, nbant, noise;
4871
4872 total = nbant = 0;
4873 for (i = 0; i < 3; i++) {
4874 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
4875 continue;
4876 total += noise;
4877 nbant++;
4878 }
4879 /* There should be at least one antenna but check anyway. */
4880 return (nbant == 0) ? -127 : (total / nbant) - 107;
4881 }
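/*
 * For example, if two receivers report noise levels of 10 and 12 and the
 * third reports 0 (unused), the function returns 22 / 2 - 107 = -96 dBm;
 * the sample values here are only illustrative.
 */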
4882
4883 /*
4884 * Compute temperature (in degC) from last received statistics.
4885 */
4886 static int
4887 iwn4965_get_temperature(struct iwn_softc *sc)
4888 {
4889 struct iwn_ucode_info *uc = &sc->ucode_info;
4890 int32_t r1, r2, r3, r4, temp;
4891
4892 r1 = le32toh(uc->temp[0].chan20MHz);
4893 r2 = le32toh(uc->temp[1].chan20MHz);
4894 r3 = le32toh(uc->temp[2].chan20MHz);
4895 r4 = le32toh(sc->rawtemp);
4896
4897 if (r1 == r3) /* Prevents division by 0 (should not happen). */
4898 return 0;
4899
4900 /* Sign-extend 23-bit R4 value to 32-bit. */
4901 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
4902 /* Compute temperature in Kelvin. */
4903 temp = (259 * (r4 - r2)) / (r3 - r1);
4904 temp = (temp * 97) / 100 + 8;
4905
4906 return IWN_KTOC(temp);
4907 }
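/*
 * Worked example with made-up register values: with R1 = 100, R2 = 200,
 * R3 = 300 and a raw reading R4 = 430, the code above computes
 * 259 * (430 - 200) / (300 - 100) = 297 K, then 297 * 97 / 100 + 8 = 296 K,
 * which IWN_KTOC() turns into roughly 23 degC.
 */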
4908
4909 static int
4910 iwn5000_get_temperature(struct iwn_softc *sc)
4911 {
4912 int32_t temp;
4913
4914 /*
4915 * Temperature is not used by the driver for 5000 Series because
4916 * TX power calibration is handled by firmware. We export it to
4917 * users through a kstat though.
4918 */
4919 temp = le32toh(sc->rawtemp);
4920 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
4921 temp = (temp / -5) + sc->temp_off;
4922 temp = IWN_KTOC(temp);
4923 }
4924 return temp;
4925 }
4926
4927 /*
4928 * Initialize sensitivity calibration state machine.
4929 */
4930 static int
4931 iwn_init_sensitivity(struct iwn_softc *sc)
4932 {
4933 struct iwn_ops *ops = &sc->ops;
4934 struct iwn_calib_state *calib = &sc->calib;
4935 uint32_t flags;
4936 int error;
4937
4938 /* Reset calibration state machine. */
4939 memset(calib, 0, sizeof (*calib));
4940 calib->state = IWN_CALIB_STATE_INIT;
4941 calib->cck_state = IWN_CCK_STATE_HIFA;
4942 /* Set initial correlation values. */
4943 calib->ofdm_x1 = sc->limits->min_ofdm_x1;
4944 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
4945 calib->ofdm_x4 = sc->limits->min_ofdm_x4;
4946 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
4947 calib->cck_x4 = 125;
4948 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4;
4949 calib->energy_cck = sc->limits->energy_cck;
4950
4951 /* Write initial sensitivity. */
4952 if ((error = iwn_send_sensitivity(sc)) != 0)
4953 return error;
4954
4955 /* Write initial gains. */
4956 if ((error = ops->init_gains(sc)) != 0)
4957 return error;
4958
4959 /* Request statistics at each beacon interval. */
4960 flags = 0;
4961 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
4962 }
4963
4964 /*
4965 * Collect noise and RSSI statistics for the first 20 beacons received
4966 * after association and use them to determine connected antennas and
4967 * to set differential gains.
4968 */
4969 static void
4970 iwn_collect_noise(struct iwn_softc *sc,
4971 const struct iwn_rx_general_stats *stats)
4972 {
4973 struct iwn_ops *ops = &sc->ops;
4974 struct iwn_calib_state *calib = &sc->calib;
4975 uint32_t val;
4976 int i;
4977
4978 /* Accumulate RSSI and noise for all 3 antennas. */
4979 for (i = 0; i < 3; i++) {
4980 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
4981 calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
4982 }
4983 /* NB: We update differential gains only once after 20 beacons. */
4984 if (++calib->nbeacons < 20)
4985 return;
4986
4987 /* Determine highest average RSSI. */
4988 val = MAX(calib->rssi[0], calib->rssi[1]);
4989 val = MAX(calib->rssi[2], val);
4990
4991 /* Determine which antennas are connected. */
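	/*
	 * Since calib->rssi[] accumulates 20 beacons, the test below
	 * (val - rssi[i] > 15 * 20) marks a chain as disconnected when its
	 * average RSSI is more than 15 units below the strongest chain.
	 */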
4992 sc->chainmask = sc->rxchainmask;
4993 for (i = 0; i < 3; i++)
4994 if (val - calib->rssi[i] > 15 * 20)
4995 sc->chainmask &= ~(1 << i);
4996
4997 sc->sc_ant->conn_ant.value.ul = sc->chainmask;
4998
4999 /* If none of the TX antennas are connected, keep at least one. */
5000 if ((sc->chainmask & sc->txchainmask) == 0)
5001 sc->chainmask |= IWN_LSB(sc->txchainmask);
5002
5003 (void)ops->set_gains(sc);
5004 calib->state = IWN_CALIB_STATE_RUN;
5005
5006 #ifdef notyet
5007 /* XXX Disable RX chains with no antennas connected. */
5008 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
5009 DTRACE_PROBE2(rxon, struct iwn_rxon *, &sc->rxon, int, sc->rxonsz);
5010 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5011 #endif
5012
5013 /* Enable power-saving mode if requested by user. */
5014 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON)
5015 (void)iwn_set_pslevel(sc, 0, 3, 1);
5016 }
5017
5018 static int
5019 iwn4965_init_gains(struct iwn_softc *sc)
5020 {
5021 struct iwn_phy_calib_gain cmd;
5022
5023 memset(&cmd, 0, sizeof cmd);
5024 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
5025 /* Differential gains initially set to 0 for all 3 antennas. */
5026 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5027 }
5028
5029 static int
5030 iwn5000_init_gains(struct iwn_softc *sc)
5031 {
5032 struct iwn_phy_calib cmd;
5033
5034 memset(&cmd, 0, sizeof cmd);
5035 cmd.code = sc->reset_noise_gain;
5036 cmd.ngroups = 1;
5037 cmd.isvalid = 1;
5038 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5039 }
5040
5041 static int
5042 iwn4965_set_gains(struct iwn_softc *sc)
5043 {
5044 struct iwn_calib_state *calib = &sc->calib;
5045 struct iwn_phy_calib_gain cmd;
5046 int i, delta, noise;
5047
5048 /* Get minimal noise among connected antennas. */
5049 noise = INT_MAX; /* NB: There's at least one antenna. */
5050 for (i = 0; i < 3; i++)
5051 if (sc->chainmask & (1 << i))
5052 noise = MIN(calib->noise[i], noise);
5053
5054 memset(&cmd, 0, sizeof cmd);
5055 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
5056 /* Set differential gains for connected antennas. */
5057 for (i = 0; i < 3; i++) {
5058 if (sc->chainmask & (1 << i)) {
5059 /* Compute attenuation (in unit of 1.5dB). */
5060 delta = (noise - calib->noise[i]) / 30;
5061 /* NB: delta <= 0 */
5062 /* Limit to [-4.5dB,0]. */
5063 cmd.gain[i] = (uint8_t)MIN(abs(delta), 3);
5064 if (delta < 0)
5065 cmd.gain[i] |= 1 << 2; /* sign bit */
5066 sc->sc_ant->gain[i].value.ul = cmd.gain[i];
5067 }
5068 }
5069 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5070 }
5071
5072 static int
5073 iwn5000_set_gains(struct iwn_softc *sc)
5074 {
5075 struct iwn_calib_state *calib = &sc->calib;
5076 struct iwn_phy_calib_gain cmd;
5077 int i, ant, div, delta;
5078
5079 /* We collected 20 beacons; adapters other than the 6050 need a 1.5 factor. */
5080 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
5081
5082 memset(&cmd, 0, sizeof cmd);
5083 cmd.code = sc->noise_gain;
5084 cmd.ngroups = 1;
5085 cmd.isvalid = 1;
5086 /* Use the first available RX antenna as the reference. */
5087 ant = IWN_LSB(sc->rxchainmask);
5088 /* Set differential gains for other antennas. */
5089 for (i = ant + 1; i < 3; i++) {
5090 if (sc->chainmask & (1 << i)) {
5091 /* The delta is relative to antenna "ant". */
5092 delta = (calib->noise[ant] - calib->noise[i]) / div;
5093 /* Limit to [-4.5dB,+4.5dB]. */
5094 cmd.gain[i - 1] = (uint8_t)MIN(abs(delta), 3);
5095 if (delta < 0)
5096 cmd.gain[i - 1] |= 1 << 2; /* sign bit */
5097 sc->sc_ant->gain[i - 1].value.ul
5098 = cmd.gain[i - 1];
5099 }
5100 }
5101 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5102 }
5103
5104 /*
5105 * Tune RF RX sensitivity based on the number of false alarms detected
5106 * during the last beacon period.
5107 */
5108 static void
5109 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
5110 {
5111 #define inc(val, inc, max) \
5112 if ((val) < (max)) { \
5113 if ((val) < (max) - (inc)) \
5114 (val) += (inc); \
5115 else \
5116 (val) = (max); \
5117 needs_update = 1; \
5118 }
5119 #define dec(val, dec, min) \
5120 if ((val) > (min)) { \
5121 if ((val) > (min) + (dec)) \
5122 (val) -= (dec); \
5123 else \
5124 (val) = (min); \
5125 needs_update = 1; \
5126 }
5127
5128 const struct iwn_sensitivity_limits *limits = sc->limits;
5129 struct iwn_calib_state *calib = &sc->calib;
5130 uint32_t val, rxena, fa;
5131 uint32_t energy[3], energy_min;
5132 uint8_t noise[3], noise_ref;
5133 int i, needs_update = 0;
5134
5135 /* Check that we've been enabled long enough. */
5136 if ((rxena = le32toh(stats->general.load)) == 0)
5137 return;
5138
5139 /* Compute number of false alarms since last call for OFDM. */
5140 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
5141 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
5142 fa *= 200 * 1024; /* 200TU */
5143
5144 /* Save counters values for next call. */
5145 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
5146 calib->fa_ofdm = le32toh(stats->ofdm.fa);
5147
5148 if (fa > 50 * rxena) {
5149 /* High false alarm count, decrease sensitivity. */
5150 IWN_DBG("OFDM high false alarm count: %u", fa);
5151 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1);
5152 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
5153 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4);
5154 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
5155
5156 } else if (fa < 5 * rxena) {
5157 /* Low false alarm count, increase sensitivity. */
5158 IWN_DBG("OFDM low false alarm count: %u", fa);
5159 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1);
5160 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
5161 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4);
5162 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
5163 }
5164
5165 /* Compute maximum noise among 3 receivers. */
5166 for (i = 0; i < 3; i++)
5167 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
5168 val = MAX(noise[0], noise[1]);
5169 val = MAX(noise[2], val);
5170 /* Insert it into our samples table. */
5171 calib->noise_samples[calib->cur_noise_sample] = (uint8_t)val;
5172 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
5173
5174 /* Compute maximum noise among last 20 samples. */
5175 noise_ref = calib->noise_samples[0];
5176 for (i = 1; i < 20; i++)
5177 noise_ref = MAX(noise_ref, calib->noise_samples[i]);
5178
5179 /* Compute maximum energy among 3 receivers. */
5180 for (i = 0; i < 3; i++)
5181 energy[i] = le32toh(stats->general.energy[i]);
5182 val = MIN(energy[0], energy[1]);
5183 val = MIN(energy[2], val);
5184 /* Insert it into our samples table. */
5185 calib->energy_samples[calib->cur_energy_sample] = val;
5186 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
5187
5188 /* Compute minimum energy among last 10 samples. */
5189 energy_min = calib->energy_samples[0];
5190 for (i = 1; i < 10; i++)
5191 energy_min = MAX(energy_min, calib->energy_samples[i]);
5192 energy_min += 6;
5193
5194 /* Compute number of false alarms since last call for CCK. */
5195 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
5196 fa += le32toh(stats->cck.fa) - calib->fa_cck;
5197 fa *= 200 * 1024; /* 200TU */
5198
5199 /* Save counters values for next call. */
5200 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
5201 calib->fa_cck = le32toh(stats->cck.fa);
5202
5203 if (fa > 50 * rxena) {
5204 /* High false alarm count, decrease sensitivity. */
5205 IWN_DBG("CCK high false alarm count: %u", fa);
5206 calib->cck_state = IWN_CCK_STATE_HIFA;
5207 calib->low_fa = 0;
5208
5209 if (calib->cck_x4 > 160) {
5210 calib->noise_ref = noise_ref;
5211 if (calib->energy_cck > 2)
5212 dec(calib->energy_cck, 2, energy_min);
5213 }
5214 if (calib->cck_x4 < 160) {
5215 calib->cck_x4 = 161;
5216 needs_update = 1;
5217 } else
5218 inc(calib->cck_x4, 3, limits->max_cck_x4);
5219
5220 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
5221
5222 } else if (fa < 5 * rxena) {
5223 /* Low false alarm count, increase sensitivity. */
5224 IWN_DBG("CCK low false alarm count: %u", fa);
5225 calib->cck_state = IWN_CCK_STATE_LOFA;
5226 calib->low_fa++;
5227
5228 if (calib->cck_state != IWN_CCK_STATE_INIT &&
5229 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
5230 calib->low_fa > 100)) {
5231 inc(calib->energy_cck, 2, limits->min_energy_cck);
5232 dec(calib->cck_x4, 3, limits->min_cck_x4);
5233 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
5234 }
5235 } else {
5236 /* Not worth increasing or decreasing sensitivity. */
5237 IWN_DBG("CCK normal false alarm count: %u", fa);
5238 calib->low_fa = 0;
5239 calib->noise_ref = noise_ref;
5240
5241 if (calib->cck_state == IWN_CCK_STATE_HIFA) {
5242 /* Previous interval had many false alarms. */
5243 dec(calib->energy_cck, 8, energy_min);
5244 }
5245 calib->cck_state = IWN_CCK_STATE_INIT;
5246 }
5247
5248 if (needs_update)
5249 (void)iwn_send_sensitivity(sc);
5250 #undef dec
5251 #undef inc
5252 }
5253
5254 static int
5255 iwn_send_sensitivity(struct iwn_softc *sc)
5256 {
5257 struct iwn_calib_state *calib = &sc->calib;
5258 struct iwn_enhanced_sensitivity_cmd cmd;
5259 int len;
5260
5261 memset(&cmd, 0, sizeof cmd);
5262 len = sizeof (struct iwn_sensitivity_cmd);
5263 cmd.which = IWN_SENSITIVITY_WORKTBL;
5264 /* OFDM modulation. */
5265 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1);
5266 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
5267 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4);
5268 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
5269 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm);
5270 cmd.energy_ofdm_th = htole16(62);
5271 /* CCK modulation. */
5272 cmd.corr_cck_x4 = htole16(calib->cck_x4);
5273 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4);
5274 cmd.energy_cck = htole16(calib->energy_cck);
5275 /* Barker modulation: use default values. */
5276 cmd.corr_barker = htole16(190);
5277 cmd.corr_barker_mrc = htole16(390);
5278 if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
5279 goto send;
5280 /* Enhanced sensitivity settings. */
5281 len = sizeof (struct iwn_enhanced_sensitivity_cmd);
5282 cmd.ofdm_det_slope_mrc = htole16(668);
5283 cmd.ofdm_det_icept_mrc = htole16(4);
5284 cmd.ofdm_det_slope = htole16(486);
5285 cmd.ofdm_det_icept = htole16(37);
5286 cmd.cck_det_slope_mrc = htole16(853);
5287 cmd.cck_det_icept_mrc = htole16(4);
5288 cmd.cck_det_slope = htole16(476);
5289 cmd.cck_det_icept = htole16(99);
5290 send:
5291
5292 sc->sc_sens->ofdm_x1.value.ul = calib->ofdm_x1;
5293 sc->sc_sens->ofdm_mrc_x1.value.ul = calib->ofdm_mrc_x1;
5294 sc->sc_sens->ofdm_x4.value.ul = calib->ofdm_x4;
5295 sc->sc_sens->ofdm_mrc_x4.value.ul = calib->ofdm_mrc_x4;
5296 sc->sc_sens->cck_x4.value.ul = calib->cck_x4;
5297 sc->sc_sens->cck_mrc_x4.value.ul = calib->cck_mrc_x4;
5298 sc->sc_sens->energy_cck.value.ul = calib->energy_cck;
5299
5300 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
5301 }
5302
5303 /*
5304 * Set STA mode power saving level (between 0 and 5).
5305 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
5306 */
5307 static int
5308 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
5309 {
5310 struct iwn_pmgt_cmd cmd;
5311 const struct iwn_pmgt *pmgt;
5312 uint32_t maxp, skip_dtim;
5313 uint32_t reg;
5314 int i;
5315
5316 /* Select which PS parameters to use. */
5317 if (dtim <= 2)
5318 pmgt = &iwn_pmgt[0][level];
5319 else if (dtim <= 10)
5320 pmgt = &iwn_pmgt[1][level];
5321 else
5322 pmgt = &iwn_pmgt[2][level];
5323
5324 memset(&cmd, 0, sizeof cmd);
5325 if (level != 0) /* not CAM */
5326 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
5327 if (level == 5)
5328 cmd.flags |= htole16(IWN_PS_FAST_PD);
5329 /* Retrieve PCIe Active State Power Management (ASPM). */
5330 reg = pci_config_get32(sc->sc_pcih,
5331 sc->sc_cap_off + PCIE_LINKCTL);
5332 if (!(reg & PCIE_LINKCTL_ASPM_CTL_L0S)) /* L0s Entry disabled. */
5333 cmd.flags |= htole16(IWN_PS_PCI_PMGT);
5334 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
5335 cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
5336
5337 if (dtim == 0) {
5338 dtim = 1;
5339 skip_dtim = 0;
5340 } else
5341 skip_dtim = pmgt->skip_dtim;
5342 if (skip_dtim != 0) {
5343 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
5344 maxp = pmgt->intval[4];
5345 if (maxp == (uint32_t)-1)
5346 maxp = dtim * (skip_dtim + 1);
5347 else if (maxp > dtim)
5348 maxp = (maxp / dtim) * dtim;
5349 } else
5350 maxp = dtim;
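	/*
	 * Illustrative numbers: with dtim = 4, skip_dtim = 2 and an
	 * intval[4] entry of (uint32_t)-1, maxp becomes 4 * (2 + 1) = 12,
	 * so each listen interval below is clamped to at most 12.
	 */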
5351 for (i = 0; i < 5; i++)
5352 cmd.intval[i] = htole32(MIN(maxp, pmgt->intval[i]));
5353
5354 sc->sc_misc->pslevel.value.ul = level;
5355 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
5356 }
5357
5358 int
iwn5000_runtime_calib(struct iwn_softc * sc)5359 iwn5000_runtime_calib(struct iwn_softc *sc)
5360 {
5361 struct iwn5000_calib_config cmd;
5362
5363 memset(&cmd, 0, sizeof cmd);
5364 cmd.ucode.once.enable = 0xffffffff;
5365 cmd.ucode.once.start = IWN5000_CALIB_DC;
5366 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
5367 }
5368
5369 static int
iwn_config_bt_coex_bluetooth(struct iwn_softc * sc)5370 iwn_config_bt_coex_bluetooth(struct iwn_softc *sc)
5371 {
5372 struct iwn_bluetooth bluetooth;
5373
5374 memset(&bluetooth, 0, sizeof bluetooth);
5375 bluetooth.flags = IWN_BT_COEX_ENABLE;
5376 bluetooth.lead_time = IWN_BT_LEAD_TIME_DEF;
5377 bluetooth.max_kill = IWN_BT_MAX_KILL_DEF;
5378
5379 return iwn_cmd(sc, IWN_CMD_BT_COEX, &bluetooth, sizeof bluetooth, 0);
5380 }
5381
5382 static int
iwn_config_bt_coex_prio_table(struct iwn_softc * sc)5383 iwn_config_bt_coex_prio_table(struct iwn_softc *sc)
5384 {
5385 uint8_t prio_table[16];
5386
5387 memset(&prio_table, 0, sizeof prio_table);
5388 prio_table[ 0] = 6; /* init calibration 1 */
5389 prio_table[ 1] = 7; /* init calibration 2 */
5390 prio_table[ 2] = 2; /* periodic calib low 1 */
5391 prio_table[ 3] = 3; /* periodic calib low 2 */
5392 prio_table[ 4] = 4; /* periodic calib high 1 */
5393 prio_table[ 5] = 5; /* periodic calib high 2 */
5394 prio_table[ 6] = 6; /* dtim */
5395 prio_table[ 7] = 8; /* scan52 */
5396 prio_table[ 8] = 10; /* scan24 */
5397
5398 return iwn_cmd(sc, IWN_CMD_BT_COEX_PRIO_TABLE,
5399 &prio_table, sizeof prio_table, 0);
5400 }
5401
5402 static int
iwn_config_bt_coex_adv_config(struct iwn_softc * sc,struct iwn_bt_basic * basic,size_t len)5403 iwn_config_bt_coex_adv_config(struct iwn_softc *sc, struct iwn_bt_basic *basic,
5404 size_t len)
5405 {
5406 struct iwn_btcoex_prot btprot;
5407 int error;
5408
5409 basic->bt.flags = IWN_BT_COEX_ENABLE;
5410 basic->bt.lead_time = IWN_BT_LEAD_TIME_DEF;
5411 basic->bt.max_kill = IWN_BT_MAX_KILL_DEF;
5412 basic->bt.bt3_timer_t7_value = IWN_BT_BT3_T7_DEF;
5413 basic->bt.kill_ack_mask = IWN_BT_KILL_ACK_MASK_DEF;
5414 basic->bt.kill_cts_mask = IWN_BT_KILL_CTS_MASK_DEF;
5415 basic->bt3_prio_sample_time = IWN_BT_BT3_PRIO_SAMPLE_DEF;
5416 basic->bt3_timer_t2_value = IWN_BT_BT3_T2_DEF;
5417 basic->bt3_lookup_table[ 0] = htole32(0xaaaaaaaa); /* Normal */
5418 basic->bt3_lookup_table[ 1] = htole32(0xaaaaaaaa);
5419 basic->bt3_lookup_table[ 2] = htole32(0xaeaaaaaa);
5420 basic->bt3_lookup_table[ 3] = htole32(0xaaaaaaaa);
5421 basic->bt3_lookup_table[ 4] = htole32(0xcc00ff28);
5422 basic->bt3_lookup_table[ 5] = htole32(0x0000aaaa);
5423 basic->bt3_lookup_table[ 6] = htole32(0xcc00aaaa);
5424 basic->bt3_lookup_table[ 7] = htole32(0x0000aaaa);
5425 basic->bt3_lookup_table[ 8] = htole32(0xc0004000);
5426 basic->bt3_lookup_table[ 9] = htole32(0x00004000);
5427 basic->bt3_lookup_table[10] = htole32(0xf0005000);
5428 basic->bt3_lookup_table[11] = htole32(0xf0005000);
5429 basic->reduce_txpower = 0; /* as not implemented */
5430 basic->valid = IWN_BT_ALL_VALID_MASK;
5431
5432 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &basic, len, 0);
5433 if (error != 0) {
5434 dev_err(sc->sc_dip, CE_WARN,
5435 "!could not configure advanced bluetooth coexistence");
5436 return error;
5437 }
5438
5439 error = iwn_config_bt_coex_prio_table(sc);
5440 if (error != 0) {
5441 dev_err(sc->sc_dip, CE_WARN,
5442 "!could not configure send BT priority table");
5443 return error;
5444 }
5445
5446 /* Force BT state machine change */
5447 memset(&btprot, 0, sizeof btprot);
5448 btprot.open = 1;
5449 btprot.type = 1;
5450 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof btprot, 1);
5451 if (error != 0) {
5452 dev_err(sc->sc_dip, CE_WARN, "!could not open BT protcol");
5453 return error;
5454 }
5455
5456 btprot.open = 0;
5457 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof btprot, 1);
5458 if (error != 0) {
5459 dev_err(sc->sc_dip, CE_WARN, "!could not close BT protcol");
5460 return error;
5461 }
5462 return 0;
5463 }
5464
5465 static int
iwn_config_bt_coex_adv1(struct iwn_softc * sc)5466 iwn_config_bt_coex_adv1(struct iwn_softc *sc)
5467 {
5468 struct iwn_bt_adv1 d;
5469
5470 memset(&d, 0, sizeof d);
5471 d.prio_boost = IWN_BT_PRIO_BOOST_DEF;
5472 d.tx_prio_boost = 0;
5473 d.rx_prio_boost = 0;
5474 return iwn_config_bt_coex_adv_config(sc, &d.basic, sizeof d);
5475 }
5476
5477 static int
iwn_config_bt_coex_adv2(struct iwn_softc * sc)5478 iwn_config_bt_coex_adv2(struct iwn_softc *sc)
5479 {
5480 struct iwn_bt_adv2 d;
5481
5482 memset(&d, 0, sizeof d);
5483 d.prio_boost = IWN_BT_PRIO_BOOST_DEF;
5484 d.tx_prio_boost = 0;
5485 d.rx_prio_boost = 0;
5486 return iwn_config_bt_coex_adv_config(sc, &d.basic, sizeof d);
5487 }
5488
5489 static int
iwn_config(struct iwn_softc * sc)5490 iwn_config(struct iwn_softc *sc)
5491 {
5492 struct iwn_ops *ops = &sc->ops;
5493 struct ieee80211com *ic = &sc->sc_ic;
5494 uint32_t txmask;
5495 uint16_t rxchain;
5496 int error;
5497
5498 error = ops->config_bt_coex(sc);
5499 if (error != 0) {
5500 dev_err(sc->sc_dip, CE_WARN,
5501 "!could not configure bluetooth coexistence");
5502 return error;
5503 }
5504
5505 /* Set radio temperature sensor offset. */
5506 if (sc->hw_type == IWN_HW_REV_TYPE_6005) {
5507 error = iwn6000_temp_offset_calib(sc);
5508 if (error != 0) {
5509 dev_err(sc->sc_dip, CE_WARN,
5510 "!could not set temperature offset");
5511 return error;
5512 }
5513 }
5514
5515 if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
5516 sc->hw_type == IWN_HW_REV_TYPE_2000 ||
5517 sc->hw_type == IWN_HW_REV_TYPE_135 ||
5518 sc->hw_type == IWN_HW_REV_TYPE_105) {
5519 error = iwn2000_temp_offset_calib(sc);
5520 if (error != 0) {
5521 dev_err(sc->sc_dip, CE_WARN,
5522 "!could not set temperature offset");
5523 return error;
5524 }
5525 }
5526
5527 if (sc->hw_type == IWN_HW_REV_TYPE_6050 ||
5528 sc->hw_type == IWN_HW_REV_TYPE_6005) {
5529 /* Configure runtime DC calibration. */
5530 error = iwn5000_runtime_calib(sc);
5531 if (error != 0) {
5532 dev_err(sc->sc_dip, CE_WARN,
5533 "!could not configure runtime calibration");
5534 return error;
5535 }
5536 }
5537
5538 /* Configure valid TX chains for 5000 Series. */
5539 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
5540 txmask = htole32(sc->txchainmask);
5541 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
5542 sizeof txmask, 0);
5543 if (error != 0) {
5544 dev_err(sc->sc_dip, CE_WARN,
5545 "!could not configure valid TX chains");
5546 return error;
5547 }
5548 }
5549
5550 /* Set mode, channel, RX filter and enable RX. */
5551 memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
5552 IEEE80211_ADDR_COPY(sc->rxon.myaddr, ic->ic_macaddr);
5553 IEEE80211_ADDR_COPY(sc->rxon.wlap, ic->ic_macaddr);
5554 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
5555 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5556 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_ibss_chan))
5557 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5558 switch (ic->ic_opmode) {
5559 case IEEE80211_M_IBSS:
5560 sc->rxon.mode = IWN_MODE_IBSS;
5561 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
5562 break;
5563 case IEEE80211_M_STA:
5564 sc->rxon.mode = IWN_MODE_STA;
5565 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
5566 break;
5567 case IEEE80211_M_MONITOR:
5568 sc->rxon.mode = IWN_MODE_MONITOR;
5569 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
5570 IWN_FILTER_CTL | IWN_FILTER_PROMISC);
5571 break;
5572 default:
5573 /* Should not get there. */
5574 ASSERT(ic->ic_opmode == IEEE80211_M_IBSS ||
5575 ic->ic_opmode == IEEE80211_M_STA ||
5576 ic->ic_opmode == IEEE80211_M_MONITOR);
5577 break;
5578 }
5579 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */
5580 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */
5581 sc->rxon.ht_single_mask = 0xff;
5582 sc->rxon.ht_dual_mask = 0xff;
5583 sc->rxon.ht_triple_mask = 0xff;
5584 rxchain =
5585 IWN_RXCHAIN_VALID(sc->rxchainmask) |
5586 IWN_RXCHAIN_MIMO_COUNT(2) |
5587 IWN_RXCHAIN_IDLE_COUNT(2);
5588 sc->rxon.rxchain = htole16(rxchain);
5589 DTRACE_PROBE2(rxon, struct iwn_rxon *, &sc->rxon, int, sc->rxonsz);
5590 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0);
5591 if (error != 0) {
5592 dev_err(sc->sc_dip, CE_WARN,
5593 "!RXON command failed");
5594 return error;
5595 }
5596
5597 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
5598 dev_err(sc->sc_dip, CE_WARN,
5599 "!could not add broadcast node");
5600 return error;
5601 }
5602
5603 /* Configuration has changed, set TX power accordingly. */
5604 if ((error = ops->set_txpower(sc, 0)) != 0) {
5605 dev_err(sc->sc_dip, CE_WARN,
5606 "!could not set TX power");
5607 return error;
5608 }
5609
5610 if ((error = iwn_set_critical_temp(sc)) != 0) {
5611 dev_err(sc->sc_dip, CE_WARN,
5612 "!could not set critical temperature");
5613 return error;
5614 }
5615
5616 /* Set power saving level to CAM during initialization. */
5617 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
5618 dev_err(sc->sc_dip, CE_WARN,
5619 "!could not set power saving level");
5620 return error;
5621 }
5622 return 0;
5623 }
5624
5625 static uint16_t
iwn_get_active_dwell_time(struct iwn_softc * sc,uint16_t flags,uint8_t n_probes)5626 iwn_get_active_dwell_time(struct iwn_softc *sc, uint16_t flags,
5627 uint8_t n_probes)
5628 {
5629 _NOTE(ARGUNUSED(sc));
5630
5631 /* No channel? Default to 2GHz settings */
5632 if (flags & IEEE80211_CHAN_2GHZ)
5633 return IWN_ACTIVE_DWELL_TIME_2GHZ +
5634 IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1);
5635
5636 /* 5GHz dwell time */
5637 return IWN_ACTIVE_DWELL_TIME_5GHZ +
5638 IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1);
5639 }
5640
5641 /*
5642 * Limit the total dwell time to 85% of the beacon interval.
5643 *
5644 * Returns the dwell time in milliseconds.
5645 */
5646 static uint16_t
iwn_limit_dwell(struct iwn_softc * sc,uint16_t dwell_time)5647 iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time)
5648 {
5649 _NOTE(ARGUNUSED(dwell_time));
5650
5651 struct ieee80211com *ic = &sc->sc_ic;
5652 struct ieee80211_node *ni = ic->ic_bss;
5653 int bintval = 0;
5654
5655 /* bintval is in TU (1.024mS) */
5656 if (ni != NULL)
5657 bintval = ni->in_intval;
5658
5659 /*
5660 * If it's non-zero, we should calculate the minimum of
5661 * it and the DWELL_BASE.
5662 *
5663 * XXX Yes, the math should take into account that bintval
5664 * is 1.024mS, not 1mS..
5665 */
5666 if (bintval > 0)
5667 return MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100));
5668
5669 /* No association context? Default */
5670 return IWN_PASSIVE_DWELL_BASE;
5671 }
5672
5673 static uint16_t
iwn_get_passive_dwell_time(struct iwn_softc * sc,uint16_t flags)5674 iwn_get_passive_dwell_time(struct iwn_softc *sc, uint16_t flags)
5675 {
5676 uint16_t passive;
5677 if (flags & IEEE80211_CHAN_2GHZ)
5678 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ;
5679 else
5680 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ;
5681
5682 /* Clamp to the beacon interval if we're associated */
5683 return iwn_limit_dwell(sc, passive);
5684 }
5685
5686 static int
iwn_scan(struct iwn_softc * sc,uint16_t flags)5687 iwn_scan(struct iwn_softc *sc, uint16_t flags)
5688 {
5689 struct ieee80211com *ic = &sc->sc_ic;
5690 struct iwn_scan_hdr *hdr;
5691 struct iwn_cmd_data *tx;
5692 struct iwn_scan_essid *essid;
5693 struct iwn_scan_chan *chan;
5694 struct ieee80211_frame *wh;
5695 struct ieee80211_rateset *rs;
5696 struct ieee80211_channel *c;
5697 uint8_t *buf, *frm;
5698 uint16_t rxchain, dwell_active, dwell_passive;
5699 uint8_t txant;
5700 int buflen, error, is_active;
5701
5702 buf = kmem_zalloc(IWN_SCAN_MAXSZ, KM_NOSLEEP);
5703 if (buf == NULL) {
5704 dev_err(sc->sc_dip, CE_WARN,
5705 "!could not allocate buffer for scan command");
5706 return ENOMEM;
5707 }
5708 hdr = (struct iwn_scan_hdr *)buf;
5709 /*
5710 * Move to the next channel if no frames are received within 20ms
5711 * after sending the probe request.
5712 */
5713 hdr->quiet_time = htole16(20); /* timeout in milliseconds */
5714 hdr->quiet_threshold = htole16(1); /* min # of packets */
5715
5716 /* Select antennas for scanning. */
5717 rxchain =
5718 IWN_RXCHAIN_VALID(sc->rxchainmask) |
5719 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
5720 IWN_RXCHAIN_DRIVER_FORCE;
5721 if ((flags & IEEE80211_CHAN_5GHZ) &&
5722 sc->hw_type == IWN_HW_REV_TYPE_4965) {
5723 /* Ant A must be avoided in 5GHz because of an HW bug. */
5724 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC);
5725 } else /* Use all available RX antennas. */
5726 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
5727 hdr->rxchain = htole16(rxchain);
5728 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
5729
5730 tx = (struct iwn_cmd_data *)(hdr + 1);
5731 tx->flags = htole32(IWN_TX_AUTO_SEQ);
5732 tx->id = sc->broadcast_id;
5733 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
5734
5735 if (flags & IEEE80211_CHAN_5GHZ) {
5736 /* Send probe requests at 6Mbps. */
5737 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
5738 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5739 } else {
5740 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
5741 /* Send probe requests at 1Mbps. */
5742 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp;
5743 tx->rflags = IWN_RFLAG_CCK;
5744 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5745 }
5746
5747 hdr->crc_threshold = 0xffff;
5748
5749 /* Use the first valid TX antenna. */
5750 txant = IWN_LSB(sc->txchainmask);
5751 tx->rflags |= IWN_RFLAG_ANT(txant);
5752
5753 /*
5754 * Only do active scanning if we're announcing a probe request
5755 * for a given SSID (or more, if we ever add it to the driver.)
5756 */
5757 is_active = 0;
5758
5759 essid = (struct iwn_scan_essid *)(tx + 1);
5760 if (ic->ic_des_esslen != 0) {
5761 char essidstr[IEEE80211_NWID_LEN+1];
5762 memcpy(essidstr, ic->ic_des_essid, ic->ic_des_esslen);
5763 essidstr[ic->ic_des_esslen] = '\0';
5764
5765 DTRACE_PROBE1(scan__direct, char *, essidstr);
5766
5767 essid[0].id = IEEE80211_ELEMID_SSID;
5768 essid[0].len = ic->ic_des_esslen;
5769 memcpy(essid[0].data, ic->ic_des_essid, ic->ic_des_esslen);
5770
5771 is_active = 1;
5772 /* hdr->crc_threshold = 0x1; */
5773 hdr->scan_flags = htole32(IWN_SCAN_PASSIVE2ACTIVE);
5774 }
5775 /*
5776 * Build a probe request frame. Most of the following code is a
5777 * copy & paste of what is done in net80211.
5778 */
5779 wh = (struct ieee80211_frame *)(essid + 20);
5780 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5781 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5782 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5783 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
5784 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
5785 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
5786 wh->i_dur[0] = wh->i_dur[1] = 0; /* filled by HW */
5787 wh->i_seq[0] = wh->i_seq[1] = 0; /* filled by HW */
5788
5789 frm = (uint8_t *)(wh + 1);
5790 frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
5791 frm = ieee80211_add_rates(frm, rs);
5792 #ifndef IEEE80211_NO_HT
5793 if (ic->ic_flags & IEEE80211_F_HTON)
5794 frm = ieee80211_add_htcaps(frm, ic);
5795 #endif
5796 if (rs->ir_nrates > IEEE80211_RATE_SIZE)
5797 frm = ieee80211_add_xrates(frm, rs);
5798
5799 /* Set length of probe request. */
5800 /*LINTED: E_PTRDIFF_OVERFLOW*/
5801 tx->len = htole16(frm - (uint8_t *)wh);
5802
5803
5804 /*
5805 * If active scanning is requested but a certain channel is
5806 * marked passive, we can do active scanning if we detect
5807 * transmissions.
5808 *
5809 * There is an issue with some firmware versions that triggers
5810 * a sysassert on a "good CRC threshold" of zero (== disabled),
5811 * on a radar channel even though this means that we should NOT
5812 * send probes.
5813 *
5814 * The "good CRC threshold" is the number of frames that we
5815 * need to receive during our dwell time on a channel before
5816 * sending out probes -- setting this to a huge value will
5817 * mean we never reach it, but at the same time work around
5818 * the aforementioned issue. Thus use IWN_GOOD_CRC_TH_NEVER
5819 * here instead of IWN_GOOD_CRC_TH_DISABLED.
5820 *
5821 * This was fixed in later versions along with some other
5822 * scan changes, and the threshold behaves as a flag in those
5823 * versions.
5824 */
5825
5826 /*
5827 * If we're doing active scanning, set the crc_threshold
5828 * to a suitable value. This is different to active veruss
5829 * passive scanning depending upon the channel flags; the
5830 * firmware will obey that particular check for us.
5831 */
5832 if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN)
5833 hdr->crc_threshold = is_active ?
5834 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED;
5835 else
5836 hdr->crc_threshold = is_active ?
5837 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER;
5838
5839 chan = (struct iwn_scan_chan *)frm;
5840 for (c = &ic->ic_sup_channels[1];
5841 c <= &ic->ic_sup_channels[IEEE80211_CHAN_MAX]; c++) {
5842 if ((c->ich_flags & flags) != flags)
5843 continue;
5844 chan->chan = htole16(ieee80211_chan2ieee(ic, c));
5845 chan->flags = 0;
5846 if (!(c->ich_flags & IEEE80211_CHAN_PASSIVE))
5847 chan->flags |= htole32(IWN_CHAN_ACTIVE);
5848 if (ic->ic_des_esslen != 0)
5849 chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
5850
5851 /*
5852 * Calculate the active/passive dwell times.
5853 */
5854
5855 dwell_active = iwn_get_active_dwell_time(sc, flags, is_active);
5856 dwell_passive = iwn_get_passive_dwell_time(sc, flags);
5857
5858 /* Make sure they're valid */
5859 if (dwell_passive <= dwell_active)
5860 dwell_passive = dwell_active + 1;
5861
5862 chan->active = htole16(dwell_active);
5863 chan->passive = htole16(dwell_passive);
5864
5865 chan->dsp_gain = 0x6e;
5866 if (IEEE80211_IS_CHAN_5GHZ(c)) {
5867 chan->rf_gain = 0x3b;
5868 } else {
5869 chan->rf_gain = 0x28;
5870 }
5871 DTRACE_PROBE5(add__channel, uint8_t, chan->chan,
5872 uint32_t, chan->flags, uint8_t, chan->rf_gain,
5873 uint16_t, chan->active, uint16_t, chan->passive);
5874 hdr->nchan++;
5875 chan++;
5876 }
5877
5878 /*LINTED: E_PTRDIFF_OVERFLOW*/
5879 buflen = (uint8_t *)chan - buf;
5880 hdr->len = htole16(buflen);
5881
5882 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
5883 kmem_free(buf, IWN_SCAN_MAXSZ);
5884 return error;
5885 }
5886
5887 static int
iwn_auth(struct iwn_softc * sc)5888 iwn_auth(struct iwn_softc *sc)
5889 {
5890 struct iwn_ops *ops = &sc->ops;
5891 struct ieee80211com *ic = &sc->sc_ic;
5892 struct ieee80211_node *ni = ic->ic_bss;
5893 int error;
5894
5895 ASSERT(ni->in_chan != NULL);
5896
5897 /* Update adapter configuration. */
5898 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->in_bssid);
5899 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->in_chan);
5900 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5901 if ((ni->in_chan != IEEE80211_CHAN_ANYC) &&
5902 IEEE80211_IS_CHAN_2GHZ(ni->in_chan))
5903 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5904 if (ic->ic_flags & IEEE80211_F_SHSLOT)
5905 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
5906 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5907 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
5908 switch (ic->ic_curmode) {
5909 case IEEE80211_MODE_11A:
5910 sc->rxon.cck_mask = 0;
5911 sc->rxon.ofdm_mask = 0x15;
5912 break;
5913 case IEEE80211_MODE_11B:
5914 sc->rxon.cck_mask = 0x03;
5915 sc->rxon.ofdm_mask = 0;
5916 break;
5917 default: /* Assume 802.11b/g. */
5918 sc->rxon.cck_mask = 0x0f;
5919 sc->rxon.ofdm_mask = 0x15;
5920 }
5921 DTRACE_PROBE2(rxon, struct iwn_rxon *, &sc->rxon, int, sc->rxonsz);
5922 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5923 if (error != 0) {
5924 dev_err(sc->sc_dip, CE_WARN,
5925 "!RXON command failed");
5926 return error;
5927 }
5928
5929 /* Configuration has changed, set TX power accordingly. */
5930 if ((error = ops->set_txpower(sc, 1)) != 0) {
5931 dev_err(sc->sc_dip, CE_WARN,
5932 "!could not set TX power");
5933 return error;
5934 }
5935 /*
5936 * Reconfiguring RXON clears the firmware nodes table so we must
5937 * add the broadcast node again.
5938 */
5939 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
5940 dev_err(sc->sc_dip, CE_WARN,
5941 "!could not add broadcast node");
5942 return error;
5943 }
5944 return 0;
5945 }
5946
5947 static int
iwn_fast_recover(struct iwn_softc * sc)5948 iwn_fast_recover(struct iwn_softc *sc)
5949 {
5950 int err = IWN_FAIL;
5951
5952 mutex_enter(&sc->sc_mtx);
5953
5954 /* restore runtime configuration */
5955 bcopy(&sc->rxon_save, &sc->rxon,
5956 sizeof (sc->rxon));
5957
5958 sc->rxon.associd = 0;
5959 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
5960
5961 if ((err = iwn_auth(sc)) != IWN_SUCCESS) {
5962 dev_err(sc->sc_dip, CE_WARN, "!iwn_fast_recover(): "
5963 "could not setup authentication");
5964 mutex_exit(&sc->sc_mtx);
5965 return (err);
5966 }
5967
5968 bcopy(&sc->rxon_save, &sc->rxon, sizeof (sc->rxon));
5969
5970 /* update adapter's configuration */
5971 err = iwn_run(sc);
5972 if (err != IWN_SUCCESS) {
5973 dev_err(sc->sc_dip, CE_WARN, "!iwn_fast_recover(): "
5974 "failed to setup association");
5975 mutex_exit(&sc->sc_mtx);
5976 return (err);
5977 }
5978 /* set LED on */
5979 iwn_set_led(sc, IWN_LED_LINK, 0, 1);
5980
5981 sc->sc_flags &= ~IWN_FLAG_HW_ERR_RECOVER;
5982 mutex_exit(&sc->sc_mtx);
5983
5984 /* start queue */
5985 DTRACE_PROBE(resume__xmit);
5986
5987 return (IWN_SUCCESS);
5988 }
5989
5990 static int
iwn_run(struct iwn_softc * sc)5991 iwn_run(struct iwn_softc *sc)
5992 {
5993 struct iwn_ops *ops = &sc->ops;
5994 struct ieee80211com *ic = &sc->sc_ic;
5995 struct ieee80211_node *ni = ic->ic_bss;
5996 struct iwn_node_info node;
5997 int error;
5998
5999 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6000 /* Link LED blinks while monitoring. */
6001 iwn_set_led(sc, IWN_LED_LINK, 5, 5);
6002 return 0;
6003 }
6004 if ((error = iwn_set_timing(sc, ni)) != 0) {
6005 dev_err(sc->sc_dip, CE_WARN,
6006 "!could not set timing");
6007 return error;
6008 }
6009
6010 /* Update adapter configuration. */
6011 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->in_bssid);
6012 sc->rxon.associd = htole16(IEEE80211_AID(ni->in_associd));
6013 /* Short preamble and slot time are negotiated when associating. */
6014 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT);
6015 if (ic->ic_flags & IEEE80211_F_SHSLOT)
6016 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
6017 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6018 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
6019 sc->rxon.filter |= htole32(IWN_FILTER_BSS);
6020 if (ic->ic_opmode != IEEE80211_M_STA &&
6021 ic->ic_opmode != IEEE80211_M_IBSS)
6022 sc->rxon.filter |= htole32(IWN_FILTER_BEACON);
6023 DTRACE_PROBE2(rxon, struct iwn_rxon *, &sc->rxon, int, sc->rxonsz);
6024 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
6025 if (error != 0) {
6026 dev_err(sc->sc_dip, CE_WARN,
6027 "!could not update configuration");
6028 return error;
6029 }
6030
6031 /* Configuration has changed, set TX power accordingly. */
6032 if ((error = ops->set_txpower(sc, 1)) != 0) {
6033 dev_err(sc->sc_dip, CE_WARN,
6034 "!could not set TX power");
6035 return error;
6036 }
6037
6038 /* Fake a join to initialize the TX rate. */
6039 ((struct iwn_node *)ni)->id = IWN_ID_BSS;
6040 iwn_newassoc(ni, 1);
6041
6042 /* Add BSS node. */
6043 memset(&node, 0, sizeof node);
6044 IEEE80211_ADDR_COPY(node.macaddr, ni->in_macaddr);
6045 node.id = IWN_ID_BSS;
6046 #ifdef notyet
6047 node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) |
6048 IWN_AMDPU_DENSITY(5)); /* 2us */
6049 #endif
6050 error = ops->add_node(sc, &node, 1);
6051 if (error != 0) {
6052 dev_err(sc->sc_dip, CE_WARN,
6053 "!could not add BSS node");
6054 return error;
6055 }
6056 if ((error = iwn_set_link_quality(sc, ni)) != 0) {
6057 dev_err(sc->sc_dip, CE_WARN,
6058 "!could not setup link quality for node %d", node.id);
6059 return error;
6060 }
6061
6062 if ((error = iwn_init_sensitivity(sc)) != 0) {
6063 dev_err(sc->sc_dip, CE_WARN,
6064 "!could not set sensitivity");
6065 return error;
6066 }
6067
6068 if ((error = iwn_qosparam_to_hw(sc, 1)) != 0) {
6069 dev_err(sc->sc_dip, CE_WARN,
6070 "!could not set QoS params");
6071 return (error);
6072 }
6073
6074 /* Start periodic calibration timer. */
6075 sc->sc_flags &= ~IWN_FLAG_STOP_CALIB_TO;
6076 sc->calib.state = IWN_CALIB_STATE_ASSOC;
6077 sc->calib_cnt = 0;
6078 sc->calib_to = timeout(iwn_calib_timeout, sc, drv_usectohz(500000));
6079
6080 /* Link LED always on while associated. */
6081 iwn_set_led(sc, IWN_LED_LINK, 0, 1);
6082 return 0;
6083 }
6084
6085 #ifdef IWN_HWCRYPTO
6086 /*
6087 * We support CCMP hardware encryption/decryption of unicast frames only.
6088 * HW support for TKIP really sucks. We should let TKIP die anyway.
6089 */
6090 static int
iwn_set_key(struct ieee80211com * ic,struct ieee80211_node * ni,struct ieee80211_key * k)6091 iwn_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
6092 struct ieee80211_key *k)
6093 {
6094 struct iwn_softc *sc = ic->ic_softc;
6095 struct iwn_ops *ops = &sc->ops;
6096 struct iwn_node *wn = (void *)ni;
6097 struct iwn_node_info node;
6098 uint16_t kflags;
6099
6100 if ((k->k_flags & IEEE80211_KEY_GROUP) ||
6101 k->k_cipher != IEEE80211_CIPHER_CCMP)
6102 return ieee80211_set_key(ic, ni, k);
6103
6104 kflags = IWN_KFLAG_CCMP | IWN_KFLAG_MAP | IWN_KFLAG_KID(k->k_id);
6105 if (k->k_flags & IEEE80211_KEY_GROUP)
6106 kflags |= IWN_KFLAG_GROUP;
6107
6108 memset(&node, 0, sizeof node);
6109 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ?
6110 sc->broadcast_id : wn->id;
6111 node.control = IWN_NODE_UPDATE;
6112 node.flags = IWN_FLAG_SET_KEY;
6113 node.kflags = htole16(kflags);
6114 node.kid = k->k_id;
6115 memcpy(node.key, k->k_key, k->k_len);
6116 DTRACE_PROBE2(set__key, int, k->k_id, int, node.id);
6117 return ops->add_node(sc, &node, 1);
6118 }
6119
6120 static void
iwn_delete_key(struct ieee80211com * ic,struct ieee80211_node * ni,struct ieee80211_key * k)6121 iwn_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
6122 struct ieee80211_key *k)
6123 {
6124 struct iwn_softc *sc = ic->ic_softc;
6125 struct iwn_ops *ops = &sc->ops;
6126 struct iwn_node *wn = (void *)ni;
6127 struct iwn_node_info node;
6128
6129 if ((k->k_flags & IEEE80211_KEY_GROUP) ||
6130 k->k_cipher != IEEE80211_CIPHER_CCMP) {
6131 /* See comment about other ciphers above. */
6132 ieee80211_delete_key(ic, ni, k);
6133 return;
6134 }
6135 if (ic->ic_state != IEEE80211_S_RUN)
6136 return; /* Nothing to do. */
6137 memset(&node, 0, sizeof node);
6138 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ?
6139 sc->broadcast_id : wn->id;
6140 node.control = IWN_NODE_UPDATE;
6141 node.flags = IWN_FLAG_SET_KEY;
6142 node.kflags = htole16(IWN_KFLAG_INVALID);
6143 node.kid = 0xff;
6144 DTRACE_PROBE1(del__key, int, node.id);
6145 (void)ops->add_node(sc, &node, 1);
6146 }
6147 #endif
6148
6149 #ifndef IEEE80211_NO_HT
6150 /*
6151 * This function is called by upper layer when an ADDBA request is received
6152 * from another STA and before the ADDBA response is sent.
6153 */
6154 static int
iwn_ampdu_rx_start(struct ieee80211com * ic,struct ieee80211_node * ni,uint8_t tid)6155 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
6156 uint8_t tid)
6157 {
6158 struct ieee80211_rx_ba *ba = &ni->in_rx_ba[tid];
6159 struct iwn_softc *sc = ic->ic_softc;
6160 struct iwn_ops *ops = &sc->ops;
6161 struct iwn_node *wn = (void *)ni;
6162 struct iwn_node_info node;
6163
6164 memset(&node, 0, sizeof node);
6165 node.id = wn->id;
6166 node.control = IWN_NODE_UPDATE;
6167 node.flags = IWN_FLAG_SET_ADDBA;
6168 node.addba_tid = tid;
6169 node.addba_ssn = htole16(ba->ba_winstart);
6170 DTRACE_PROBE3(addba, uint8_t, wn->id, uint8_t, tid, int, ba->ba_winstart);
6171 return ops->add_node(sc, &node, 1);
6172 }
6173
6174 /*
6175 * This function is called by upper layer on teardown of an HT-immediate
6176 * Block Ack agreement (eg. uppon receipt of a DELBA frame).
6177 */
6178 static void
iwn_ampdu_rx_stop(struct ieee80211com * ic,struct ieee80211_node * ni,uint8_t tid)6179 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
6180 uint8_t tid)
6181 {
6182 struct iwn_softc *sc = ic->ic_softc;
6183 struct iwn_ops *ops = &sc->ops;
6184 struct iwn_node *wn = (void *)ni;
6185 struct iwn_node_info node;
6186
6187 memset(&node, 0, sizeof node);
6188 node.id = wn->id;
6189 node.control = IWN_NODE_UPDATE;
6190 node.flags = IWN_FLAG_SET_DELBA;
6191 node.delba_tid = tid;
6192 DTRACE_PROBE2(delba, uint8_t, wn->id, uint8_t, tid);
6193 (void)ops->add_node(sc, &node, 1);
6194 }
6195
6196 /*
6197 * This function is called by upper layer when an ADDBA response is received
6198 * from another STA.
6199 */
6200 static int
iwn_ampdu_tx_start(struct ieee80211com * ic,struct ieee80211_node * ni,uint8_t tid)6201 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
6202 uint8_t tid)
6203 {
6204 struct ieee80211_tx_ba *ba = &ni->in_tx_ba[tid];
6205 struct iwn_softc *sc = ic->ic_softc;
6206 struct iwn_ops *ops = &sc->ops;
6207 struct iwn_node *wn = (void *)ni;
6208 struct iwn_node_info node;
6209 int error;
6210
6211 /* Enable TX for the specified RA/TID. */
6212 wn->disable_tid &= ~(1 << tid);
6213 memset(&node, 0, sizeof node);
6214 node.id = wn->id;
6215 node.control = IWN_NODE_UPDATE;
6216 node.flags = IWN_FLAG_SET_DISABLE_TID;
6217 node.disable_tid = htole16(wn->disable_tid);
6218 error = ops->add_node(sc, &node, 1);
6219 if (error != 0)
6220 return error;
6221
6222 if ((error = iwn_nic_lock(sc)) != 0)
6223 return error;
6224 ops->ampdu_tx_start(sc, ni, tid, ba->ba_winstart);
6225 iwn_nic_unlock(sc);
6226 return 0;
6227 }
6228
6229 static void
iwn_ampdu_tx_stop(struct ieee80211com * ic,struct ieee80211_node * ni,uint8_t tid)6230 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
6231 uint8_t tid)
6232 {
6233 struct ieee80211_tx_ba *ba = &ni->in_tx_ba[tid];
6234 struct iwn_softc *sc = ic->ic_softc;
6235 struct iwn_ops *ops = &sc->ops;
6236
6237 if (iwn_nic_lock(sc) != 0)
6238 return;
6239 ops->ampdu_tx_stop(sc, tid, ba->ba_winstart);
6240 iwn_nic_unlock(sc);
6241 }
6242
6243 static void
iwn4965_ampdu_tx_start(struct iwn_softc * sc,struct ieee80211_node * ni,uint8_t tid,uint16_t ssn)6244 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
6245 uint8_t tid, uint16_t ssn)
6246 {
6247 struct iwn_node *wn = (void *)ni;
6248 int qid = 7 + tid;
6249
6250 /* Stop TX scheduler while we're changing its configuration. */
6251 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6252 IWN4965_TXQ_STATUS_CHGACT);
6253
6254 /* Assign RA/TID translation to the queue. */
6255 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
6256 wn->id << 4 | tid);
6257
6258 /* Enable chain-building mode for the queue. */
6259 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
6260
6261 /* Set starting sequence number from the ADDBA request. */
6262 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6263 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
6264
6265 /* Set scheduler window size. */
6266 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
6267 IWN_SCHED_WINSZ);
6268 /* Set scheduler frame limit. */
6269 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
6270 IWN_SCHED_LIMIT << 16);
6271
6272 /* Enable interrupts for the queue. */
6273 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
6274
6275 /* Mark the queue as active. */
6276 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6277 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
6278 iwn_tid2fifo[tid] << 1);
6279 }
6280
6281 static void
iwn4965_ampdu_tx_stop(struct iwn_softc * sc,uint8_t tid,uint16_t ssn)6282 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
6283 {
6284 int qid = 7 + tid;
6285
6286 /* Stop TX scheduler while we're changing its configuration. */
6287 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6288 IWN4965_TXQ_STATUS_CHGACT);
6289
6290 /* Set starting sequence number from the ADDBA request. */
6291 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6292 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
6293
6294 /* Disable interrupts for the queue. */
6295 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
6296
6297 /* Mark the queue as inactive. */
6298 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6299 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
6300 }
6301
6302 static void
iwn5000_ampdu_tx_start(struct iwn_softc * sc,struct ieee80211_node * ni,uint8_t tid,uint16_t ssn)6303 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
6304 uint8_t tid, uint16_t ssn)
6305 {
6306 struct iwn_node *wn = (void *)ni;
6307 int qid = 10 + tid;
6308
6309 /* Stop TX scheduler while we're changing its configuration. */
6310 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6311 IWN5000_TXQ_STATUS_CHGACT);
6312
6313 /* Assign RA/TID translation to the queue. */
6314 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
6315 wn->id << 4 | tid);
6316
6317 /* Enable chain-building mode for the queue. */
6318 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
6319
6320 /* Enable aggregation for the queue. */
6321 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
6322
6323 /* Set starting sequence number from the ADDBA request. */
6324 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6325 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
6326
6327 /* Set scheduler window size and frame limit. */
6328 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
6329 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
6330
6331 /* Enable interrupts for the queue. */
6332 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
6333
6334 /* Mark the queue as active. */
6335 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6336 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
6337 }
6338
6339 static void
iwn5000_ampdu_tx_stop(struct iwn_softc * sc,uint8_t tid,uint16_t ssn)6340 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
6341 {
6342 int qid = 10 + tid;
6343
6344 /* Stop TX scheduler while we're changing its configuration. */
6345 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6346 IWN5000_TXQ_STATUS_CHGACT);
6347
6348 /* Disable aggregation for the queue. */
6349 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
6350
6351 /* Set starting sequence number from the ADDBA request. */
6352 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6353 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
6354
6355 /* Disable interrupts for the queue. */
6356 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
6357
6358 /* Mark the queue as inactive. */
6359 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6360 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
6361 }
6362 #endif /* !IEEE80211_NO_HT */
6363
6364 /*
6365 * Query calibration tables from the initialization firmware. We do this
6366 * only once at first boot. Called from a process context.
6367 */
6368 static int
iwn5000_query_calibration(struct iwn_softc * sc)6369 iwn5000_query_calibration(struct iwn_softc *sc)
6370 {
6371 struct iwn5000_calib_config cmd;
6372 int error;
6373 clock_t clk;
6374
6375 ASSERT(mutex_owned(&sc->sc_mtx));
6376
6377 memset(&cmd, 0, sizeof cmd);
6378 cmd.ucode.once.enable = 0xffffffff;
6379 cmd.ucode.once.start = 0xffffffff;
6380 cmd.ucode.once.send = 0xffffffff;
6381 cmd.ucode.flags = 0xffffffff;
6382 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
6383 if (error != 0)
6384 return error;
6385
6386 /* Wait at most two seconds for calibration to complete. */
6387 clk = ddi_get_lbolt() + drv_usectohz(2000000);
6388 while (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
6389 if (cv_timedwait(&sc->sc_calib_cv, &sc->sc_mtx, clk) < 0)
6390 return (IWN_FAIL);
6391
6392 return (IWN_SUCCESS);
6393 }
6394
6395 /*
6396 * Send calibration results to the runtime firmware. These results were
6397 * obtained on first boot from the initialization firmware.
6398 */
6399 static int
iwn5000_send_calibration(struct iwn_softc * sc)6400 iwn5000_send_calibration(struct iwn_softc *sc)
6401 {
6402 int idx, error;
6403
6404 for (idx = 0; idx < 5; idx++) {
6405 if (sc->calibcmd[idx].buf == NULL)
6406 continue; /* No results available. */
6407 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
6408 sc->calibcmd[idx].len, 0);
6409 if (error != 0) {
6410 dev_err(sc->sc_dip, CE_WARN,
6411 "!could not send calibration result");
6412 return error;
6413 }
6414 }
6415 return 0;
6416 }
6417
6418 static int
iwn5000_send_wimax_coex(struct iwn_softc * sc)6419 iwn5000_send_wimax_coex(struct iwn_softc *sc)
6420 {
6421 struct iwn5000_wimax_coex wimax;
6422
6423 #ifdef notyet
6424 if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
6425 /* Enable WiMAX coexistence for combo adapters. */
6426 wimax.flags =
6427 IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
6428 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
6429 IWN_WIMAX_COEX_STA_TABLE_VALID |
6430 IWN_WIMAX_COEX_ENABLE;
6431 memcpy(wimax.events, iwn6050_wimax_events,
6432 sizeof iwn6050_wimax_events);
6433 } else
6434 #endif
6435 {
6436 /* Disable WiMAX coexistence. */
6437 wimax.flags = 0;
6438 memset(wimax.events, 0, sizeof wimax.events);
6439 }
6440 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
6441 }
6442
6443 static int
iwn6000_temp_offset_calib(struct iwn_softc * sc)6444 iwn6000_temp_offset_calib(struct iwn_softc *sc)
6445 {
6446 struct iwn6000_phy_calib_temp_offset cmd;
6447
6448 memset(&cmd, 0, sizeof cmd);
6449 cmd.code = IWN6000_PHY_CALIB_TEMP_OFFSET;
6450 cmd.ngroups = 1;
6451 cmd.isvalid = 1;
6452 if (sc->eeprom_temp != 0)
6453 cmd.offset = htole16(sc->eeprom_temp);
6454 else
6455 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
6456 sc->sc_toff.t6000->toff.value.l = le16toh(cmd.offset);
6457 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6458 }
6459
6460 static int
iwn2000_temp_offset_calib(struct iwn_softc * sc)6461 iwn2000_temp_offset_calib(struct iwn_softc *sc)
6462 {
6463 struct iwn2000_phy_calib_temp_offset cmd;
6464
6465 memset(&cmd, 0, sizeof cmd);
6466 cmd.code = IWN2000_PHY_CALIB_TEMP_OFFSET;
6467 cmd.ngroups = 1;
6468 cmd.isvalid = 1;
6469 if (sc->eeprom_rawtemp != 0) {
6470 cmd.offset_low = htole16(sc->eeprom_rawtemp);
6471 cmd.offset_high = htole16(sc->eeprom_temp);
6472 } else {
6473 cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET);
6474 cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET);
6475 }
6476 cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage);
6477 sc->sc_toff.t2000->toff_lo.value.l = le16toh(cmd.offset_low);
6478 sc->sc_toff.t2000->toff_hi.value.l = le16toh(cmd.offset_high);
6479 sc->sc_toff.t2000->volt.value.l = le16toh(cmd.burnt_voltage_ref);
6480
6481 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6482 }
6483
6484 /*
6485 * This function is called after the runtime firmware notifies us of its
6486 * readiness (called in a process context).
6487 */
6488 static int
iwn4965_post_alive(struct iwn_softc * sc)6489 iwn4965_post_alive(struct iwn_softc *sc)
6490 {
6491 int error, qid;
6492
6493 if ((error = iwn_nic_lock(sc)) != 0)
6494 return error;
6495
6496 /* Clear TX scheduler state in SRAM. */
6497 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6498 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
6499 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
6500
6501 /* Set physical address of TX scheduler rings (1KB aligned). */
6502 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6503
6504 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6505
6506 /* Disable chain mode for all our 16 queues. */
6507 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
6508
6509 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
6510 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
6511 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6512
6513 /* Set scheduler window size. */
6514 iwn_mem_write(sc, sc->sched_base +
6515 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
6516 /* Set scheduler frame limit. */
6517 iwn_mem_write(sc, sc->sched_base +
6518 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
6519 IWN_SCHED_LIMIT << 16);
6520 }
6521
6522 /* Enable interrupts for all our 16 queues. */
6523 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
6524 /* Identify TX FIFO rings (0-7). */
6525 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
6526
6527 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6528 for (qid = 0; qid < 7; qid++) {
6529 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
6530 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6531 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
6532 }
6533 iwn_nic_unlock(sc);
6534 return 0;
6535 }
6536
6537 /*
6538 * This function is called after the initialization or runtime firmware
6539 * notifies us of its readiness (called in a process context).
6540 */
6541 static int
iwn5000_post_alive(struct iwn_softc * sc)6542 iwn5000_post_alive(struct iwn_softc *sc)
6543 {
6544 int error, qid;
6545
6546 /* Switch to using ICT interrupt mode. */
6547 iwn5000_ict_reset(sc);
6548
6549 if ((error = iwn_nic_lock(sc)) != 0)
6550 return error;
6551
6552 /* Clear TX scheduler state in SRAM. */
6553 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6554 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
6555 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
6556
6557 /* Set physical address of TX scheduler rings (1KB aligned). */
6558 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6559
6560 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6561
6562 /* Enable chain mode for all queues, except command queue. */
6563 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
6564 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
6565
6566 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
6567 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
6568 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6569
6570 iwn_mem_write(sc, sc->sched_base +
6571 IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
6572 /* Set scheduler window size and frame limit. */
6573 iwn_mem_write(sc, sc->sched_base +
6574 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
6575 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
6576 }
6577
6578 /* Enable interrupts for all our 20 queues. */
6579 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
6580 /* Identify TX FIFO rings (0-7). */
6581 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
6582
6583 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6584 for (qid = 0; qid < 7; qid++) {
6585 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
6586 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6587 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
6588 }
6589 iwn_nic_unlock(sc);
6590
6591 /* Configure WiMAX coexistence for combo adapters. */
6592 error = iwn5000_send_wimax_coex(sc);
6593 if (error != 0) {
6594 dev_err(sc->sc_dip, CE_WARN,
6595 "!could not configure WiMAX coexistence");
6596 return error;
6597 }
6598 if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
6599 struct iwn5000_phy_calib_crystal cmd;
6600
6601 /* Perform crystal calibration. */
6602 memset(&cmd, 0, sizeof cmd);
6603 cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
6604 cmd.ngroups = 1;
6605 cmd.isvalid = 1;
6606 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
6607 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
6608 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6609 if (error != 0) {
6610 dev_err(sc->sc_dip, CE_WARN,
6611 "!crystal calibration failed");
6612 return error;
6613 }
6614 }
6615 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
6616 /* Query calibration from the initialization firmware. */
6617 if ((error = iwn5000_query_calibration(sc)) != 0) {
6618 dev_err(sc->sc_dip, CE_WARN,
6619 "!could not query calibration");
6620 return error;
6621 }
6622 /*
6623 * We have the calibration results now, reboot with the
6624 * runtime firmware (call ourselves recursively!)
6625 */
6626 iwn_hw_stop(sc, B_FALSE);
6627 error = iwn_hw_init(sc);
6628 } else {
6629 /* Send calibration results to runtime firmware. */
6630 error = iwn5000_send_calibration(sc);
6631 }
6632 return error;
6633 }
6634
6635 /*
6636 * The firmware boot code is small and is intended to be copied directy into
6637 * the NIC internal memory (no DMA transfer).
6638 */
6639 static int
iwn4965_load_bootcode(struct iwn_softc * sc,const uint8_t * ucode,int size)6640 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
6641 {
6642 int error, ntries;
6643
6644 size /= sizeof (uint32_t);
6645
6646 if ((error = iwn_nic_lock(sc)) != 0)
6647 return error;
6648
6649 /* Copy microcode image into NIC memory. */
6650 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
6651 /*LINTED: E_PTR_BAD_CAST_ALIGN*/
6652 (const uint32_t *)ucode, size);
6653
6654 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
6655 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
6656 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
6657
6658 /* Start boot load now. */
6659 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
6660
6661 /* Wait for transfer to complete. */
6662 for (ntries = 0; ntries < 1000; ntries++) {
6663 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
6664 IWN_BSM_WR_CTRL_START))
6665 break;
6666 DELAY(10);
6667 }
6668 if (ntries == 1000) {
6669 dev_err(sc->sc_dip, CE_WARN,
6670 "!could not load boot firmware");
6671 iwn_nic_unlock(sc);
6672 return ETIMEDOUT;
6673 }
6674
6675 /* Enable boot after power up. */
6676 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
6677
6678 iwn_nic_unlock(sc);
6679 return 0;
6680 }
6681
6682 static int
iwn4965_load_firmware(struct iwn_softc * sc)6683 iwn4965_load_firmware(struct iwn_softc *sc)
6684 {
6685 struct iwn_fw_info *fw = &sc->fw;
6686 struct iwn_dma_info *dma = &sc->fw_dma;
6687 int error;
6688 clock_t clk;
6689
6690 ASSERT(mutex_owned(&sc->sc_mtx));
6691
6692 /* Copy initialization sections into pre-allocated DMA-safe memory. */
6693 memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
6694 memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6695 fw->init.text, fw->init.textsz);
6696 (void) ddi_dma_sync(dma->dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
6697
6698 /* Tell adapter where to find initialization sections. */
6699 if ((error = iwn_nic_lock(sc)) != 0)
6700 return error;
6701 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6702 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
6703 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6704 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6705 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
6706 iwn_nic_unlock(sc);
6707
6708 /* Load firmware boot code. */
6709 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
6710 if (error != 0) {
6711 dev_err(sc->sc_dip, CE_WARN,
6712 "!could not load boot firmware");
6713 return error;
6714 }
6715 /* Now press "execute". */
6716 IWN_WRITE(sc, IWN_RESET, 0);
6717
6718 /* Wait at most one second for first alive notification. */
6719 clk = ddi_get_lbolt() + drv_usectohz(1000000);
6720 while ((sc->sc_flags & IWN_FLAG_FW_ALIVE) == 0) {
6721 if (cv_timedwait(&sc->sc_alive_cv, &sc->sc_mtx, clk) < 0) {
6722 dev_err(sc->sc_dip, CE_WARN,
6723 "!timeout waiting for adapter to initialize");
6724 return (IWN_FAIL);
6725 }
6726 }
6727
6728 /* Retrieve current temperature for initial TX power calibration. */
6729 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
6730 sc->temp = iwn4965_get_temperature(sc);
6731 sc->sc_misc->temp.value.ul = sc->temp;
6732
6733 /* Copy runtime sections into pre-allocated DMA-safe memory. */
6734 memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
6735 memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6736 fw->main.text, fw->main.textsz);
6737 (void) ddi_dma_sync(dma->dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
6738
6739 /* Tell adapter where to find runtime sections. */
6740 if ((error = iwn_nic_lock(sc)) != 0)
6741 return error;
6742 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6743 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
6744 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6745 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6746 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
6747 IWN_FW_UPDATED | fw->main.textsz);
6748 iwn_nic_unlock(sc);
6749
6750 return 0;
6751 }
6752
6753 static int
iwn5000_load_firmware_section(struct iwn_softc * sc,uint32_t dst,const uint8_t * section,int size)6754 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
6755 const uint8_t *section, int size)
6756 {
6757 struct iwn_dma_info *dma = &sc->fw_dma;
6758 int error;
6759 clock_t clk;
6760
6761 ASSERT(mutex_owned(&sc->sc_mtx));
6762
6763 /* Copy firmware section into pre-allocated DMA-safe memory. */
6764 memcpy(dma->vaddr, section, size);
6765 (void) ddi_dma_sync(dma->dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
6766
6767 if ((error = iwn_nic_lock(sc)) != 0)
6768 return error;
6769
6770 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6771 IWN_FH_TX_CONFIG_DMA_PAUSE);
6772
6773 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
6774 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
6775 IWN_LOADDR(dma->paddr));
6776 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
6777 IWN_HIADDR(dma->paddr) << 28 | size);
6778 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
6779 IWN_FH_TXBUF_STATUS_TBNUM(1) |
6780 IWN_FH_TXBUF_STATUS_TBIDX(1) |
6781 IWN_FH_TXBUF_STATUS_TFBD_VALID);
6782
6783 /* Kick Flow Handler to start DMA transfer. */
6784 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6785 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
6786
6787 iwn_nic_unlock(sc);
6788
6789 /* Wait at most five seconds for FH DMA transfer to complete. */
6790 clk = ddi_get_lbolt() + drv_usectohz(5000000);
6791 while ((sc->sc_flags & IWN_FLAG_FW_DMA) == 0) {
6792 if (cv_timedwait(&sc->sc_fhdma_cv, &sc->sc_mtx, clk) < 0)
6793 return (IWN_FAIL);
6794 }
6795 sc->sc_flags &= ~IWN_FLAG_FW_DMA;
6796
6797 return (IWN_SUCCESS);
6798 }
6799
6800 static int
iwn5000_load_firmware(struct iwn_softc * sc)6801 iwn5000_load_firmware(struct iwn_softc *sc)
6802 {
6803 struct iwn_fw_part *fw;
6804 int error;
6805
6806 /* Load the initialization firmware on first boot only. */
6807 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
6808 &sc->fw.main : &sc->fw.init;
6809
6810 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
6811 fw->text, fw->textsz);
6812 if (error != 0) {
6813 dev_err(sc->sc_dip, CE_WARN,
6814 "!could not load firmware %s section", ".text");
6815 return error;
6816 }
6817 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
6818 fw->data, fw->datasz);
6819 if (error != 0) {
6820 dev_err(sc->sc_dip, CE_WARN,
6821 "!could not load firmware %s section", ".data");
6822 return error;
6823 }
6824
6825 /* Now press "execute". */
6826 IWN_WRITE(sc, IWN_RESET, 0);
6827 return 0;
6828 }
6829
6830 /*
6831 * Extract text and data sections from a legacy firmware image.
6832 */
6833 static int
iwn_read_firmware_leg(struct iwn_softc * sc,struct iwn_fw_info * fw)6834 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
6835 {
6836 _NOTE(ARGUNUSED(sc));
6837 const uint32_t *ptr;
6838 size_t hdrlen = 24;
6839 uint32_t rev;
6840
6841 /*LINTED: E_PTR_BAD_CAST_ALIGN*/
6842 ptr = (const uint32_t *)fw->data;
6843 rev = le32toh(*ptr++);
6844
6845 /* Check firmware API version. */
6846 if (IWN_FW_API(rev) <= 1) {
6847 dev_err(sc->sc_dip, CE_WARN,
6848 "!bad firmware, need API version >=2");
6849 return EINVAL;
6850 }
6851 if (IWN_FW_API(rev) >= 3) {
6852 /* Skip build number (version 2 header). */
6853 hdrlen += 4;
6854 ptr++;
6855 }
6856 if (fw->size < hdrlen) {
6857 dev_err(sc->sc_dip, CE_WARN,
6858 "!firmware too short: %lld bytes", (longlong_t)fw->size);
6859 return EINVAL;
6860 }
6861 fw->main.textsz = le32toh(*ptr++);
6862 fw->main.datasz = le32toh(*ptr++);
6863 fw->init.textsz = le32toh(*ptr++);
6864 fw->init.datasz = le32toh(*ptr++);
6865 fw->boot.textsz = le32toh(*ptr++);
6866
6867 /* Check that all firmware sections fit. */
6868 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
6869 fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
6870 dev_err(sc->sc_dip, CE_WARN,
6871 "!firmware too short: %lld bytes", (longlong_t)fw->size);
6872 return EINVAL;
6873 }
6874
6875 /* Get pointers to firmware sections. */
6876 fw->main.text = (const uint8_t *)ptr;
6877 fw->main.data = fw->main.text + fw->main.textsz;
6878 fw->init.text = fw->main.data + fw->main.datasz;
6879 fw->init.data = fw->init.text + fw->init.textsz;
6880 fw->boot.text = fw->init.data + fw->init.datasz;
6881 return 0;
6882 }
6883
6884 /*
6885 * Extract text and data sections from a TLV firmware image.
6886 */
6887 static int
iwn_read_firmware_tlv(struct iwn_softc * sc,struct iwn_fw_info * fw,uint16_t alt)6888 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
6889 uint16_t alt)
6890 {
6891 _NOTE(ARGUNUSED(sc));
6892 const struct iwn_fw_tlv_hdr *hdr;
6893 const struct iwn_fw_tlv *tlv;
6894 const uint8_t *ptr, *end;
6895 uint64_t altmask;
6896 uint32_t len;
6897
6898 if (fw->size < sizeof (*hdr)) {
6899 dev_err(sc->sc_dip, CE_WARN,
6900 "!firmware too short: %lld bytes", (longlong_t)fw->size);
6901 return EINVAL;
6902 }
6903 hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
6904 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
6905 dev_err(sc->sc_dip, CE_WARN,
6906 "!bad firmware signature 0x%08x", le32toh(hdr->signature));
6907 return EINVAL;
6908 }
6909
6910 /*
6911 * Select the closest supported alternative that is less than
6912 * or equal to the specified one.
6913 */
6914 altmask = le64toh(hdr->altmask);
6915 while (alt > 0 && !(altmask & (1ULL << alt)))
6916 alt--; /* Downgrade. */
6917 IWN_DBG("using alternative %d", alt);
6918
6919 ptr = (const uint8_t *)(hdr + 1);
6920 end = (const uint8_t *)(fw->data + fw->size);
6921
6922 /* Parse type-length-value fields. */
6923 while (ptr + sizeof (*tlv) <= end) {
6924 tlv = (const struct iwn_fw_tlv *)ptr;
6925 len = le32toh(tlv->len);
6926
6927 ptr += sizeof (*tlv);
6928 if (ptr + len > end) {
6929 dev_err(sc->sc_dip, CE_WARN,
6930 "!firmware too short: %lld bytes",
6931 (longlong_t)fw->size);
6932 return EINVAL;
6933 }
6934 /* Skip other alternatives. */
6935 if (tlv->alt != 0 && le16toh(tlv->alt) != alt) {
6936 IWN_DBG("skipping other alternative");
6937 goto next;
6938 }
6939
6940 switch (le16toh(tlv->type)) {
6941 case IWN_FW_TLV_MAIN_TEXT:
6942 fw->main.text = ptr;
6943 fw->main.textsz = len;
6944 break;
6945 case IWN_FW_TLV_MAIN_DATA:
6946 fw->main.data = ptr;
6947 fw->main.datasz = len;
6948 break;
6949 case IWN_FW_TLV_INIT_TEXT:
6950 fw->init.text = ptr;
6951 fw->init.textsz = len;
6952 break;
6953 case IWN_FW_TLV_INIT_DATA:
6954 fw->init.data = ptr;
6955 fw->init.datasz = len;
6956 break;
6957 case IWN_FW_TLV_BOOT_TEXT:
6958 fw->boot.text = ptr;
6959 fw->boot.textsz = len;
6960 break;
6961 case IWN_FW_TLV_ENH_SENS:
6962 if (len != 0) {
6963 dev_err(sc->sc_dip, CE_WARN,
6964 "!TLV type %d has invalid size %u",
6965 le16toh(tlv->type), len);
6966 goto next;
6967 }
6968 sc->sc_flags |= IWN_FLAG_ENH_SENS;
6969 break;
6970 case IWN_FW_TLV_PHY_CALIB:
6971 if (len != sizeof(uint32_t)) {
6972 dev_err(sc->sc_dip, CE_WARN,
6973 "!TLV type %d has invalid size %u",
6974 le16toh(tlv->type), len);
6975 goto next;
6976 }
6977 if (le32toh(*ptr) <= IWN5000_PHY_CALIB_MAX) {
6978 sc->reset_noise_gain = le32toh(*ptr);
6979 sc->noise_gain = le32toh(*ptr) + 1;
6980 }
6981 break;
6982 case IWN_FW_TLV_FLAGS:
6983 if (len < sizeof(uint32_t))
6984 break;
6985 if (len % sizeof(uint32_t))
6986 break;
6987 sc->tlv_feature_flags = le32toh(*ptr);
6988 IWN_DBG("feature: 0x%08x", sc->tlv_feature_flags);
6989 break;
6990 default:
6991 IWN_DBG("TLV type %d not handled", le16toh(tlv->type));
6992 break;
6993 }
6994 next: /* TLV fields are 32-bit aligned. */
6995 ptr += (len + 3) & ~3;
6996 }
6997 return 0;
6998 }
6999
7000 static int
iwn_read_firmware(struct iwn_softc * sc)7001 iwn_read_firmware(struct iwn_softc *sc)
7002 {
7003 struct iwn_fw_info *fw = &sc->fw;
7004 firmware_handle_t fwh;
7005 int error;
7006
7007 /*
7008 * Some PHY calibration commands are firmware-dependent; these
7009 * are the default values that will be overridden if
7010 * necessary.
7011 */
7012 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
7013 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
7014
7015 /* Initialize for error returns */
7016 fw->data = NULL;
7017 fw->size = 0;
7018
7019 /* Open firmware image. */
7020 if ((error = firmware_open("iwn", sc->fwname, &fwh)) != 0) {
7021 dev_err(sc->sc_dip, CE_WARN,
7022 "!could not get firmware handle %s", sc->fwname);
7023 return error;
7024 }
7025 fw->size = firmware_get_size(fwh);
7026 if (fw->size < sizeof (uint32_t)) {
7027 dev_err(sc->sc_dip, CE_WARN,
7028 "!firmware too short: %lld bytes", (longlong_t)fw->size);
7029 (void) firmware_close(fwh);
7030 return EINVAL;
7031 }
7032
7033 /* Read the firmware. */
7034 fw->data = kmem_alloc(fw->size, KM_SLEEP);
7035 error = firmware_read(fwh, 0, fw->data, fw->size);
7036 (void) firmware_close(fwh);
7037 if (error != 0) {
7038 dev_err(sc->sc_dip, CE_WARN,
7039 "!could not read firmware %s", sc->fwname);
7040 goto out;
7041 }
7042
7043 /* Retrieve text and data sections. */
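	/*
	 * Legacy (pre-TLV) images begin with a non-zero uCode version
	 * word; TLV images begin with a zero word followed by the
	 * IWN_FW_SIGNATURE magic that iwn_read_firmware_tlv() checks.
	 */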
7044 /*LINTED: E_PTR_BAD_CAST_ALIGN*/
7045 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */
7046 error = iwn_read_firmware_leg(sc, fw);
7047 else
7048 error = iwn_read_firmware_tlv(sc, fw, 1);
7049 if (error != 0) {
7050 dev_err(sc->sc_dip, CE_WARN,
7051 "!could not read firmware sections");
7052 goto out;
7053 }
7054
7055 /* Make sure text and data sections fit in hardware memory. */
7056 if (fw->main.textsz > sc->fw_text_maxsz ||
7057 fw->main.datasz > sc->fw_data_maxsz ||
7058 fw->init.textsz > sc->fw_text_maxsz ||
7059 fw->init.datasz > sc->fw_data_maxsz ||
7060 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
7061 (fw->boot.textsz & 3) != 0) {
7062 dev_err(sc->sc_dip, CE_WARN,
7063 "!firmware sections too large");
7064 goto out;
7065 }
7066
7067 /* We can proceed with loading the firmware. */
7068 return 0;
7069 out:
7070 kmem_free(fw->data, fw->size);
7071 fw->data = NULL;
7072 fw->size = 0;
7073 return error ? error : EINVAL;
7074 }
7075
7076 static int
7077 iwn_clock_wait(struct iwn_softc *sc)
7078 {
7079 int ntries;
7080
7081 /* Set "initialization complete" bit. */
7082 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
7083
7084 /* Wait for clock stabilization. */
7085 for (ntries = 0; ntries < 2500; ntries++) {
7086 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
7087 return 0;
7088 DELAY(10);
7089 }
7090 dev_err(sc->sc_dip, CE_WARN,
7091 "!timeout waiting for clock stabilization");
7092 return ETIMEDOUT;
7093 }
7094
7095 static int
7096 iwn_apm_init(struct iwn_softc *sc)
7097 {
7098 uint32_t reg;
7099 int error;
7100
7101 /* Disable L0s exit timer (NMI bug workaround). */
7102 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
7103 /* Don't wait for ICH L0s (ICH bug workaround). */
7104 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
7105
7106 /* Set FH wait threshold to max (HW bug under stress workaround). */
7107 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
7108
7109 /* Enable HAP INTA to move adapter from L1a to L0s. */
7110 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
7111
7112 /* Retrieve PCIe Active State Power Management (ASPM). */
7113 reg = pci_config_get32(sc->sc_pcih,
7114 sc->sc_cap_off + PCIE_LINKCTL);
7115 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
7116 if (reg & PCIE_LINKCTL_ASPM_CTL_L1) /* L1 Entry enabled. */
7117 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
7118 else
7119 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
7120
7121 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
7122 sc->hw_type <= IWN_HW_REV_TYPE_1000)
7123 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
7124
7125 /* Wait for clock stabilization before accessing prph. */
7126 if ((error = iwn_clock_wait(sc)) != 0)
7127 return error;
7128
7129 if ((error = iwn_nic_lock(sc)) != 0)
7130 return error;
7131 if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
7132 /* Enable DMA and BSM (Bootstrap State Machine). */
7133 iwn_prph_write(sc, IWN_APMG_CLK_EN,
7134 IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
7135 IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
7136 } else {
7137 /* Enable DMA. */
7138 iwn_prph_write(sc, IWN_APMG_CLK_EN,
7139 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
7140 }
7141 DELAY(20);
7142 /* Disable L1-Active. */
7143 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
7144 iwn_nic_unlock(sc);
7145
7146 return 0;
7147 }
7148
7149 static void
7150 iwn_apm_stop_master(struct iwn_softc *sc)
7151 {
7152 int ntries;
7153
7154 /* Stop busmaster DMA activity. */
7155 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
7156 for (ntries = 0; ntries < 100; ntries++) {
7157 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
7158 return;
7159 DELAY(10);
7160 }
7161 dev_err(sc->sc_dip, CE_WARN,
7162 "!timeout waiting for master");
7163 }
7164
7165 static void
7166 iwn_apm_stop(struct iwn_softc *sc)
7167 {
7168 iwn_apm_stop_master(sc);
7169
7170 /* Reset the entire device. */
7171 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
7172 DELAY(10);
7173 /* Clear "initialization complete" bit. */
7174 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
7175 }
7176
7177 static int
7178 iwn4965_nic_config(struct iwn_softc *sc)
7179 {
7180 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
7181 /*
7182 * I don't believe this to be correct but this is what the
7183 * vendor driver is doing. Probably the bits should not be
7184 * shifted in IWN_RFCFG_*.
7185 */
7186 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7187 IWN_RFCFG_TYPE(sc->rfcfg) |
7188 IWN_RFCFG_STEP(sc->rfcfg) |
7189 IWN_RFCFG_DASH(sc->rfcfg));
7190 }
7191 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7192 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
7193 return 0;
7194 }
7195
7196 static int
7197 iwn5000_nic_config(struct iwn_softc *sc)
7198 {
7199 uint32_t tmp;
7200 int error;
7201
7202 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
7203 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7204 IWN_RFCFG_TYPE(sc->rfcfg) |
7205 IWN_RFCFG_STEP(sc->rfcfg) |
7206 IWN_RFCFG_DASH(sc->rfcfg));
7207 }
7208 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7209 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
7210
7211 if ((error = iwn_nic_lock(sc)) != 0)
7212 return error;
7213 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
7214
7215 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
7216 /*
7217 * Select first Switching Voltage Regulator (1.32V) to
7218 * solve a stability issue related to noisy DC2DC line
7219 * in the silicon of 1000 Series.
7220 */
7221 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
7222 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
7223 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
7224 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
7225 }
7226 iwn_nic_unlock(sc);
7227
7228 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
7229 /* Use internal power amplifier only. */
7230 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
7231 }
7232 if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
7233 sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
7234 /* Indicate that ROM calibration version is >=6. */
7235 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
7236 }
7237 if (sc->hw_type == IWN_HW_REV_TYPE_6005)
7238 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
7239 if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
7240 sc->hw_type == IWN_HW_REV_TYPE_2000 ||
7241 sc->hw_type == IWN_HW_REV_TYPE_135 ||
7242 sc->hw_type == IWN_HW_REV_TYPE_105)
7243 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_IQ_INVERT);
7244 return 0;
7245 }
7246
7247 /*
7248 * Take NIC ownership over Intel Active Management Technology (AMT).
7249 */
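/*
 * The handshake below first requests NIC_READY and polls briefly for it
 * to latch.  If that fails, the PREPARE bit is set, PREPARE_DONE is
 * polled until it clears, and the NIC_READY request is retried.
 */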
7250 static int
7251 iwn_hw_prepare(struct iwn_softc *sc)
7252 {
7253 int ntries;
7254
7255 /* Check if hardware is ready. */
7256 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
7257 for (ntries = 0; ntries < 5; ntries++) {
7258 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
7259 IWN_HW_IF_CONFIG_NIC_READY)
7260 return 0;
7261 DELAY(10);
7262 }
7263
7264 /* Hardware not ready, force into ready state. */
7265 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
7266 for (ntries = 0; ntries < 15000; ntries++) {
7267 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
7268 IWN_HW_IF_CONFIG_PREPARE_DONE))
7269 break;
7270 DELAY(10);
7271 }
7272 if (ntries == 15000)
7273 return ETIMEDOUT;
7274
7275 /* Hardware should be ready now. */
7276 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
7277 for (ntries = 0; ntries < 5; ntries++) {
7278 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
7279 IWN_HW_IF_CONFIG_NIC_READY)
7280 return 0;
7281 DELAY(10);
7282 }
7283 return ETIMEDOUT;
7284 }
7285
7286 static int
7287 iwn_hw_init(struct iwn_softc *sc)
7288 {
7289 struct iwn_ops *ops = &sc->ops;
7290 int error, chnl, qid;
7291 clock_t clk;
7292 uint32_t rx_config;
7293
7294 ASSERT(mutex_owned(&sc->sc_mtx));
7295
7296 /* Clear pending interrupts. */
7297 IWN_WRITE(sc, IWN_INT, 0xffffffff);
7298
7299 if ((error = iwn_apm_init(sc)) != 0) {
7300 dev_err(sc->sc_dip, CE_WARN,
7301 "!could not power ON adapter");
7302 return error;
7303 }
7304
7305 /* Select VMAIN power source. */
7306 if ((error = iwn_nic_lock(sc)) != 0)
7307 return error;
7308 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
7309 iwn_nic_unlock(sc);
7310
7311 /* Perform adapter-specific initialization. */
7312 if ((error = ops->nic_config(sc)) != 0)
7313 return error;
7314
7315 /* Initialize RX ring. */
7316 if ((error = iwn_nic_lock(sc)) != 0)
7317 return error;
7318 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
7319 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
7320 /* Set physical address of RX ring (256-byte aligned). */
7321 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
7322 /* Set physical address of RX status (16-byte aligned). */
7323 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
7324 /* Enable RX. */
7325 rx_config =
7326 IWN_FH_RX_CONFIG_ENA |
7327 #if IWN_RBUF_SIZE == 8192
7328 IWN_FH_RX_CONFIG_RB_SIZE_8K |
7329 #endif
7330 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */
7331 IWN_FH_RX_CONFIG_IRQ_DST_HOST |
7332 IWN_FH_RX_CONFIG_SINGLE_FRAME |
7333 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
7334 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG);
7335 IWN_WRITE(sc, IWN_FH_RX_CONFIG, rx_config);
7336 iwn_nic_unlock(sc);
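	/*
	 * The hardware apparently expects the RX write pointer to be a
	 * multiple of 8; the "& ~7" below rounds it down accordingly.
	 */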
7337 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
7338
7339 if ((error = iwn_nic_lock(sc)) != 0)
7340 return error;
7341
7342 /* Initialize TX scheduler. */
7343 iwn_prph_write(sc, sc->sched_txfact_addr, 0);
7344
7345 /* Set physical address of "keep warm" page (16-byte aligned). */
7346 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
7347
7348 /* Initialize TX rings. */
7349 for (qid = 0; qid < sc->ntxqs; qid++) {
7350 struct iwn_tx_ring *txq = &sc->txq[qid];
7351
7352 /* Set physical address of TX ring (256-byte aligned). */
7353 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
7354 txq->desc_dma.paddr >> 8);
7355 }
7356 iwn_nic_unlock(sc);
7357
7358 /* Enable DMA channels. */
7359 for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
7360 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
7361 IWN_FH_TX_CONFIG_DMA_ENA |
7362 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
7363 }
7364
7365 /* Clear "radio off" and "commands blocked" bits. */
7366 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7367 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
7368
7369 /* Clear pending interrupts. */
7370 IWN_WRITE(sc, IWN_INT, 0xffffffff);
7371 /* Enable interrupt coalescing. */
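	/*
	 * The coalescing timer presumably counts in 32 usec units, so
	 * 512 / 32 programs a window of roughly 512 usec.
	 */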
7372 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 32);
7373 /* Enable interrupts. */
7374 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
7375
7376 /* _Really_ make sure "radio off" bit is cleared! */
7377 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7378 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7379
7380 /* Enable shadow registers. */
7381 if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
7382 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
7383
7384 if ((error = ops->load_firmware(sc)) != 0) {
7385 dev_err(sc->sc_dip, CE_WARN,
7386 "!could not load firmware");
7387 return error;
7388 }
7389 /* Wait at most one second for firmware alive notification. */
7390 clk = ddi_get_lbolt() + drv_usectohz(1000000);
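	/*
	 * cv_timedwait() returns -1 once the absolute timeout "clk" passes
	 * without sc_alive_cv being signalled.
	 */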
7391 while ((sc->sc_flags & IWN_FLAG_FW_ALIVE) == 0) {
7392 if (cv_timedwait(&sc->sc_alive_cv, &sc->sc_mtx, clk) < 0) {
7393 dev_err(sc->sc_dip, CE_WARN,
7394 "!timeout waiting for adapter to initialize");
7395 return (IWN_FAIL);
7396 }
7397 }
7398 /* Do post-firmware initialization. */
7399 return ops->post_alive(sc);
7400 }
7401
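/*
 * Bring the hardware down.  "lock" indicates whether this function must
 * acquire sc_mtx itself or whether the caller already holds it, as
 * iwn_init() does on its failure path.
 */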
7402 static void
7403 iwn_hw_stop(struct iwn_softc *sc, boolean_t lock)
7404 {
7405 int chnl, qid, ntries;
7406
7407 if (lock) {
7408 mutex_enter(&sc->sc_mtx);
7409 }
7410
7411 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
7412
7413 /* Disable interrupts. */
7414 IWN_WRITE(sc, IWN_INT_MASK, 0);
7415 IWN_WRITE(sc, IWN_INT, 0xffffffff);
7416 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
7417 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
7418
7419 /* Make sure we no longer hold the NIC lock. */
7420 iwn_nic_unlock(sc);
7421
7422 /* Stop TX scheduler. */
7423 iwn_prph_write(sc, sc->sched_txfact_addr, 0);
7424
7425 /* Stop all DMA channels. */
7426 if (iwn_nic_lock(sc) == 0) {
7427 for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
7428 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
7429 for (ntries = 0; ntries < 200; ntries++) {
7430 if (IWN_READ(sc, IWN_FH_TX_STATUS) &
7431 IWN_FH_TX_STATUS_IDLE(chnl))
7432 break;
7433 DELAY(10);
7434 }
7435 }
7436 iwn_nic_unlock(sc);
7437 }
7438
7439 /* Stop RX ring. */
7440 iwn_reset_rx_ring(sc, &sc->rxq);
7441
7442 /* Reset all TX rings. */
7443 for (qid = 0; qid < sc->ntxqs; qid++)
7444 iwn_reset_tx_ring(sc, &sc->txq[qid]);
7445
7446 if (iwn_nic_lock(sc) == 0) {
7447 iwn_prph_write(sc, IWN_APMG_CLK_DIS,
7448 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
7449 iwn_nic_unlock(sc);
7450 }
7451 DELAY(5);
7452 /* Power OFF adapter. */
7453 iwn_apm_stop(sc);
7454
7455 sc->sc_flags &= ~(IWN_FLAG_HW_INITED | IWN_FLAG_FW_ALIVE);
7456
7457 if (lock) {
7458 mutex_exit(&sc->sc_mtx);
7459 }
7460 }
7461
7462 static int
7463 iwn_init(struct iwn_softc *sc)
7464 {
7465 int error;
7466
7467 mutex_enter(&sc->sc_mtx);
7468 if (sc->sc_flags & IWN_FLAG_HW_INITED)
7469 goto out;
7470 if ((error = iwn_hw_prepare(sc)) != 0) {
7471 dev_err(sc->sc_dip, CE_WARN, "!hardware not ready");
7472 goto fail;
7473 }
7474
7475 /* Check that the radio is not disabled by hardware switch. */
7476 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
7477 dev_err(sc->sc_dip, CE_WARN,
7478 "!radio is disabled by hardware switch");
7479 error = EPERM; /* :-) */
7480 goto fail;
7481 }
7482
7483 /* Read firmware images from the filesystem. */
7484 if ((error = iwn_read_firmware(sc)) != 0) {
7485 dev_err(sc->sc_dip, CE_WARN, "!could not read firmware");
7486 goto fail;
7487 }
7488
7489 /* Initialize interrupt mask to default value. */
7490 sc->int_mask = IWN_INT_MASK_DEF;
7491 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
7492
7493 /* Initialize hardware and upload firmware. */
7494 ASSERT(sc->fw.data != NULL && sc->fw.size > 0);
7495 error = iwn_hw_init(sc);
7496 if (error != 0) {
7497 dev_err(sc->sc_dip, CE_WARN, "!could not initialize hardware");
7498 goto fail;
7499 }
7500
7501 /* Configure adapter now that it is ready. */
7502 if ((error = iwn_config(sc)) != 0) {
7503 dev_err(sc->sc_dip, CE_WARN, "!could not configure device");
7504 goto fail;
7505 }
7506
7507 sc->sc_flags |= IWN_FLAG_HW_INITED;
7508 out:
7509 mutex_exit(&sc->sc_mtx);
7510 return 0;
7511
7512 fail:
7513 iwn_hw_stop(sc, B_FALSE);
7514 mutex_exit(&sc->sc_mtx);
7515 return error;
7516 }
7517
7518 /*
7519  * XXX code from usr/src/uts/common/io/net80211/net80211_output.c
7520 * Copyright (c) 2001 Atsushi Onoe
7521 * Copyright (c) 2002, 2003 Sam Leffler, Errno Consulting
7522 * Copyright (c) 2007-2009 Damien Bergamini
7523 * All rights reserved.
7524 */
7525
7526 /*
7527 * Add SSID element to a frame
7528 */
7529 static uint8_t *
7530 ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, uint32_t len)
7531 {
7532 *frm++ = IEEE80211_ELEMID_SSID;
7533 *frm++ = (uint8_t)len;
7534 bcopy(ssid, frm, len);
7535 return (frm + len);
7536 }
7537
7538 /*
7539 * Add supported rates information element to a frame.
7540 */
7541 static uint8_t *
7542 ieee80211_add_rates(uint8_t *frm, const struct ieee80211_rateset *rs)
7543 {
7544 uint8_t nrates;
7545
7546 *frm++ = IEEE80211_ELEMID_RATES;
7547 nrates = rs->ir_nrates;
7548 if (nrates > IEEE80211_RATE_SIZE)
7549 nrates = IEEE80211_RATE_SIZE;
7550 *frm++ = nrates;
7551 bcopy(rs->ir_rates, frm, nrates);
7552 return (frm + nrates);
7553 }
7554
7555 /*
7556 * Add extended supported rates element to a frame, usually for 11g mode
7557 */
7558 static uint8_t *
7559 ieee80211_add_xrates(uint8_t *frm, const struct ieee80211_rateset *rs)
7560 {
7561 if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
7562 uint8_t nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
7563
7564 *frm++ = IEEE80211_ELEMID_XRATES;
7565 *frm++ = nrates;
7566 bcopy(rs->ir_rates + IEEE80211_RATE_SIZE, frm, nrates);
7567 frm += nrates;
7568 }
7569 return (frm);
7570 }
7571
7572 /*
7573 * XXX: Hack to set the current channel to the value advertised in beacons or
7574 * probe responses. Only used during AP detection.
7575 * XXX: Duplicated from if_iwi.c
7576 */
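/*
 * The channel number is taken from the DS Parameter Set element of the
 * received beacon or probe response; when scanning in 5 GHz, where that
 * element may be absent, the channel reported in the RX status is used
 * instead.
 */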
7577 static void
7578 iwn_fix_channel(struct iwn_softc *sc, mblk_t *m,
7579 struct iwn_rx_stat *stat)
7580 {
7581 struct ieee80211com *ic = &sc->sc_ic;
7582 struct ieee80211_frame *wh;
7583 uint8_t subtype;
7584 uint8_t *frm, *efrm;
7585
7586 wh = (struct ieee80211_frame *)m->b_rptr;
7587
7588 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
7589 return;
7590
7591 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
7592
7593 if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
7594 subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
7595 return;
7596
7597 if (sc->sc_flags & IWN_FLAG_SCANNING_5GHZ) {
7598 int chan = le16toh(stat->chan);
7599 if (chan < __arraycount(ic->ic_sup_channels))
7600 ic->ic_curchan = &ic->ic_sup_channels[chan];
7601 return;
7602 }
7603
7604 frm = (uint8_t *)(wh + 1);
7605 efrm = (uint8_t *)m->b_wptr;
7606
7607 	frm += 12;	/* skip timestamp (8), beacon interval (2) and capability (2) */
7608 while (frm < efrm) {
7609 if (*frm == IEEE80211_ELEMID_DSPARMS)
7610 #if IEEE80211_CHAN_MAX < 255
7611 if (frm[2] <= IEEE80211_CHAN_MAX)
7612 #endif
7613 ic->ic_curchan = &ic->ic_sup_channels[frm[2]];
7614
7615 frm += frm[1] + 2;
7616 }
7617 }
7618
7619 /*
7620 * invoked by GLD to start or open NIC
7621 */
7622 static int
7623 iwn_m_start(void *arg)
7624 {
7625 struct iwn_softc *sc;
7626 ieee80211com_t *ic;
7627 int err = IWN_FAIL;
7628
7629 sc = (struct iwn_softc *)arg;
7630 ASSERT(sc != NULL);
7631 ic = &sc->sc_ic;
7632
7633 err = iwn_init(sc);
7634 if (err != IWN_SUCCESS) {
7635 /*
7636 * If initialization failed because the RF switch is off,
7637 * return success anyway to make the 'plumb' succeed.
7638 		 * iwn_thread() will retry the initialization in the background.
7639 */
7640 if (err == EPERM &&
7641 !(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
7642 mutex_enter(&sc->sc_mtx);
7643 sc->sc_flags |= IWN_FLAG_HW_ERR_RECOVER;
7644 sc->sc_flags |= IWN_FLAG_RADIO_OFF;
7645 mutex_exit(&sc->sc_mtx);
7646 return (IWN_SUCCESS);
7647 }
7648
7649 return (err);
7650 }
7651
7652 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
7653
7654 mutex_enter(&sc->sc_mtx);
7655 sc->sc_flags |= IWN_FLAG_RUNNING;
7656 mutex_exit(&sc->sc_mtx);
7657
7658 return (IWN_SUCCESS);
7659 }
7660
7661 /*
7662 * invoked by GLD to stop or down NIC
7663 */
7664 static void
7665 iwn_m_stop(void *arg)
7666 {
7667 struct iwn_softc *sc;
7668 ieee80211com_t *ic;
7669
7670 sc = (struct iwn_softc *)arg;
7671 ASSERT(sc != NULL);
7672 ic = &sc->sc_ic;
7673
7674 iwn_hw_stop(sc, B_TRUE);
7675
7676 /*
7677 * release buffer for calibration
7678 */
7679
7680 ieee80211_stop_watchdog(ic);
7681 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
7682
7683 mutex_enter(&sc->sc_mtx);
7684 sc->sc_flags &= ~IWN_FLAG_HW_ERR_RECOVER;
7685 sc->sc_flags &= ~IWN_FLAG_RATE_AUTO_CTL;
7686
7687 sc->sc_flags &= ~IWN_FLAG_RUNNING;
7688 sc->sc_flags &= ~IWN_FLAG_SCANNING;
7689 mutex_exit(&sc->sc_mtx);
7690 }
7691
7692
7693 /*
7694 * Module Loading Data & Entry Points
7695 */
7696 DDI_DEFINE_STREAM_OPS(iwn_devops, nulldev, nulldev, iwn_attach,
7697 iwn_detach, nodev, NULL, D_MP, NULL, iwn_quiesce);
7698
7699 static struct modldrv iwn_modldrv = {
7700 &mod_driverops,
7701 "Intel WiFi Link 4965 and 1000/5000/6000 series driver",
7702 &iwn_devops
7703 };
7704
7705 static struct modlinkage iwn_modlinkage = {
7706 MODREV_1,
7707 &iwn_modldrv,
7708 NULL
7709 };
7710
7711 int
7712 _init(void)
7713 {
7714 int status;
7715
7716 status = ddi_soft_state_init(&iwn_state,
7717 sizeof (struct iwn_softc), 1);
7718 if (status != DDI_SUCCESS)
7719 return (status);
7720
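	/*
	 * mac_init_ops() hooks the GLDv3 entry points into iwn_devops and
	 * must be undone with mac_fini_ops() if mod_install() fails.
	 */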
7721 mac_init_ops(&iwn_devops, "iwn");
7722 status = mod_install(&iwn_modlinkage);
7723 if (status != DDI_SUCCESS) {
7724 mac_fini_ops(&iwn_devops);
7725 ddi_soft_state_fini(&iwn_state);
7726 }
7727
7728 return (status);
7729 }
7730
7731 int
7732 _fini(void)
7733 {
7734 int status;
7735
7736 status = mod_remove(&iwn_modlinkage);
7737 if (status == DDI_SUCCESS) {
7738 mac_fini_ops(&iwn_devops);
7739 ddi_soft_state_fini(&iwn_state);
7740 }
7741
7742 return (status);
7743 }
7744
7745 int
7746 _info(struct modinfo *mip)
7747 {
7748 return (mod_info(&iwn_modlinkage, mip));
7749 }
7750