1 /* 2 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 3 * Use is subject to license terms. 4 */ 5 6 /* 7 * Copyright (c) 2007, Intel Corporation 8 * All rights reserved. 9 */ 10 11 /* 12 * Copyright (c) 2006 13 * Copyright (c) 2007 14 * Damien Bergamini <damien.bergamini@free.fr> 15 * 16 * Permission to use, copy, modify, and distribute this software for any 17 * purpose with or without fee is hereby granted, provided that the above 18 * copyright notice and this permission notice appear in all copies. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 21 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 22 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 23 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 24 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 25 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 26 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 27 */ 28 29 /* 30 * Driver for Intel PRO/Wireless 4965AGN(kedron) 802.11 network adapters. 31 */ 32 33 #include <sys/types.h> 34 #include <sys/byteorder.h> 35 #include <sys/conf.h> 36 #include <sys/cmn_err.h> 37 #include <sys/stat.h> 38 #include <sys/ddi.h> 39 #include <sys/sunddi.h> 40 #include <sys/strsubr.h> 41 #include <sys/ethernet.h> 42 #include <inet/common.h> 43 #include <inet/nd.h> 44 #include <inet/mi.h> 45 #include <sys/note.h> 46 #include <sys/stream.h> 47 #include <sys/strsun.h> 48 #include <sys/modctl.h> 49 #include <sys/devops.h> 50 #include <sys/dlpi.h> 51 #include <sys/mac.h> 52 #include <sys/mac_wifi.h> 53 #include <sys/net80211.h> 54 #include <sys/net80211_proto.h> 55 #include <sys/varargs.h> 56 #include <sys/policy.h> 57 #include <sys/pci.h> 58 59 #include "iwk_calibration.h" 60 #include "iwk_hw.h" 61 #include "iwk_eeprom.h" 62 #include "iwk2_var.h" 63 #include <inet/wifi_ioctl.h> 64 65 #ifdef DEBUG 66 #define IWK_DEBUG_80211 (1 << 0) 67 #define IWK_DEBUG_CMD (1 << 1) 68 #define IWK_DEBUG_DMA (1 << 2) 69 #define IWK_DEBUG_EEPROM (1 << 3) 70 #define IWK_DEBUG_FW (1 << 4) 71 #define IWK_DEBUG_HW (1 << 5) 72 #define IWK_DEBUG_INTR (1 << 6) 73 #define IWK_DEBUG_MRR (1 << 7) 74 #define IWK_DEBUG_PIO (1 << 8) 75 #define IWK_DEBUG_RX (1 << 9) 76 #define IWK_DEBUG_SCAN (1 << 10) 77 #define IWK_DEBUG_TX (1 << 11) 78 #define IWK_DEBUG_RATECTL (1 << 12) 79 #define IWK_DEBUG_RADIO (1 << 13) 80 #define IWK_DEBUG_RESUME (1 << 14) 81 #define IWK_DEBUG_CALIBRATION (1 << 15) 82 uint32_t iwk_dbg_flags = 0; 83 #define IWK_DBG(x) \ 84 iwk_dbg x 85 #else 86 #define IWK_DBG(x) 87 #endif 88 89 static void *iwk_soft_state_p = NULL; 90 static uint8_t iwk_fw_bin [] = { 91 #include "fw-iw/iw4965.ucode.hex" 92 }; 93 94 /* DMA attributes for a shared page */ 95 static ddi_dma_attr_t sh_dma_attr = { 96 DMA_ATTR_V0, /* version of this structure */ 97 0, /* lowest usable address */ 98 0xffffffffU, /* highest usable address */ 99 0xffffffffU, /* maximum DMAable byte count */ 100 0x1000, /* alignment in bytes */ 101 0x1000, /* burst sizes (any?) 
 */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a keep warm DRAM descriptor */
static ddi_dma_attr_t kw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a ring descriptor */
static ddi_dma_attr_t ring_desc_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a cmd */
static ddi_dma_attr_t cmd_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a rx buffer */
static ddi_dma_attr_t rx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a tx buffer.
 * The hardware supports up to 4 segments per descriptor, but, like
 * the other wifi drivers, we currently place the whole frame in a
 * single descriptor, so the maximum number of segments is set to 1,
 * the same as for rx_buffer_dma_attr.  A separate tx attribute is
 * kept so the multi-segment capability can be used in the future.
 */
static ddi_dma_attr_t tx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?)
*/ 190 1, /* minimum transfer */ 191 0xffffffffU, /* maximum transfer */ 192 0xffffffffU, /* maximum segment length */ 193 1, /* maximum number of segments */ 194 1, /* granularity */ 195 0, /* flags (reserved) */ 196 }; 197 198 /* DMA attributes for text and data part in the firmware */ 199 static ddi_dma_attr_t fw_dma_attr = { 200 DMA_ATTR_V0, /* version of this structure */ 201 0, /* lowest usable address */ 202 0xffffffffU, /* highest usable address */ 203 0x7fffffff, /* maximum DMAable byte count */ 204 0x10, /* alignment in bytes */ 205 0x100, /* burst sizes (any?) */ 206 1, /* minimum transfer */ 207 0xffffffffU, /* maximum transfer */ 208 0xffffffffU, /* maximum segment length */ 209 1, /* maximum number of segments */ 210 1, /* granularity */ 211 0, /* flags (reserved) */ 212 }; 213 214 215 /* regs access attributes */ 216 static ddi_device_acc_attr_t iwk_reg_accattr = { 217 DDI_DEVICE_ATTR_V0, 218 DDI_STRUCTURE_LE_ACC, 219 DDI_STRICTORDER_ACC, 220 DDI_DEFAULT_ACC 221 }; 222 223 /* DMA access attributes */ 224 static ddi_device_acc_attr_t iwk_dma_accattr = { 225 DDI_DEVICE_ATTR_V0, 226 DDI_NEVERSWAP_ACC, 227 DDI_STRICTORDER_ACC, 228 DDI_DEFAULT_ACC 229 }; 230 231 static int iwk_ring_init(iwk_sc_t *); 232 static void iwk_ring_free(iwk_sc_t *); 233 static int iwk_alloc_shared(iwk_sc_t *); 234 static void iwk_free_shared(iwk_sc_t *); 235 static int iwk_alloc_kw(iwk_sc_t *); 236 static void iwk_free_kw(iwk_sc_t *); 237 static int iwk_alloc_fw_dma(iwk_sc_t *); 238 static void iwk_free_fw_dma(iwk_sc_t *); 239 static int iwk_alloc_rx_ring(iwk_sc_t *); 240 static void iwk_reset_rx_ring(iwk_sc_t *); 241 static void iwk_free_rx_ring(iwk_sc_t *); 242 static int iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *, 243 int, int); 244 static void iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *); 245 static void iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *); 246 247 static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *); 248 static void iwk_node_free(ieee80211_node_t *); 249 static int iwk_newstate(ieee80211com_t *, enum ieee80211_state, int); 250 static int iwk_key_set(ieee80211com_t *, const struct ieee80211_key *, 251 const uint8_t mac[IEEE80211_ADDR_LEN]); 252 static void iwk_mac_access_enter(iwk_sc_t *); 253 static void iwk_mac_access_exit(iwk_sc_t *); 254 static uint32_t iwk_reg_read(iwk_sc_t *, uint32_t); 255 static void iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t); 256 static void iwk_reg_write_region_4(iwk_sc_t *, uint32_t, 257 uint32_t *, int); 258 static int iwk_load_firmware(iwk_sc_t *); 259 static void iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *, 260 iwk_rx_data_t *); 261 static void iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *, 262 iwk_rx_data_t *); 263 static void iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *); 264 static uint_t iwk_intr(caddr_t, caddr_t); 265 static int iwk_eep_load(iwk_sc_t *sc); 266 static void iwk_get_mac_from_eep(iwk_sc_t *sc); 267 static int iwk_eep_sem_down(iwk_sc_t *sc); 268 static void iwk_eep_sem_up(iwk_sc_t *sc); 269 static uint_t iwk_rx_softintr(caddr_t, caddr_t); 270 static uint8_t iwk_rate_to_plcp(int); 271 static int iwk_cmd(iwk_sc_t *, int, const void *, int, int); 272 static void iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t); 273 static int iwk_hw_set_before_auth(iwk_sc_t *); 274 static int iwk_scan(iwk_sc_t *); 275 static int iwk_config(iwk_sc_t *); 276 static void iwk_stop_master(iwk_sc_t *); 277 static int iwk_power_up(iwk_sc_t *); 278 static int iwk_preinit(iwk_sc_t *); 279 static int iwk_init(iwk_sc_t *); 280 static void iwk_stop(iwk_sc_t *); 281 static 
void iwk_amrr_init(iwk_amrr_t *); 282 static void iwk_amrr_timeout(iwk_sc_t *); 283 static void iwk_amrr_ratectl(void *, ieee80211_node_t *); 284 static int32_t iwk_curr_tempera(iwk_sc_t *sc); 285 static int iwk_tx_power_calibration(iwk_sc_t *sc); 286 static inline int iwk_is_24G_band(iwk_sc_t *sc); 287 static inline int iwk_is_fat_channel(iwk_sc_t *sc); 288 static int iwk_txpower_grp(uint16_t channel); 289 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc, 290 uint16_t channel, 291 int is_24G, int is_fat, int is_hi_chan); 292 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel); 293 static int iwk_division(int32_t num, int32_t denom, int32_t *res); 294 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1, 295 int32_t x2, int32_t y2); 296 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel, 297 struct iwk_eep_calib_channel_info *chan_info); 298 static int32_t iwk_voltage_compensation(int32_t eep_voltage, 299 int32_t curr_voltage); 300 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G); 301 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc, 302 struct iwk_tx_power_db *tp_db); 303 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc); 304 static int iwk_is_associated(iwk_sc_t *sc); 305 static int iwk_rxgain_diff_init(iwk_sc_t *sc); 306 static int iwk_rxgain_diff(iwk_sc_t *sc); 307 static int iwk_rx_sens_init(iwk_sc_t *sc); 308 static int iwk_rx_sens(iwk_sc_t *sc); 309 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time); 310 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time); 311 312 static void iwk_write_event_log(iwk_sc_t *); 313 static void iwk_write_error_log(iwk_sc_t *); 314 315 static int iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 316 static int iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 317 318 /* 319 * GLD specific operations 320 */ 321 static int iwk_m_stat(void *arg, uint_t stat, uint64_t *val); 322 static int iwk_m_start(void *arg); 323 static void iwk_m_stop(void *arg); 324 static int iwk_m_unicst(void *arg, const uint8_t *macaddr); 325 static int iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m); 326 static int iwk_m_promisc(void *arg, boolean_t on); 327 static mblk_t *iwk_m_tx(void *arg, mblk_t *mp); 328 static void iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp); 329 static int iwk_m_setprop(void *arg, const char *pr_name, 330 mac_prop_id_t wldp_pr_name, uint_t wldp_length, const void *wldp_buf); 331 static int iwk_m_getprop(void *arg, const char *pr_name, 332 mac_prop_id_t wldp_pr_name, uint_t pr_flags, uint_t wldp_length, 333 void *wldp_buf); 334 static void iwk_destroy_locks(iwk_sc_t *sc); 335 static int iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type); 336 static void iwk_thread(iwk_sc_t *sc); 337 338 /* 339 * Supported rates for 802.11b/g modes (in 500Kbps unit). 340 * 11a and 11n support will be added later. 
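 * (The ir_rates values below are in 500 kbps units, so the 11b set
 * { 2, 4, 11, 22 } is 1/2/5.5/11 Mbps and the 11g set adds the OFDM
 * rates 6 through 54 Mbps.)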
341 */ 342 static const struct ieee80211_rateset iwk_rateset_11b = 343 { 4, { 2, 4, 11, 22 } }; 344 345 static const struct ieee80211_rateset iwk_rateset_11g = 346 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } }; 347 348 /* 349 * For mfthread only 350 */ 351 extern pri_t minclsyspri; 352 353 #define DRV_NAME_4965 "iwk" 354 355 /* 356 * Module Loading Data & Entry Points 357 */ 358 DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach, 359 iwk_detach, nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported); 360 361 static struct modldrv iwk_modldrv = { 362 &mod_driverops, 363 "Intel(R) 4965AGN driver(N)", 364 &iwk_devops 365 }; 366 367 static struct modlinkage iwk_modlinkage = { 368 MODREV_1, 369 &iwk_modldrv, 370 NULL 371 }; 372 373 int 374 _init(void) 375 { 376 int status; 377 378 status = ddi_soft_state_init(&iwk_soft_state_p, 379 sizeof (iwk_sc_t), 1); 380 if (status != DDI_SUCCESS) 381 return (status); 382 383 mac_init_ops(&iwk_devops, DRV_NAME_4965); 384 status = mod_install(&iwk_modlinkage); 385 if (status != DDI_SUCCESS) { 386 mac_fini_ops(&iwk_devops); 387 ddi_soft_state_fini(&iwk_soft_state_p); 388 } 389 390 return (status); 391 } 392 393 int 394 _fini(void) 395 { 396 int status; 397 398 status = mod_remove(&iwk_modlinkage); 399 if (status == DDI_SUCCESS) { 400 mac_fini_ops(&iwk_devops); 401 ddi_soft_state_fini(&iwk_soft_state_p); 402 } 403 404 return (status); 405 } 406 407 int 408 _info(struct modinfo *mip) 409 { 410 return (mod_info(&iwk_modlinkage, mip)); 411 } 412 413 /* 414 * Mac Call Back entries 415 */ 416 mac_callbacks_t iwk_m_callbacks = { 417 MC_IOCTL | MC_SETPROP | MC_GETPROP, 418 iwk_m_stat, 419 iwk_m_start, 420 iwk_m_stop, 421 iwk_m_promisc, 422 iwk_m_multicst, 423 iwk_m_unicst, 424 iwk_m_tx, 425 NULL, 426 iwk_m_ioctl, 427 NULL, 428 NULL, 429 NULL, 430 iwk_m_setprop, 431 iwk_m_getprop 432 }; 433 434 #ifdef DEBUG 435 void 436 iwk_dbg(uint32_t flags, const char *fmt, ...) 
437 { 438 va_list ap; 439 440 if (flags & iwk_dbg_flags) { 441 va_start(ap, fmt); 442 vcmn_err(CE_NOTE, fmt, ap); 443 va_end(ap); 444 } 445 } 446 #endif 447 448 /* 449 * device operations 450 */ 451 int 452 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 453 { 454 iwk_sc_t *sc; 455 ieee80211com_t *ic; 456 int instance, err, i; 457 char strbuf[32]; 458 wifi_data_t wd = { 0 }; 459 mac_register_t *macp; 460 461 int intr_type; 462 int intr_count; 463 int intr_actual; 464 465 switch (cmd) { 466 case DDI_ATTACH: 467 break; 468 case DDI_RESUME: 469 sc = ddi_get_soft_state(iwk_soft_state_p, 470 ddi_get_instance(dip)); 471 ASSERT(sc != NULL); 472 mutex_enter(&sc->sc_glock); 473 sc->sc_flags &= ~IWK_F_SUSPEND; 474 mutex_exit(&sc->sc_glock); 475 if (sc->sc_flags & IWK_F_RUNNING) { 476 (void) iwk_init(sc); 477 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1); 478 } 479 IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n")); 480 return (DDI_SUCCESS); 481 default: 482 err = DDI_FAILURE; 483 goto attach_fail1; 484 } 485 486 instance = ddi_get_instance(dip); 487 err = ddi_soft_state_zalloc(iwk_soft_state_p, instance); 488 if (err != DDI_SUCCESS) { 489 cmn_err(CE_WARN, 490 "iwk_attach(): failed to allocate soft state\n"); 491 goto attach_fail1; 492 } 493 sc = ddi_get_soft_state(iwk_soft_state_p, instance); 494 sc->sc_dip = dip; 495 496 err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0, 497 &iwk_reg_accattr, &sc->sc_cfg_handle); 498 if (err != DDI_SUCCESS) { 499 cmn_err(CE_WARN, 500 "iwk_attach(): failed to map config spaces regs\n"); 501 goto attach_fail2; 502 } 503 sc->sc_rev = ddi_get8(sc->sc_cfg_handle, 504 (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID)); 505 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0); 506 sc->sc_clsz = ddi_get16(sc->sc_cfg_handle, 507 (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ)); 508 if (!sc->sc_clsz) 509 sc->sc_clsz = 16; 510 sc->sc_clsz = (sc->sc_clsz << 2); 511 sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) + 512 IEEE80211_MTU + IEEE80211_CRC_LEN + 513 (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + 514 IEEE80211_WEP_CRCLEN), sc->sc_clsz); 515 /* 516 * Map operating registers 517 */ 518 err = ddi_regs_map_setup(dip, 1, &sc->sc_base, 519 0, 0, &iwk_reg_accattr, &sc->sc_handle); 520 if (err != DDI_SUCCESS) { 521 cmn_err(CE_WARN, 522 "iwk_attach(): failed to map device regs\n"); 523 goto attach_fail2a; 524 } 525 526 err = ddi_intr_get_supported_types(dip, &intr_type); 527 if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) { 528 cmn_err(CE_WARN, "iwk_attach(): " 529 "Fixed type interrupt is not supported\n"); 530 goto attach_fail_intr_a; 531 } 532 533 err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count); 534 if ((err != DDI_SUCCESS) || (intr_count != 1)) { 535 cmn_err(CE_WARN, "iwk_attach(): " 536 "No fixed interrupts\n"); 537 goto attach_fail_intr_a; 538 } 539 540 sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP); 541 542 err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0, 543 intr_count, &intr_actual, 0); 544 if ((err != DDI_SUCCESS) || (intr_actual != 1)) { 545 cmn_err(CE_WARN, "iwk_attach(): " 546 "ddi_intr_alloc() failed 0x%x\n", err); 547 goto attach_fail_intr_b; 548 } 549 550 err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri); 551 if (err != DDI_SUCCESS) { 552 cmn_err(CE_WARN, "iwk_attach(): " 553 "ddi_intr_get_pri() failed 0x%x\n", err); 554 goto attach_fail_intr_c; 555 } 556 557 mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER, 558 
DDI_INTR_PRI(sc->sc_intr_pri)); 559 mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER, 560 DDI_INTR_PRI(sc->sc_intr_pri)); 561 mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER, 562 DDI_INTR_PRI(sc->sc_intr_pri)); 563 564 cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL); 565 cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL); 566 cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL); 567 /* 568 * initialize the mfthread 569 */ 570 cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL); 571 sc->sc_mf_thread = NULL; 572 sc->sc_mf_thread_switch = 0; 573 574 /* 575 * Allocate shared page. 576 */ 577 err = iwk_alloc_shared(sc); 578 if (err != DDI_SUCCESS) { 579 cmn_err(CE_WARN, "iwk_attach(): " 580 "failed to allocate shared page\n"); 581 goto attach_fail3; 582 } 583 584 /* 585 * Allocate keep warm page. 586 */ 587 err = iwk_alloc_kw(sc); 588 if (err != DDI_SUCCESS) { 589 cmn_err(CE_WARN, "iwk_attach(): " 590 "failed to allocate keep warm page\n"); 591 goto attach_fail3a; 592 } 593 594 /* 595 * Do some necessary hardware initializations. 596 */ 597 err = iwk_preinit(sc); 598 if (err != DDI_SUCCESS) { 599 cmn_err(CE_WARN, "iwk_attach(): " 600 "failed to init hardware\n"); 601 goto attach_fail4; 602 } 603 604 /* initialize EEPROM */ 605 err = iwk_eep_load(sc); /* get hardware configurations from eeprom */ 606 if (err != 0) { 607 cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n"); 608 goto attach_fail4; 609 } 610 611 if (sc->sc_eep_map.calib_version < EEP_TX_POWER_VERSION_NEW) { 612 cmn_err(CE_WARN, "older EEPROM detected\n"); 613 goto attach_fail4; 614 } 615 616 iwk_get_mac_from_eep(sc); 617 618 err = iwk_ring_init(sc); 619 if (err != DDI_SUCCESS) { 620 cmn_err(CE_WARN, "iwk_attach(): " 621 "failed to allocate and initialize ring\n"); 622 goto attach_fail4; 623 } 624 625 sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin; 626 627 err = iwk_alloc_fw_dma(sc); 628 if (err != DDI_SUCCESS) { 629 cmn_err(CE_WARN, "iwk_attach(): " 630 "failed to allocate firmware dma\n"); 631 goto attach_fail5; 632 } 633 634 /* 635 * Initialize the wifi part, which will be used by 636 * generic layer 637 */ 638 ic = &sc->sc_ic; 639 ic->ic_phytype = IEEE80211_T_OFDM; 640 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 641 ic->ic_state = IEEE80211_S_INIT; 642 ic->ic_maxrssi = 100; /* experimental number */ 643 ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT | 644 IEEE80211_C_PMGT | IEEE80211_C_SHSLOT; 645 /* 646 * use software WEP and TKIP, hardware CCMP; 647 */ 648 ic->ic_caps |= IEEE80211_C_AES_CCM; 649 /* 650 * Support WPA/WPA2 651 */ 652 ic->ic_caps |= IEEE80211_C_WPA; 653 654 /* set supported .11b and .11g rates */ 655 ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b; 656 ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g; 657 658 /* set supported .11b and .11g channels (1 through 11) */ 659 for (i = 1; i <= 11; i++) { 660 ic->ic_sup_channels[i].ich_freq = 661 ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ); 662 ic->ic_sup_channels[i].ich_flags = 663 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM | 664 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ | 665 IEEE80211_CHAN_PASSIVE; 666 } 667 668 ic->ic_xmit = iwk_send; 669 /* 670 * init Wifi layer 671 */ 672 ieee80211_attach(ic); 673 674 /* 675 * different instance has different WPA door 676 */ 677 (void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR, 678 ddi_driver_name(dip), 679 ddi_get_instance(dip)); 680 681 /* 682 * Override 80211 default routines 683 */ 684 sc->sc_newstate = ic->ic_newstate; 685 ic->ic_newstate = iwk_newstate; 686 sc->sc_recv_mgmt = 
ic->ic_recv_mgmt;
	ic->ic_node_alloc = iwk_node_alloc;
	ic->ic_node_free = iwk_node_free;
	ic->ic_crypto.cs_key_set = iwk_key_set;
	ieee80211_media_init(ic);
	/*
	 * initialize default tx key
	 */
	ic->ic_def_txkey = 0;
	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
	    iwk_rx_softintr, (caddr_t)sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "add soft interrupt failed\n");
		goto attach_fail7;
	}

	/*
	 * Add the interrupt handler
	 */
	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwk_intr,
	    (caddr_t)sc, NULL);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "ddi_intr_add_handler() failed\n");
		goto attach_fail8;
	}

	err = ddi_intr_enable(sc->sc_intr_htable[0]);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "ddi_intr_enable() failed\n");
		goto attach_fail_intr_d;
	}

	/*
	 * Initialize pointer to device specific functions
	 */
	wd.wd_secalloc = WIFI_SEC_NONE;
	wd.wd_opmode = ic->ic_opmode;
	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);

	macp = mac_alloc(MAC_VERSION);
	if (macp == NULL) {
		cmn_err(CE_WARN,
		    "iwk_attach(): failed to do mac_alloc()\n");
		goto attach_fail9;
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
	macp->m_driver = sc;
	macp->m_dip = dip;
	macp->m_src_addr = ic->ic_macaddr;
	macp->m_callbacks = &iwk_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = IEEE80211_MTU;
	macp->m_pdata = &wd;
	macp->m_pdata_size = sizeof (wd);

	/*
	 * Register the macp to mac
	 */
	err = mac_register(macp, &ic->ic_mach);
	mac_free(macp);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "iwk_attach(): failed to do mac_register()\n");
		goto attach_fail9;
	}

	/*
	 * Create minor node of type DDI_NT_NET_WIFI
	 */
	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
	    instance + 1, DDI_NT_NET_WIFI, 0);
	if (err != DDI_SUCCESS)
		cmn_err(CE_WARN,
		    "iwk_attach(): failed to do ddi_create_minor_node()\n");

	/*
	 * Notify link is down now
	 */
	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);

	/*
	 * create the mf thread to handle the link status,
	 * recover from fatal errors, etc.
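 * The thread runs while sc_mf_thread_switch is set; iwk_detach()
 * clears the flag and waits on sc_mt_cv for the thread to exit
 * before tearing the driver down.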
774 */ 775 sc->sc_mf_thread_switch = 1; 776 if (sc->sc_mf_thread == NULL) 777 sc->sc_mf_thread = thread_create((caddr_t)NULL, 0, 778 iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri); 779 780 sc->sc_flags |= IWK_F_ATTACHED; 781 782 return (DDI_SUCCESS); 783 attach_fail9: 784 (void) ddi_intr_disable(sc->sc_intr_htable[0]); 785 attach_fail_intr_d: 786 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]); 787 788 attach_fail8: 789 (void) ddi_intr_remove_softint(sc->sc_soft_hdl); 790 sc->sc_soft_hdl = NULL; 791 attach_fail7: 792 ieee80211_detach(ic); 793 attach_fail6: 794 iwk_free_fw_dma(sc); 795 attach_fail5: 796 iwk_ring_free(sc); 797 attach_fail4: 798 iwk_free_kw(sc); 799 attach_fail3a: 800 iwk_free_shared(sc); 801 attach_fail3: 802 iwk_destroy_locks(sc); 803 attach_fail_intr_c: 804 (void) ddi_intr_free(sc->sc_intr_htable[0]); 805 attach_fail_intr_b: 806 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t)); 807 attach_fail_intr_a: 808 ddi_regs_map_free(&sc->sc_handle); 809 attach_fail2a: 810 ddi_regs_map_free(&sc->sc_cfg_handle); 811 attach_fail2: 812 ddi_soft_state_free(iwk_soft_state_p, instance); 813 attach_fail1: 814 return (err); 815 } 816 817 int 818 iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 819 { 820 iwk_sc_t *sc; 821 int err; 822 823 sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip)); 824 ASSERT(sc != NULL); 825 826 switch (cmd) { 827 case DDI_DETACH: 828 break; 829 case DDI_SUSPEND: 830 if (sc->sc_flags & IWK_F_RUNNING) { 831 iwk_stop(sc); 832 } 833 mutex_enter(&sc->sc_glock); 834 sc->sc_flags |= IWK_F_SUSPEND; 835 mutex_exit(&sc->sc_glock); 836 IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n")); 837 return (DDI_SUCCESS); 838 default: 839 return (DDI_FAILURE); 840 } 841 842 if (!(sc->sc_flags & IWK_F_ATTACHED)) 843 return (DDI_FAILURE); 844 845 err = mac_disable(sc->sc_ic.ic_mach); 846 if (err != DDI_SUCCESS) 847 return (err); 848 849 /* 850 * Destroy the mf_thread 851 */ 852 mutex_enter(&sc->sc_mt_lock); 853 sc->sc_mf_thread_switch = 0; 854 while (sc->sc_mf_thread != NULL) { 855 if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) 856 break; 857 } 858 mutex_exit(&sc->sc_mt_lock); 859 860 iwk_stop(sc); 861 DELAY(500000); 862 863 /* 864 * Unregiste from the MAC layer subsystem 865 */ 866 (void) mac_unregister(sc->sc_ic.ic_mach); 867 868 mutex_enter(&sc->sc_glock); 869 iwk_free_fw_dma(sc); 870 iwk_ring_free(sc); 871 iwk_free_kw(sc); 872 iwk_free_shared(sc); 873 mutex_exit(&sc->sc_glock); 874 875 (void) ddi_intr_disable(sc->sc_intr_htable[0]); 876 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]); 877 (void) ddi_intr_free(sc->sc_intr_htable[0]); 878 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t)); 879 880 (void) ddi_intr_remove_softint(sc->sc_soft_hdl); 881 sc->sc_soft_hdl = NULL; 882 883 /* 884 * detach ieee80211 885 */ 886 ieee80211_detach(&sc->sc_ic); 887 888 iwk_destroy_locks(sc); 889 890 ddi_regs_map_free(&sc->sc_handle); 891 ddi_regs_map_free(&sc->sc_cfg_handle); 892 ddi_remove_minor_node(dip, NULL); 893 ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip)); 894 895 return (DDI_SUCCESS); 896 } 897 898 static void 899 iwk_destroy_locks(iwk_sc_t *sc) 900 { 901 cv_destroy(&sc->sc_mt_cv); 902 mutex_destroy(&sc->sc_mt_lock); 903 cv_destroy(&sc->sc_tx_cv); 904 cv_destroy(&sc->sc_cmd_cv); 905 cv_destroy(&sc->sc_fw_cv); 906 mutex_destroy(&sc->sc_tx_lock); 907 mutex_destroy(&sc->sc_glock); 908 } 909 910 /* 911 * Allocate an area of memory and a DMA handle for accessing it 912 */ 913 static int 914 iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize, 915 
ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p, 916 uint_t dma_flags, iwk_dma_t *dma_p) 917 { 918 caddr_t vaddr; 919 int err; 920 921 /* 922 * Allocate handle 923 */ 924 err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p, 925 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl); 926 if (err != DDI_SUCCESS) { 927 dma_p->dma_hdl = NULL; 928 return (DDI_FAILURE); 929 } 930 931 /* 932 * Allocate memory 933 */ 934 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p, 935 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING), 936 DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl); 937 if (err != DDI_SUCCESS) { 938 ddi_dma_free_handle(&dma_p->dma_hdl); 939 dma_p->dma_hdl = NULL; 940 dma_p->acc_hdl = NULL; 941 return (DDI_FAILURE); 942 } 943 944 /* 945 * Bind the two together 946 */ 947 dma_p->mem_va = vaddr; 948 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL, 949 vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL, 950 &dma_p->cookie, &dma_p->ncookies); 951 if (err != DDI_DMA_MAPPED) { 952 ddi_dma_mem_free(&dma_p->acc_hdl); 953 ddi_dma_free_handle(&dma_p->dma_hdl); 954 dma_p->acc_hdl = NULL; 955 dma_p->dma_hdl = NULL; 956 return (DDI_FAILURE); 957 } 958 959 dma_p->nslots = ~0U; 960 dma_p->size = ~0U; 961 dma_p->token = ~0U; 962 dma_p->offset = 0; 963 return (DDI_SUCCESS); 964 } 965 966 /* 967 * Free one allocated area of DMAable memory 968 */ 969 static void 970 iwk_free_dma_mem(iwk_dma_t *dma_p) 971 { 972 if (dma_p->dma_hdl != NULL) { 973 if (dma_p->ncookies) { 974 (void) ddi_dma_unbind_handle(dma_p->dma_hdl); 975 dma_p->ncookies = 0; 976 } 977 ddi_dma_free_handle(&dma_p->dma_hdl); 978 dma_p->dma_hdl = NULL; 979 } 980 981 if (dma_p->acc_hdl != NULL) { 982 ddi_dma_mem_free(&dma_p->acc_hdl); 983 dma_p->acc_hdl = NULL; 984 } 985 } 986 987 /* 988 * 989 */ 990 static int 991 iwk_alloc_fw_dma(iwk_sc_t *sc) 992 { 993 int err = DDI_SUCCESS; 994 iwk_dma_t *dma_p; 995 char *t; 996 997 /* 998 * firmware image layout: 999 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->| 1000 */ 1001 t = (char *)(sc->sc_hdr + 1); 1002 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz), 1003 &fw_dma_attr, &iwk_dma_accattr, 1004 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1005 &sc->sc_dma_fw_text); 1006 dma_p = &sc->sc_dma_fw_text; 1007 IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n", 1008 dma_p->ncookies, dma_p->cookie.dmac_address, 1009 dma_p->cookie.dmac_size)); 1010 if (err != DDI_SUCCESS) { 1011 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc" 1012 " text dma memory"); 1013 goto fail; 1014 } 1015 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz)); 1016 1017 t += LE_32(sc->sc_hdr->textsz); 1018 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz), 1019 &fw_dma_attr, &iwk_dma_accattr, 1020 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1021 &sc->sc_dma_fw_data); 1022 dma_p = &sc->sc_dma_fw_data; 1023 IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n", 1024 dma_p->ncookies, dma_p->cookie.dmac_address, 1025 dma_p->cookie.dmac_size)); 1026 if (err != DDI_SUCCESS) { 1027 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc" 1028 " data dma memory"); 1029 goto fail; 1030 } 1031 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz)); 1032 1033 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz), 1034 &fw_dma_attr, &iwk_dma_accattr, 1035 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1036 &sc->sc_dma_fw_data_bak); 1037 dma_p = &sc->sc_dma_fw_data_bak; 1038 IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx " 1039 "size:%lx]\n", 1040 dma_p->ncookies, 
dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " data backup dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	t += LE_32(sc->sc_hdr->datasz);
	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
	    &fw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_text);
	dma_p = &sc->sc_dma_fw_init_text;
	IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " init text dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));

	t += LE_32(sc->sc_hdr->init_textsz);
	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
	    &fw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_data);
	dma_p = &sc->sc_dma_fw_init_data;
	IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " init data dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));

	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
fail:
	return (err);
}

static void
iwk_free_fw_dma(iwk_sc_t *sc)
{
	iwk_free_dma_mem(&sc->sc_dma_fw_text);
	iwk_free_dma_mem(&sc->sc_dma_fw_data);
	iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
	iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
	iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
}

/*
 * Allocate a shared page between host and NIC.
 */
static int
iwk_alloc_shared(iwk_sc_t *sc)
{
	iwk_dma_t *dma_p;
	int err = DDI_SUCCESS;

	/* must be aligned on a 4K-page boundary */
	err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
	    &sh_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_sh);
	if (err != DDI_SUCCESS)
		goto fail;
	sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;

	dma_p = &sc->sc_dma_sh;
	IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);
fail:
	iwk_free_shared(sc);
	return (err);
}

static void
iwk_free_shared(iwk_sc_t *sc)
{
	iwk_free_dma_mem(&sc->sc_dma_sh);
}

/*
 * Allocate a keep warm page.
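 * (IWK_KW_SIZE bytes, 4 KB aligned per kw_dma_attr; the page is
 * presumably used by the device to keep host DRAM accesses "warm"
 * while in power-save; here we only allocate and map it.)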
1135 */ 1136 static int 1137 iwk_alloc_kw(iwk_sc_t *sc) 1138 { 1139 iwk_dma_t *dma_p; 1140 int err = DDI_SUCCESS; 1141 1142 /* must be aligned on a 4K-page boundary */ 1143 err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE, 1144 &kw_dma_attr, &iwk_dma_accattr, 1145 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1146 &sc->sc_dma_kw); 1147 if (err != DDI_SUCCESS) 1148 goto fail; 1149 1150 dma_p = &sc->sc_dma_kw; 1151 IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n", 1152 dma_p->ncookies, dma_p->cookie.dmac_address, 1153 dma_p->cookie.dmac_size)); 1154 1155 return (err); 1156 fail: 1157 iwk_free_kw(sc); 1158 return (err); 1159 } 1160 1161 static void 1162 iwk_free_kw(iwk_sc_t *sc) 1163 { 1164 iwk_free_dma_mem(&sc->sc_dma_kw); 1165 } 1166 1167 static int 1168 iwk_alloc_rx_ring(iwk_sc_t *sc) 1169 { 1170 iwk_rx_ring_t *ring; 1171 iwk_rx_data_t *data; 1172 iwk_dma_t *dma_p; 1173 int i, err = DDI_SUCCESS; 1174 1175 ring = &sc->sc_rxq; 1176 ring->cur = 0; 1177 1178 err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t), 1179 &ring_desc_dma_attr, &iwk_dma_accattr, 1180 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1181 &ring->dma_desc); 1182 if (err != DDI_SUCCESS) { 1183 cmn_err(CE_WARN, "dma alloc rx ring desc failed\n"); 1184 goto fail; 1185 } 1186 ring->desc = (uint32_t *)ring->dma_desc.mem_va; 1187 dma_p = &ring->dma_desc; 1188 IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n", 1189 dma_p->ncookies, dma_p->cookie.dmac_address, 1190 dma_p->cookie.dmac_size)); 1191 1192 /* 1193 * Allocate Rx buffers. 1194 */ 1195 for (i = 0; i < RX_QUEUE_SIZE; i++) { 1196 data = &ring->data[i]; 1197 err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz, 1198 &rx_buffer_dma_attr, &iwk_dma_accattr, 1199 DDI_DMA_READ | DDI_DMA_STREAMING, 1200 &data->dma_data); 1201 if (err != DDI_SUCCESS) { 1202 cmn_err(CE_WARN, "dma alloc rx ring buf[%d] " 1203 "failed\n", i); 1204 goto fail; 1205 } 1206 /* 1207 * the physical address bit [8-36] are used, 1208 * instead of bit [0-31] in 3945. 
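 * Hence the ">> 8" below: each 32-bit Rx ring entry holds the upper
 * address bits of a 256-byte-aligned buffer (see rx_buffer_dma_attr).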
1209 */ 1210 ring->desc[i] = LE_32((uint32_t) 1211 (data->dma_data.cookie.dmac_address >> 8)); 1212 } 1213 dma_p = &ring->data[0].dma_data; 1214 IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx " 1215 "size:%lx]\n", 1216 dma_p->ncookies, dma_p->cookie.dmac_address, 1217 dma_p->cookie.dmac_size)); 1218 1219 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV); 1220 1221 return (err); 1222 1223 fail: 1224 iwk_free_rx_ring(sc); 1225 return (err); 1226 } 1227 1228 static void 1229 iwk_reset_rx_ring(iwk_sc_t *sc) 1230 { 1231 int n; 1232 1233 iwk_mac_access_enter(sc); 1234 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 1235 for (n = 0; n < 2000; n++) { 1236 if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) 1237 break; 1238 DELAY(1000); 1239 } 1240 #ifdef DEBUG 1241 if (n == 2000) 1242 IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n")); 1243 #endif 1244 iwk_mac_access_exit(sc); 1245 1246 sc->sc_rxq.cur = 0; 1247 } 1248 1249 static void 1250 iwk_free_rx_ring(iwk_sc_t *sc) 1251 { 1252 int i; 1253 1254 for (i = 0; i < RX_QUEUE_SIZE; i++) { 1255 if (sc->sc_rxq.data[i].dma_data.dma_hdl) 1256 IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data, 1257 DDI_DMA_SYNC_FORCPU); 1258 iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data); 1259 } 1260 1261 if (sc->sc_rxq.dma_desc.dma_hdl) 1262 IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV); 1263 iwk_free_dma_mem(&sc->sc_rxq.dma_desc); 1264 } 1265 1266 static int 1267 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring, 1268 int slots, int qid) 1269 { 1270 iwk_tx_data_t *data; 1271 iwk_tx_desc_t *desc_h; 1272 uint32_t paddr_desc_h; 1273 iwk_cmd_t *cmd_h; 1274 uint32_t paddr_cmd_h; 1275 iwk_dma_t *dma_p; 1276 int i, err = DDI_SUCCESS; 1277 1278 ring->qid = qid; 1279 ring->count = TFD_QUEUE_SIZE_MAX; 1280 ring->window = slots; 1281 ring->queued = 0; 1282 ring->cur = 0; 1283 1284 err = iwk_alloc_dma_mem(sc, 1285 TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t), 1286 &ring_desc_dma_attr, &iwk_dma_accattr, 1287 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1288 &ring->dma_desc); 1289 if (err != DDI_SUCCESS) { 1290 cmn_err(CE_WARN, "dma alloc tx ring desc[%d] " 1291 "failed\n", qid); 1292 goto fail; 1293 } 1294 dma_p = &ring->dma_desc; 1295 IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n", 1296 dma_p->ncookies, dma_p->cookie.dmac_address, 1297 dma_p->cookie.dmac_size)); 1298 1299 desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va; 1300 paddr_desc_h = ring->dma_desc.cookie.dmac_address; 1301 1302 err = iwk_alloc_dma_mem(sc, 1303 TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t), 1304 &cmd_dma_attr, &iwk_dma_accattr, 1305 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1306 &ring->dma_cmd); 1307 if (err != DDI_SUCCESS) { 1308 cmn_err(CE_WARN, "dma alloc tx ring cmd[%d] " 1309 "failed\n", qid); 1310 goto fail; 1311 } 1312 dma_p = &ring->dma_cmd; 1313 IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n", 1314 dma_p->ncookies, dma_p->cookie.dmac_address, 1315 dma_p->cookie.dmac_size)); 1316 1317 cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va; 1318 paddr_cmd_h = ring->dma_cmd.cookie.dmac_address; 1319 1320 /* 1321 * Allocate Tx buffers. 
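 * Each of the TFD_QUEUE_SIZE_MAX slots gets its own DMA buffer plus
 * pointers into the descriptor and command arrays allocated above
 * (data->desc/paddr_desc and data->cmd/paddr_cmd).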
1322 */ 1323 ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX, 1324 KM_NOSLEEP); 1325 if (ring->data == NULL) { 1326 cmn_err(CE_WARN, "could not allocate tx data slots\n"); 1327 goto fail; 1328 } 1329 1330 for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) { 1331 data = &ring->data[i]; 1332 err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz, 1333 &tx_buffer_dma_attr, &iwk_dma_accattr, 1334 DDI_DMA_WRITE | DDI_DMA_STREAMING, 1335 &data->dma_data); 1336 if (err != DDI_SUCCESS) { 1337 cmn_err(CE_WARN, "dma alloc tx ring " 1338 "buf[%d] failed\n", i); 1339 goto fail; 1340 } 1341 1342 data->desc = desc_h + i; 1343 data->paddr_desc = paddr_desc_h + 1344 _PTRDIFF(data->desc, desc_h); 1345 data->cmd = cmd_h + i; /* (i % slots); */ 1346 /* ((i % slots) * sizeof (iwk_cmd_t)); */ 1347 data->paddr_cmd = paddr_cmd_h + 1348 _PTRDIFF(data->cmd, cmd_h); 1349 } 1350 dma_p = &ring->data[0].dma_data; 1351 IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx " 1352 "size:%lx]\n", 1353 dma_p->ncookies, dma_p->cookie.dmac_address, 1354 dma_p->cookie.dmac_size)); 1355 1356 return (err); 1357 1358 fail: 1359 if (ring->data) 1360 kmem_free(ring->data, 1361 sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX); 1362 iwk_free_tx_ring(sc, ring); 1363 return (err); 1364 } 1365 1366 static void 1367 iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring) 1368 { 1369 iwk_tx_data_t *data; 1370 int i, n; 1371 1372 iwk_mac_access_enter(sc); 1373 1374 IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0); 1375 for (n = 0; n < 200; n++) { 1376 if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) & 1377 IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) 1378 break; 1379 DELAY(10); 1380 } 1381 if (n == 200) { 1382 IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n", 1383 ring->qid)); 1384 } 1385 iwk_mac_access_exit(sc); 1386 1387 for (i = 0; i < ring->count; i++) { 1388 data = &ring->data[i]; 1389 IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV); 1390 } 1391 1392 ring->queued = 0; 1393 ring->cur = 0; 1394 } 1395 1396 /*ARGSUSED*/ 1397 static void 1398 iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring) 1399 { 1400 int i; 1401 1402 if (ring->dma_desc.dma_hdl != NULL) 1403 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV); 1404 iwk_free_dma_mem(&ring->dma_desc); 1405 1406 if (ring->dma_cmd.dma_hdl != NULL) 1407 IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV); 1408 iwk_free_dma_mem(&ring->dma_cmd); 1409 1410 if (ring->data != NULL) { 1411 for (i = 0; i < ring->count; i++) { 1412 if (ring->data[i].dma_data.dma_hdl) 1413 IWK_DMA_SYNC(ring->data[i].dma_data, 1414 DDI_DMA_SYNC_FORDEV); 1415 iwk_free_dma_mem(&ring->data[i].dma_data); 1416 } 1417 kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t)); 1418 } 1419 } 1420 1421 static int 1422 iwk_ring_init(iwk_sc_t *sc) 1423 { 1424 int i, err = DDI_SUCCESS; 1425 1426 for (i = 0; i < IWK_NUM_QUEUES; i++) { 1427 if (i == IWK_CMD_QUEUE_NUM) 1428 continue; 1429 err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS, 1430 i); 1431 if (err != DDI_SUCCESS) 1432 goto fail; 1433 } 1434 err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM], 1435 TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM); 1436 if (err != DDI_SUCCESS) 1437 goto fail; 1438 err = iwk_alloc_rx_ring(sc); 1439 if (err != DDI_SUCCESS) 1440 goto fail; 1441 return (err); 1442 1443 fail: 1444 return (err); 1445 } 1446 1447 static void 1448 iwk_ring_free(iwk_sc_t *sc) 1449 { 1450 int i = IWK_NUM_QUEUES; 1451 1452 iwk_free_rx_ring(sc); 1453 while (--i >= 0) { 1454 iwk_free_tx_ring(sc, &sc->sc_txq[i]); 1455 } 1456 } 1457 1458 /* ARGSUSED 
*/ 1459 static ieee80211_node_t * 1460 iwk_node_alloc(ieee80211com_t *ic) 1461 { 1462 iwk_amrr_t *amrr; 1463 1464 amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP); 1465 if (amrr != NULL) 1466 iwk_amrr_init(amrr); 1467 return (&amrr->in); 1468 } 1469 1470 static void 1471 iwk_node_free(ieee80211_node_t *in) 1472 { 1473 ieee80211com_t *ic = in->in_ic; 1474 1475 ic->ic_node_cleanup(in); 1476 if (in->in_wpa_ie != NULL) 1477 ieee80211_free(in->in_wpa_ie); 1478 kmem_free(in, sizeof (iwk_amrr_t)); 1479 } 1480 1481 /*ARGSUSED*/ 1482 static int 1483 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg) 1484 { 1485 iwk_sc_t *sc = (iwk_sc_t *)ic; 1486 ieee80211_node_t *in = ic->ic_bss; 1487 enum ieee80211_state ostate = ic->ic_state; 1488 int i, err = IWK_SUCCESS; 1489 1490 mutex_enter(&sc->sc_glock); 1491 switch (nstate) { 1492 case IEEE80211_S_SCAN: 1493 switch (ostate) { 1494 case IEEE80211_S_INIT: 1495 { 1496 iwk_add_sta_t node; 1497 1498 sc->sc_flags |= IWK_F_SCANNING; 1499 iwk_set_led(sc, 2, 10, 2); 1500 1501 /* 1502 * clear association to receive beacons from 1503 * all BSS'es 1504 */ 1505 sc->sc_config.assoc_id = 0; 1506 sc->sc_config.filter_flags &= 1507 ~LE_32(RXON_FILTER_ASSOC_MSK); 1508 1509 IWK_DBG((IWK_DEBUG_80211, "config chan %d " 1510 "flags %x filter_flags %x\n", sc->sc_config.chan, 1511 sc->sc_config.flags, sc->sc_config.filter_flags)); 1512 1513 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 1514 sizeof (iwk_rxon_cmd_t), 1); 1515 if (err != IWK_SUCCESS) { 1516 cmn_err(CE_WARN, 1517 "could not clear association\n"); 1518 sc->sc_flags &= ~IWK_F_SCANNING; 1519 mutex_exit(&sc->sc_glock); 1520 return (err); 1521 } 1522 1523 /* add broadcast node to send probe request */ 1524 (void) memset(&node, 0, sizeof (node)); 1525 (void) memset(&node.bssid, 0xff, IEEE80211_ADDR_LEN); 1526 node.id = IWK_BROADCAST_ID; 1527 err = iwk_cmd(sc, REPLY_ADD_STA, &node, 1528 sizeof (node), 1); 1529 if (err != IWK_SUCCESS) { 1530 cmn_err(CE_WARN, "could not add " 1531 "broadcast node\n"); 1532 sc->sc_flags &= ~IWK_F_SCANNING; 1533 mutex_exit(&sc->sc_glock); 1534 return (err); 1535 } 1536 break; 1537 } 1538 case IEEE80211_S_SCAN: 1539 mutex_exit(&sc->sc_glock); 1540 /* step to next channel before actual FW scan */ 1541 err = sc->sc_newstate(ic, nstate, arg); 1542 mutex_enter(&sc->sc_glock); 1543 if ((err != 0) || ((err = iwk_scan(sc)) != 0)) { 1544 cmn_err(CE_WARN, 1545 "could not initiate scan\n"); 1546 sc->sc_flags &= ~IWK_F_SCANNING; 1547 ieee80211_cancel_scan(ic); 1548 } 1549 mutex_exit(&sc->sc_glock); 1550 return (err); 1551 default: 1552 break; 1553 1554 } 1555 sc->sc_clk = 0; 1556 break; 1557 1558 case IEEE80211_S_AUTH: 1559 if (ostate == IEEE80211_S_SCAN) { 1560 sc->sc_flags &= ~IWK_F_SCANNING; 1561 } 1562 1563 /* reset state to handle reassociations correctly */ 1564 sc->sc_config.assoc_id = 0; 1565 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK); 1566 1567 /* 1568 * before sending authentication and association request frame, 1569 * we need do something in the hardware, such as setting the 1570 * channel same to the target AP... 
1571 */ 1572 if ((err = iwk_hw_set_before_auth(sc)) != 0) { 1573 cmn_err(CE_WARN, "could not setup firmware for " 1574 "authentication\n"); 1575 mutex_exit(&sc->sc_glock); 1576 return (err); 1577 } 1578 break; 1579 1580 case IEEE80211_S_RUN: 1581 if (ostate == IEEE80211_S_SCAN) { 1582 sc->sc_flags &= ~IWK_F_SCANNING; 1583 } 1584 1585 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 1586 /* let LED blink when monitoring */ 1587 iwk_set_led(sc, 2, 10, 10); 1588 break; 1589 } 1590 IWK_DBG((IWK_DEBUG_80211, "iwk: associated.")); 1591 1592 /* none IBSS mode */ 1593 if (ic->ic_opmode != IEEE80211_M_IBSS) { 1594 /* update adapter's configuration */ 1595 if (sc->sc_assoc_id != in->in_associd) { 1596 cmn_err(CE_WARN, 1597 "associate ID mismatch: expected %d, " 1598 "got %d\n", 1599 in->in_associd, sc->sc_assoc_id); 1600 } 1601 sc->sc_config.assoc_id = in->in_associd & 0x3fff; 1602 /* 1603 * short preamble/slot time are 1604 * negotiated when associating 1605 */ 1606 sc->sc_config.flags &= 1607 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK | 1608 RXON_FLG_SHORT_SLOT_MSK); 1609 1610 if (ic->ic_flags & IEEE80211_F_SHSLOT) 1611 sc->sc_config.flags |= 1612 LE_32(RXON_FLG_SHORT_SLOT_MSK); 1613 1614 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 1615 sc->sc_config.flags |= 1616 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK); 1617 1618 sc->sc_config.filter_flags |= 1619 LE_32(RXON_FILTER_ASSOC_MSK); 1620 1621 if (ic->ic_opmode != IEEE80211_M_STA) 1622 sc->sc_config.filter_flags |= 1623 LE_32(RXON_FILTER_BCON_AWARE_MSK); 1624 1625 IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x" 1626 " filter_flags %x\n", 1627 sc->sc_config.chan, sc->sc_config.flags, 1628 sc->sc_config.filter_flags)); 1629 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 1630 sizeof (iwk_rxon_cmd_t), 1); 1631 if (err != IWK_SUCCESS) { 1632 cmn_err(CE_WARN, "could not update " 1633 "configuration\n"); 1634 mutex_exit(&sc->sc_glock); 1635 return (err); 1636 } 1637 } 1638 1639 /* obtain current temperature of chipset */ 1640 sc->sc_tempera = iwk_curr_tempera(sc); 1641 1642 /* 1643 * make Tx power calibration to determine 1644 * the gains of DSP and radio 1645 */ 1646 err = iwk_tx_power_calibration(sc); 1647 if (err) { 1648 cmn_err(CE_WARN, "iwk_newstate(): " 1649 "failed to set tx power table\n"); 1650 return (err); 1651 } 1652 1653 /* start automatic rate control */ 1654 mutex_enter(&sc->sc_mt_lock); 1655 if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) { 1656 sc->sc_flags |= IWK_F_RATE_AUTO_CTL; 1657 /* set rate to some reasonable initial value */ 1658 i = in->in_rates.ir_nrates - 1; 1659 while (i > 0 && IEEE80211_RATE(i) > 72) 1660 i--; 1661 in->in_txrate = i; 1662 } else { 1663 sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL; 1664 } 1665 mutex_exit(&sc->sc_mt_lock); 1666 1667 /* set LED on after associated */ 1668 iwk_set_led(sc, 2, 0, 1); 1669 break; 1670 1671 case IEEE80211_S_INIT: 1672 if (ostate == IEEE80211_S_SCAN) { 1673 sc->sc_flags &= ~IWK_F_SCANNING; 1674 } 1675 1676 /* set LED off after init */ 1677 iwk_set_led(sc, 2, 1, 0); 1678 break; 1679 case IEEE80211_S_ASSOC: 1680 if (ostate == IEEE80211_S_SCAN) { 1681 sc->sc_flags &= ~IWK_F_SCANNING; 1682 } 1683 1684 break; 1685 } 1686 1687 mutex_exit(&sc->sc_glock); 1688 1689 err = sc->sc_newstate(ic, nstate, arg); 1690 1691 if (nstate == IEEE80211_S_RUN) { 1692 1693 mutex_enter(&sc->sc_glock); 1694 1695 /* 1696 * make initialization for Receiver 1697 * sensitivity calibration 1698 */ 1699 err = iwk_rx_sens_init(sc); 1700 if (err) { 1701 cmn_err(CE_WARN, "iwk_newstate(): " 1702 "failed to init RX sensitivity\n"); 1703 
mutex_exit(&sc->sc_glock); 1704 return (err); 1705 } 1706 1707 /* make initialization for Receiver gain balance */ 1708 err = iwk_rxgain_diff_init(sc); 1709 if (err) { 1710 cmn_err(CE_WARN, "iwk_newstate(): " 1711 "failed to init phy calibration\n"); 1712 mutex_exit(&sc->sc_glock); 1713 return (err); 1714 } 1715 1716 mutex_exit(&sc->sc_glock); 1717 1718 } 1719 1720 return (err); 1721 } 1722 1723 /*ARGSUSED*/ 1724 static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k, 1725 const uint8_t mac[IEEE80211_ADDR_LEN]) 1726 { 1727 iwk_sc_t *sc = (iwk_sc_t *)ic; 1728 iwk_add_sta_t node; 1729 int err; 1730 1731 switch (k->wk_cipher->ic_cipher) { 1732 case IEEE80211_CIPHER_WEP: 1733 case IEEE80211_CIPHER_TKIP: 1734 return (1); /* sofeware do it. */ 1735 case IEEE80211_CIPHER_AES_CCM: 1736 break; 1737 default: 1738 return (0); 1739 } 1740 sc->sc_config.filter_flags &= ~(RXON_FILTER_DIS_DECRYPT_MSK | 1741 RXON_FILTER_DIS_GRP_DECRYPT_MSK); 1742 1743 mutex_enter(&sc->sc_glock); 1744 1745 /* update ap/multicast node */ 1746 (void) memset(&node, 0, sizeof (node)); 1747 if (IEEE80211_IS_MULTICAST(mac)) { 1748 (void) memset(node.bssid, 0xff, 6); 1749 node.id = IWK_BROADCAST_ID; 1750 } else { 1751 IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid); 1752 node.id = IWK_AP_ID; 1753 } 1754 if (k->wk_flags & IEEE80211_KEY_XMIT) { 1755 node.key_flags = 0; 1756 node.keyp = k->wk_keyix; 1757 } else { 1758 node.key_flags = (1 << 14); 1759 node.keyp = k->wk_keyix + 4; 1760 } 1761 (void) memcpy(node.key, k->wk_key, k->wk_keylen); 1762 node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8)); 1763 node.sta_mask = STA_MODIFY_KEY_MASK; 1764 node.control = 1; 1765 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1); 1766 if (err != IWK_SUCCESS) { 1767 cmn_err(CE_WARN, "iwk_key_set():" 1768 "failed to update ap node\n"); 1769 mutex_exit(&sc->sc_glock); 1770 return (0); 1771 } 1772 mutex_exit(&sc->sc_glock); 1773 return (1); 1774 } 1775 1776 /* 1777 * exclusive access to mac begin. 1778 */ 1779 static void 1780 iwk_mac_access_enter(iwk_sc_t *sc) 1781 { 1782 uint32_t tmp; 1783 int n; 1784 1785 tmp = IWK_READ(sc, CSR_GP_CNTRL); 1786 IWK_WRITE(sc, CSR_GP_CNTRL, 1787 tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1788 1789 /* wait until we succeed */ 1790 for (n = 0; n < 1000; n++) { 1791 if ((IWK_READ(sc, CSR_GP_CNTRL) & 1792 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 1793 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) == 1794 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) 1795 break; 1796 DELAY(10); 1797 } 1798 if (n == 1000) 1799 IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n")); 1800 } 1801 1802 /* 1803 * exclusive access to mac end. 
1804 */ 1805 static void 1806 iwk_mac_access_exit(iwk_sc_t *sc) 1807 { 1808 uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL); 1809 IWK_WRITE(sc, CSR_GP_CNTRL, 1810 tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1811 } 1812 1813 static uint32_t 1814 iwk_mem_read(iwk_sc_t *sc, uint32_t addr) 1815 { 1816 IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr); 1817 return (IWK_READ(sc, HBUS_TARG_MEM_RDAT)); 1818 } 1819 1820 static void 1821 iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data) 1822 { 1823 IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr); 1824 IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data); 1825 } 1826 1827 static uint32_t 1828 iwk_reg_read(iwk_sc_t *sc, uint32_t addr) 1829 { 1830 IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24)); 1831 return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT)); 1832 } 1833 1834 static void 1835 iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data) 1836 { 1837 IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24)); 1838 IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data); 1839 } 1840 1841 static void 1842 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr, 1843 uint32_t *data, int wlen) 1844 { 1845 for (; wlen > 0; wlen--, data++, addr += 4) 1846 iwk_reg_write(sc, addr, *data); 1847 } 1848 1849 1850 /* 1851 * ucode load/initialization steps: 1852 * 1) load Bootstrap State Machine (BSM) with "bootstrap" uCode image. 1853 * BSM contains a small memory that *always* stays powered up, so it can 1854 * retain the bootstrap program even when the card is in a power-saving 1855 * power-down state. The BSM loads the small program into ARC processor's 1856 * instruction memory when triggered by power-up. 1857 * 2) load Initialize image via bootstrap program. 1858 * The Initialize image sets up regulatory and calibration data for the 1859 * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed. 1860 * The 4965 reply contains calibration data for temperature, voltage and tx gain 1861 * correction. 1862 */ 1863 static int 1864 iwk_load_firmware(iwk_sc_t *sc) 1865 { 1866 uint32_t *boot_fw = (uint32_t *)sc->sc_boot; 1867 uint32_t size = sc->sc_hdr->bootsz; 1868 int n, err = IWK_SUCCESS; 1869 1870 /* 1871 * The physical address bit [4-35] of the initialize uCode. 1872 * In the initialize alive notify interrupt the physical address of 1873 * the runtime ucode will be set for loading. 
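 * The BSM DRAM pointer registers are therefore written with the DMA
 * addresses shifted right by 4 (fw_dma_attr guarantees at least
 * 16-byte alignment), along with the byte counts of each section.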
1874 */ 1875 iwk_mac_access_enter(sc); 1876 1877 iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG, 1878 sc->sc_dma_fw_init_text.cookie.dmac_address >> 4); 1879 iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG, 1880 sc->sc_dma_fw_init_data.cookie.dmac_address >> 4); 1881 iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG, 1882 sc->sc_dma_fw_init_text.cookie.dmac_size); 1883 iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG, 1884 sc->sc_dma_fw_init_data.cookie.dmac_size); 1885 1886 /* load bootstrap code into BSM memory */ 1887 iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw, 1888 size / sizeof (uint32_t)); 1889 1890 iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0); 1891 iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND); 1892 iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t)); 1893 1894 /* 1895 * prepare to load initialize uCode 1896 */ 1897 iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); 1898 1899 /* wait while the adapter is busy loading the firmware */ 1900 for (n = 0; n < 1000; n++) { 1901 if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) & 1902 BSM_WR_CTRL_REG_BIT_START)) 1903 break; 1904 DELAY(10); 1905 } 1906 if (n == 1000) { 1907 cmn_err(CE_WARN, "timeout transferring firmware\n"); 1908 err = ETIMEDOUT; 1909 return (err); 1910 } 1911 1912 /* for future power-save mode use */ 1913 iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); 1914 1915 iwk_mac_access_exit(sc); 1916 1917 return (err); 1918 } 1919 1920 /*ARGSUSED*/ 1921 static void 1922 iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data) 1923 { 1924 ieee80211com_t *ic = &sc->sc_ic; 1925 iwk_rx_ring_t *ring = &sc->sc_rxq; 1926 iwk_rx_phy_res_t *stat; 1927 ieee80211_node_t *in; 1928 uint32_t *tail; 1929 struct ieee80211_frame *wh; 1930 mblk_t *mp; 1931 uint16_t len, rssi, mrssi, agc; 1932 int16_t t; 1933 uint32_t ants, i; 1934 struct iwk_rx_non_cfg_phy *phyinfo; 1935 1936 /* assuming not 11n here. cope with 11n in phase-II */ 1937 stat = (iwk_rx_phy_res_t *)(desc + 1); 1938 if (stat->cfg_phy_cnt > 20) { 1939 return; 1940 } 1941 1942 phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy; 1943 agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS; 1944 mrssi = 0; 1945 ants = (stat->phy_flags & RX_PHY_FLAGS_ANTENNAE_MASK) >> 1946 RX_PHY_FLAGS_ANTENNAE_OFFSET; 1947 for (i = 0; i < 3; i++) { 1948 if (ants & (1 << i)) 1949 mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]); 1950 } 1951 t = mrssi - agc - 44; /* t is the dBM value */ 1952 /* 1953 * convert dBm to percentage ??? 
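 * The quadratic below maps the estimated dBm value t onto a 1-100
 * scale (about -20 dBm or better maps to 100%); the result is then
 * clamped to that range.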
1954 */ 1955 rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t))) / 1956 (75 * 75); 1957 if (rssi > 100) 1958 rssi = 100; 1959 if (rssi < 1) 1960 rssi = 1; 1961 len = stat->byte_count; 1962 tail = (uint32_t *)((uint8_t *)(stat + 1) + stat->cfg_phy_cnt + len); 1963 1964 IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d " 1965 "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x " 1966 "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat), 1967 len, stat->rate.r.s.rate, stat->channel, 1968 LE_32(stat->timestampl), stat->non_cfg_phy_cnt, 1969 stat->cfg_phy_cnt, LE_32(*tail))); 1970 1971 if ((len < 16) || (len > sc->sc_dmabuf_sz)) { 1972 IWK_DBG((IWK_DEBUG_RX, "rx frame oversize\n")); 1973 return; 1974 } 1975 1976 /* 1977 * discard Rx frames with bad CRC 1978 */ 1979 if ((LE_32(*tail) & 1980 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) != 1981 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) { 1982 IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n", 1983 LE_32(*tail))); 1984 sc->sc_rx_err++; 1985 return; 1986 } 1987 1988 wh = (struct ieee80211_frame *) 1989 ((uint8_t *)(stat + 1)+ stat->cfg_phy_cnt); 1990 if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) { 1991 sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2); 1992 IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n", 1993 sc->sc_assoc_id)); 1994 } 1995 #ifdef DEBUG 1996 if (iwk_dbg_flags & IWK_DEBUG_RX) 1997 ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0); 1998 #endif 1999 in = ieee80211_find_rxnode(ic, wh); 2000 mp = allocb(len, BPRI_MED); 2001 if (mp) { 2002 (void) memcpy(mp->b_wptr, wh, len); 2003 mp->b_wptr += len; 2004 2005 /* send the frame to the 802.11 layer */ 2006 (void) ieee80211_input(ic, mp, in, rssi, 0); 2007 } else { 2008 sc->sc_rx_nobuf++; 2009 IWK_DBG((IWK_DEBUG_RX, 2010 "iwk_rx_intr(): alloc rx buf failed\n")); 2011 } 2012 /* release node reference */ 2013 ieee80211_free_node(in); 2014 } 2015 2016 /*ARGSUSED*/ 2017 static void 2018 iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data) 2019 { 2020 ieee80211com_t *ic = &sc->sc_ic; 2021 iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3]; 2022 iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1); 2023 iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss; 2024 2025 IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d" 2026 " retries=%d frame_count=%x nkill=%d " 2027 "rate=%x duration=%d status=%x\n", 2028 desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count, 2029 stat->bt_kill_count, stat->rate.r.s.rate, 2030 LE_32(stat->duration), LE_32(stat->status))); 2031 2032 amrr->txcnt++; 2033 IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt)); 2034 if (stat->ntries > 0) { 2035 amrr->retrycnt++; 2036 sc->sc_tx_retries++; 2037 IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n", 2038 sc->sc_tx_retries)); 2039 } 2040 2041 sc->sc_tx_timer = 0; 2042 2043 mutex_enter(&sc->sc_tx_lock); 2044 ring->queued--; 2045 if (ring->queued < 0) 2046 ring->queued = 0; 2047 if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) { 2048 sc->sc_need_reschedule = 0; 2049 mutex_exit(&sc->sc_tx_lock); 2050 mac_tx_update(ic->ic_mach); 2051 mutex_enter(&sc->sc_tx_lock); 2052 } 2053 mutex_exit(&sc->sc_tx_lock); 2054 } 2055 2056 static void 2057 iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc) 2058 { 2059 if ((desc->hdr.qid & 7) != 4) { 2060 return; 2061 } 2062 mutex_enter(&sc->sc_glock); 2063 sc->sc_flags |= IWK_F_CMD_DONE; 2064 cv_signal(&sc->sc_cmd_cv); 2065 mutex_exit(&sc->sc_glock); 2066 IWK_DBG((IWK_DEBUG_CMD, "rx cmd: " 2067 "qid=%x idx=%d flags=%x 
type=0x%x\n", 2068 desc->hdr.qid, desc->hdr.idx, desc->hdr.flags, 2069 desc->hdr.type)); 2070 } 2071 2072 static void 2073 iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc) 2074 { 2075 uint32_t base, i; 2076 struct iwk_alive_resp *ar = 2077 (struct iwk_alive_resp *)(desc + 1); 2078 2079 /* the microcontroller is ready */ 2080 IWK_DBG((IWK_DEBUG_FW, 2081 "microcode alive notification minor: %x major: %x type:" 2082 " %x subtype: %x\n", 2083 ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype)); 2084 2085 if (LE_32(ar->is_valid) != UCODE_VALID_OK) { 2086 IWK_DBG((IWK_DEBUG_FW, 2087 "microcontroller initialization failed\n")); 2088 } 2089 if (ar->ver_subtype == INITIALIZE_SUBTYPE) { 2090 IWK_DBG((IWK_DEBUG_FW, 2091 "initialization alive received.\n")); 2092 (void) memcpy(&sc->sc_card_alive_init, ar, 2093 sizeof (struct iwk_init_alive_resp)); 2094 /* XXX get temperature */ 2095 iwk_mac_access_enter(sc); 2096 iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG, 2097 sc->sc_dma_fw_text.cookie.dmac_address >> 4); 2098 iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG, 2099 sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4); 2100 iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG, 2101 sc->sc_dma_fw_data.cookie.dmac_size); 2102 iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG, 2103 sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000); 2104 iwk_mac_access_exit(sc); 2105 } else { 2106 IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n")); 2107 (void) memcpy(&sc->sc_card_alive_run, ar, 2108 sizeof (struct iwk_alive_resp)); 2109 2110 /* 2111 * Init SCD related registers to make Tx work. XXX 2112 */ 2113 iwk_mac_access_enter(sc); 2114 2115 /* read sram address of data base */ 2116 sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR); 2117 2118 /* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */ 2119 for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0; 2120 i < 128; i += 4) 2121 iwk_mem_write(sc, base + i, 0); 2122 2123 /* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */ 2124 for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET; 2125 i < 256; i += 4) 2126 iwk_mem_write(sc, base + i, 0); 2127 2128 /* clear and init SCD_TRANSLATE_TBL_OFFSET area. 
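 *
 * Note that the loop index i is shared by the three clearing loops:
 * it is reset to 0 only before the first one, so the second loop
 * starts writing at offset 128 of its area and the loop just below
 * starts with i == 256, where the bound
 * sizeof (uint16_t) * IWK_NUM_QUEUES is already exceeded and nothing
 * is written.  If each area is meant to be cleared from its own start,
 * a sketch along these lines (hypothetical, not the shipped code)
 * would do it:
 *
 *	for (i = 0; i < 128; i += 4)
 *		iwk_mem_write(sc, sc->sc_scd_base +
 *		    SCD_CONTEXT_DATA_OFFSET + i, 0);
 *	for (i = 0; i < 256; i += 4)
 *		iwk_mem_write(sc, sc->sc_scd_base +
 *		    SCD_TX_STTS_BITMAP_OFFSET + i, 0);
 *	for (i = 0; i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
 *		iwk_mem_write(sc, sc->sc_scd_base +
 *		    SCD_TRANSLATE_TBL_OFFSET + i, 0);
 *
 * For reference, the translate table the loop just below is meant to
 * clear spans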
32 bytes */ 2129 for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET; 2130 i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4) 2131 iwk_mem_write(sc, base + i, 0); 2132 2133 iwk_reg_write(sc, SCD_DRAM_BASE_ADDR, 2134 sc->sc_dma_sh.cookie.dmac_address >> 10); 2135 iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0); 2136 2137 /* initiate the tx queues */ 2138 for (i = 0; i < IWK_NUM_QUEUES; i++) { 2139 iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0); 2140 IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8)); 2141 iwk_mem_write(sc, sc->sc_scd_base + 2142 SCD_CONTEXT_QUEUE_OFFSET(i), 2143 (SCD_WIN_SIZE & 0x7f)); 2144 iwk_mem_write(sc, sc->sc_scd_base + 2145 SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t), 2146 (SCD_FRAME_LIMIT & 0x7f) << 16); 2147 } 2148 /* interrupt enable on each queue0-7 */ 2149 iwk_reg_write(sc, SCD_INTERRUPT_MASK, 2150 (1 << IWK_NUM_QUEUES) - 1); 2151 /* enable each channel 0-7 */ 2152 iwk_reg_write(sc, SCD_TXFACT, 2153 SCD_TXFACT_REG_TXFIFO_MASK(0, 7)); 2154 /* 2155 * queue 0-7 maps to FIFO 0-7 and 2156 * all queues work under FIFO mode (none-scheduler-ack) 2157 */ 2158 for (i = 0; i < 7; i++) { 2159 iwk_reg_write(sc, 2160 SCD_QUEUE_STATUS_BITS(i), 2161 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| 2162 (i << SCD_QUEUE_STTS_REG_POS_TXF)| 2163 SCD_QUEUE_STTS_REG_MSK); 2164 } 2165 iwk_mac_access_exit(sc); 2166 2167 sc->sc_flags |= IWK_F_FW_INIT; 2168 cv_signal(&sc->sc_fw_cv); 2169 } 2170 2171 } 2172 2173 static uint_t 2174 /* LINTED: argument unused in function: unused */ 2175 iwk_rx_softintr(caddr_t arg, caddr_t unused) 2176 { 2177 iwk_sc_t *sc = (iwk_sc_t *)arg; 2178 ieee80211com_t *ic = &sc->sc_ic; 2179 iwk_rx_desc_t *desc; 2180 iwk_rx_data_t *data; 2181 uint32_t index; 2182 2183 mutex_enter(&sc->sc_glock); 2184 if (sc->sc_rx_softint_pending != 1) { 2185 mutex_exit(&sc->sc_glock); 2186 return (DDI_INTR_UNCLAIMED); 2187 } 2188 /* disable interrupts */ 2189 IWK_WRITE(sc, CSR_INT_MASK, 0); 2190 mutex_exit(&sc->sc_glock); 2191 2192 /* 2193 * firmware has moved the index of the rx queue, driver get it, 2194 * and deal with it. 2195 */ 2196 index = LE_32(sc->sc_shared->val0) & 0xfff; 2197 2198 while (sc->sc_rxq.cur != index) { 2199 data = &sc->sc_rxq.data[sc->sc_rxq.cur]; 2200 desc = (iwk_rx_desc_t *)data->dma_data.mem_va; 2201 2202 IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d" 2203 " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n", 2204 index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx, 2205 desc->hdr.flags, desc->hdr.type, LE_32(desc->len))); 2206 2207 /* a command other than a tx need to be replied */ 2208 if (!(desc->hdr.qid & 0x80) && 2209 (desc->hdr.type != REPLY_RX_PHY_CMD) && 2210 (desc->hdr.type != REPLY_TX) && 2211 (desc->hdr.type != REPLY_TX_PWR_TABLE_CMD) && 2212 (desc->hdr.type != REPLY_PHY_CALIBRATION_CMD) && 2213 (desc->hdr.type != SENSITIVITY_CMD)) 2214 iwk_cmd_intr(sc, desc); 2215 2216 switch (desc->hdr.type) { 2217 case REPLY_4965_RX: 2218 iwk_rx_intr(sc, desc, data); 2219 break; 2220 2221 case REPLY_TX: 2222 iwk_tx_intr(sc, desc, data); 2223 break; 2224 2225 case REPLY_ALIVE: 2226 iwk_ucode_alive(sc, desc); 2227 break; 2228 2229 case CARD_STATE_NOTIFICATION: 2230 { 2231 uint32_t *status = (uint32_t *)(desc + 1); 2232 2233 IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n", 2234 LE_32(*status))); 2235 2236 if (LE_32(*status) & 1) { 2237 /* 2238 * the radio button has to be pushed(OFF). 
It 2239 * is considered as a hw error, the 2240 * iwk_thread() tries to recover it after the 2241 * button is pushed again(ON) 2242 */ 2243 cmn_err(CE_NOTE, 2244 "iwk_rx_softintr(): " 2245 "Radio transmitter is off\n"); 2246 sc->sc_ostate = sc->sc_ic.ic_state; 2247 ieee80211_new_state(&sc->sc_ic, 2248 IEEE80211_S_INIT, -1); 2249 sc->sc_flags |= 2250 (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF); 2251 } 2252 break; 2253 } 2254 case SCAN_START_NOTIFICATION: 2255 { 2256 iwk_start_scan_t *scan = 2257 (iwk_start_scan_t *)(desc + 1); 2258 2259 IWK_DBG((IWK_DEBUG_SCAN, 2260 "scanning channel %d status %x\n", 2261 scan->chan, LE_32(scan->status))); 2262 2263 ic->ic_curchan = &ic->ic_sup_channels[scan->chan]; 2264 break; 2265 } 2266 case SCAN_COMPLETE_NOTIFICATION: 2267 { 2268 iwk_stop_scan_t *scan = 2269 (iwk_stop_scan_t *)(desc + 1); 2270 2271 IWK_DBG((IWK_DEBUG_SCAN, 2272 "completed channel %d (burst of %d) status %02x\n", 2273 scan->chan, scan->nchan, scan->status)); 2274 2275 sc->sc_scan_pending++; 2276 break; 2277 } 2278 case STATISTICS_NOTIFICATION: 2279 /* handle statistics notification */ 2280 iwk_statistics_notify(sc, desc); 2281 break; 2282 } 2283 2284 sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE; 2285 } 2286 2287 /* 2288 * driver dealt with what reveived in rx queue and tell the information 2289 * to the firmware. 2290 */ 2291 index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1; 2292 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7)); 2293 2294 mutex_enter(&sc->sc_glock); 2295 /* re-enable interrupts */ 2296 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK); 2297 sc->sc_rx_softint_pending = 0; 2298 mutex_exit(&sc->sc_glock); 2299 2300 return (DDI_INTR_CLAIMED); 2301 } 2302 2303 static uint_t 2304 /* LINTED: argument unused in function: unused */ 2305 iwk_intr(caddr_t arg, caddr_t unused) 2306 { 2307 iwk_sc_t *sc = (iwk_sc_t *)arg; 2308 uint32_t r, rfh; 2309 2310 mutex_enter(&sc->sc_glock); 2311 2312 if (sc->sc_flags & IWK_F_SUSPEND) { 2313 mutex_exit(&sc->sc_glock); 2314 return (DDI_INTR_UNCLAIMED); 2315 } 2316 2317 r = IWK_READ(sc, CSR_INT); 2318 if (r == 0 || r == 0xffffffff) { 2319 mutex_exit(&sc->sc_glock); 2320 return (DDI_INTR_UNCLAIMED); 2321 } 2322 2323 IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r)); 2324 2325 rfh = IWK_READ(sc, CSR_FH_INT_STATUS); 2326 IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh)); 2327 /* disable interrupts */ 2328 IWK_WRITE(sc, CSR_INT_MASK, 0); 2329 /* ack interrupts */ 2330 IWK_WRITE(sc, CSR_INT, r); 2331 IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh); 2332 2333 if (sc->sc_soft_hdl == NULL) { 2334 mutex_exit(&sc->sc_glock); 2335 return (DDI_INTR_CLAIMED); 2336 } 2337 if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) { 2338 cmn_err(CE_WARN, "fatal firmware error\n"); 2339 mutex_exit(&sc->sc_glock); 2340 #ifdef DEBUG 2341 /* dump event and error logs to dmesg */ 2342 iwk_write_error_log(sc); 2343 iwk_write_event_log(sc); 2344 #endif /* DEBUG */ 2345 iwk_stop(sc); 2346 sc->sc_ostate = sc->sc_ic.ic_state; 2347 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1); 2348 sc->sc_flags |= IWK_F_HW_ERR_RECOVER; 2349 return (DDI_INTR_CLAIMED); 2350 } 2351 2352 if (r & BIT_INT_RF_KILL) { 2353 IWK_DBG((IWK_DEBUG_RADIO, "RF kill\n")); 2354 } 2355 2356 if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) || 2357 (rfh & FH_INT_RX_MASK)) { 2358 sc->sc_rx_softint_pending = 1; 2359 (void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL); 2360 } 2361 2362 if (r & BIT_INT_ALIVE) { 2363 IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n")); 2364 } 2365 2366 /* re-enable interrupts */ 
2367 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK); 2368 mutex_exit(&sc->sc_glock); 2369 2370 return (DDI_INTR_CLAIMED); 2371 } 2372 2373 static uint8_t 2374 iwk_rate_to_plcp(int rate) 2375 { 2376 uint8_t ret; 2377 2378 switch (rate) { 2379 /* CCK rates */ 2380 case 2: 2381 ret = 0xa; 2382 break; 2383 case 4: 2384 ret = 0x14; 2385 break; 2386 case 11: 2387 ret = 0x37; 2388 break; 2389 case 22: 2390 ret = 0x6e; 2391 break; 2392 /* OFDM rates */ 2393 case 12: 2394 ret = 0xd; 2395 break; 2396 case 18: 2397 ret = 0xf; 2398 break; 2399 case 24: 2400 ret = 0x5; 2401 break; 2402 case 36: 2403 ret = 0x7; 2404 break; 2405 case 48: 2406 ret = 0x9; 2407 break; 2408 case 72: 2409 ret = 0xb; 2410 break; 2411 case 96: 2412 ret = 0x1; 2413 break; 2414 case 108: 2415 ret = 0x3; 2416 break; 2417 default: 2418 ret = 0; 2419 break; 2420 } 2421 return (ret); 2422 } 2423 2424 static mblk_t * 2425 iwk_m_tx(void *arg, mblk_t *mp) 2426 { 2427 iwk_sc_t *sc = (iwk_sc_t *)arg; 2428 ieee80211com_t *ic = &sc->sc_ic; 2429 mblk_t *next; 2430 2431 if (sc->sc_flags & IWK_F_SUSPEND) { 2432 freemsgchain(mp); 2433 return (NULL); 2434 } 2435 2436 if (ic->ic_state != IEEE80211_S_RUN) { 2437 freemsgchain(mp); 2438 return (NULL); 2439 } 2440 2441 while (mp != NULL) { 2442 next = mp->b_next; 2443 mp->b_next = NULL; 2444 if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) { 2445 mp->b_next = next; 2446 break; 2447 } 2448 mp = next; 2449 } 2450 return (mp); 2451 } 2452 2453 /* ARGSUSED */ 2454 static int 2455 iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type) 2456 { 2457 iwk_sc_t *sc = (iwk_sc_t *)ic; 2458 iwk_tx_ring_t *ring; 2459 iwk_tx_desc_t *desc; 2460 iwk_tx_data_t *data; 2461 iwk_cmd_t *cmd; 2462 iwk_tx_cmd_t *tx; 2463 ieee80211_node_t *in; 2464 struct ieee80211_frame *wh; 2465 struct ieee80211_key *k = NULL; 2466 mblk_t *m, *m0; 2467 int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS; 2468 uint16_t masks = 0; 2469 2470 ring = &sc->sc_txq[0]; 2471 data = &ring->data[ring->cur]; 2472 desc = data->desc; 2473 cmd = data->cmd; 2474 bzero(desc, sizeof (*desc)); 2475 bzero(cmd, sizeof (*cmd)); 2476 2477 mutex_enter(&sc->sc_tx_lock); 2478 if (sc->sc_flags & IWK_F_SUSPEND) { 2479 mutex_exit(&sc->sc_tx_lock); 2480 if ((type & IEEE80211_FC0_TYPE_MASK) != 2481 IEEE80211_FC0_TYPE_DATA) { 2482 freemsg(mp); 2483 } 2484 err = IWK_FAIL; 2485 goto exit; 2486 } 2487 2488 if (ring->queued > ring->count - 64) { 2489 IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n")); 2490 sc->sc_need_reschedule = 1; 2491 mutex_exit(&sc->sc_tx_lock); 2492 if ((type & IEEE80211_FC0_TYPE_MASK) != 2493 IEEE80211_FC0_TYPE_DATA) { 2494 freemsg(mp); 2495 } 2496 sc->sc_tx_nobuf++; 2497 err = IWK_FAIL; 2498 goto exit; 2499 } 2500 mutex_exit(&sc->sc_tx_lock); 2501 2502 hdrlen = sizeof (struct ieee80211_frame); 2503 2504 m = allocb(msgdsize(mp) + 32, BPRI_MED); 2505 if (m == NULL) { /* can not alloc buf, drop this package */ 2506 cmn_err(CE_WARN, 2507 "iwk_send(): failed to allocate msgbuf\n"); 2508 freemsg(mp); 2509 err = IWK_SUCCESS; 2510 goto exit; 2511 } 2512 for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) { 2513 mblen = MBLKL(m0); 2514 (void) memcpy(m->b_rptr + off, m0->b_rptr, mblen); 2515 off += mblen; 2516 } 2517 m->b_wptr += off; 2518 freemsg(mp); 2519 2520 wh = (struct ieee80211_frame *)m->b_rptr; 2521 2522 in = ieee80211_find_txnode(ic, wh->i_addr1); 2523 if (in == NULL) { 2524 cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n"); 2525 freemsg(m); 2526 sc->sc_tx_err++; 2527 err = IWK_SUCCESS; 2528 goto exit; 2529 } 2530 (void) 
ieee80211_encap(ic, m, in); 2531 2532 cmd->hdr.type = REPLY_TX; 2533 cmd->hdr.flags = 0; 2534 cmd->hdr.qid = ring->qid; 2535 cmd->hdr.idx = ring->cur; 2536 2537 tx = (iwk_tx_cmd_t *)cmd->data; 2538 tx->tx_flags = 0; 2539 2540 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2541 tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK)); 2542 } else { 2543 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK); 2544 } 2545 2546 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 2547 k = ieee80211_crypto_encap(ic, m); 2548 if (k == NULL) { 2549 freemsg(m); 2550 sc->sc_tx_err++; 2551 err = IWK_SUCCESS; 2552 goto exit; 2553 } 2554 2555 if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) { 2556 tx->sec_ctl = 2; /* for CCMP */ 2557 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK); 2558 (void) memcpy(&tx->key, k->wk_key, k->wk_keylen); 2559 } 2560 2561 /* packet header may have moved, reset our local pointer */ 2562 wh = (struct ieee80211_frame *)m->b_rptr; 2563 } 2564 2565 len = msgdsize(m); 2566 2567 #ifdef DEBUG 2568 if (iwk_dbg_flags & IWK_DEBUG_TX) 2569 ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0); 2570 #endif 2571 2572 /* pickup a rate */ 2573 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == 2574 IEEE80211_FC0_TYPE_MGT) { 2575 /* mgmt frames are sent at 1M */ 2576 rate = in->in_rates.ir_rates[0]; 2577 } else { 2578 /* 2579 * do it here for the software way rate control. 2580 * later for rate scaling in hardware. 2581 * maybe like the following, for management frame: 2582 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1; 2583 * for data frame: 2584 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK)); 2585 * rate = in->in_rates.ir_rates[in->in_txrate]; 2586 * tx->initial_rate_index = 1; 2587 * 2588 * now the txrate is determined in tx cmd flags, set to the 2589 * max value 54M for 11g and 11M for 11b. 
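 *
 * The rate values handled here are net80211's 500 kbps units (after
 * masking with IEEE80211_RATE_VAL): 2/4/11/22 are the CCK rates
 * 1/2/5.5/11 Mbps and 12..108 are the OFDM rates 6..54 Mbps.  As an
 * example, selecting ir_rates[in_txrate] == 108 (54 Mbps) leads to
 *
 *	iwk_rate_to_plcp(108)       == 0x3
 *	masks                       == RATE_MCS_ANT_B_MSK
 *	tx->rate.r.rate_n_flags     == 0x3 | RATE_MCS_ANT_B_MSK
 *
 * while a CCK rate such as 22 (11 Mbps) maps to PLCP 0x6e and also
 * gets RATE_MCS_CCK_MSK, as done a few lines further down.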
2590 */ 2591 2592 if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) { 2593 rate = ic->ic_fixed_rate; 2594 } else { 2595 rate = in->in_rates.ir_rates[in->in_txrate]; 2596 } 2597 } 2598 rate &= IEEE80211_RATE_VAL; 2599 IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x", 2600 in->in_txrate, in->in_rates.ir_nrates, rate)); 2601 2602 tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK)); 2603 2604 len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4); 2605 if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen)) 2606 tx->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; 2607 2608 /* retrieve destination node's id */ 2609 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2610 tx->sta_id = IWK_BROADCAST_ID; 2611 } else { 2612 if (ic->ic_opmode != IEEE80211_M_IBSS) 2613 tx->sta_id = IWK_AP_ID; 2614 } 2615 2616 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == 2617 IEEE80211_FC0_TYPE_MGT) { 2618 /* tell h/w to set timestamp in probe responses */ 2619 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 2620 IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2621 tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK); 2622 2623 if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 2624 IEEE80211_FC0_SUBTYPE_ASSOC_REQ) || 2625 ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 2626 IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) 2627 tx->timeout.pm_frame_timeout = 3; 2628 else 2629 tx->timeout.pm_frame_timeout = 2; 2630 } else 2631 tx->timeout.pm_frame_timeout = 0; 2632 if (rate == 2 || rate == 4 || rate == 11 || rate == 22) 2633 masks |= RATE_MCS_CCK_MSK; 2634 2635 masks |= RATE_MCS_ANT_B_MSK; 2636 tx->rate.r.rate_n_flags = (iwk_rate_to_plcp(rate) | masks); 2637 2638 IWK_DBG((IWK_DEBUG_TX, "tx flag = %x", 2639 tx->tx_flags)); 2640 2641 tx->rts_retry_limit = 60; 2642 tx->data_retry_limit = 15; 2643 2644 tx->stop_time.life_time = LE_32(0xffffffff); 2645 2646 tx->len = LE_16(len); 2647 2648 tx->dram_lsb_ptr = 2649 data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch); 2650 tx->dram_msb_ptr = 0; 2651 tx->driver_txop = 0; 2652 tx->next_frame_len = 0; 2653 2654 (void) memcpy(tx + 1, m->b_rptr, hdrlen); 2655 m->b_rptr += hdrlen; 2656 (void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen); 2657 2658 IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d", 2659 ring->qid, ring->cur, len)); 2660 2661 /* 2662 * first segment includes the tx cmd plus the 802.11 header, 2663 * the second includes the remaining of the 802.11 frame. 2664 */ 2665 desc->val0 = LE_32(2 << 24); 2666 desc->pa[0].tb1_addr = LE_32(data->paddr_cmd); 2667 desc->pa[0].val1 = ((len0 << 4) & 0xfff0) | 2668 ((data->dma_data.cookie.dmac_address & 0xffff) << 16); 2669 desc->pa[0].val2 = 2670 ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) | 2671 ((len - hdrlen) << 20); 2672 IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x " 2673 "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x", 2674 data->paddr_cmd, data->dma_data.cookie.dmac_address, 2675 len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2)); 2676 2677 mutex_enter(&sc->sc_tx_lock); 2678 ring->queued++; 2679 mutex_exit(&sc->sc_tx_lock); 2680 2681 /* kick ring */ 2682 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 2683 tfd_offset[ring->cur].val = 8 + len; 2684 if (ring->cur < IWK_MAX_WIN_SIZE) { 2685 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 
2686 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len; 2687 } 2688 2689 IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV); 2690 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV); 2691 2692 ring->cur = (ring->cur + 1) % ring->count; 2693 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 2694 freemsg(m); 2695 /* release node reference */ 2696 ieee80211_free_node(in); 2697 2698 ic->ic_stats.is_tx_bytes += len; 2699 ic->ic_stats.is_tx_frags++; 2700 2701 if (sc->sc_tx_timer == 0) 2702 sc->sc_tx_timer = 10; 2703 exit: 2704 return (err); 2705 } 2706 2707 static void 2708 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp) 2709 { 2710 iwk_sc_t *sc = (iwk_sc_t *)arg; 2711 ieee80211com_t *ic = &sc->sc_ic; 2712 int err; 2713 2714 err = ieee80211_ioctl(ic, wq, mp); 2715 2716 if (err == ENETRESET) { 2717 /* 2718 * This is special for the hidden AP connection. 2719 * In any case, we should make sure only one 'scan' 2720 * in the driver for a 'connect' CLI command. So 2721 * when connecting to a hidden AP, the scan is just 2722 * sent out to the air when we know the desired 2723 * essid of the AP we want to connect. 2724 */ 2725 if (ic->ic_des_esslen) { 2726 if (sc->sc_flags & IWK_F_RUNNING) { 2727 iwk_m_stop(sc); 2728 (void) iwk_m_start(sc); 2729 (void) ieee80211_new_state(ic, 2730 IEEE80211_S_SCAN, -1); 2731 } 2732 } 2733 } 2734 } 2735 2736 /* 2737 * callback functions for set/get properties 2738 */ 2739 static int 2740 iwk_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num, 2741 uint_t pr_flags, uint_t wldp_length, void *wldp_buf) 2742 { 2743 int err = 0; 2744 iwk_sc_t *sc = (iwk_sc_t *)arg; 2745 2746 err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num, 2747 pr_flags, wldp_length, wldp_buf); 2748 2749 return (err); 2750 } 2751 static int 2752 iwk_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num, 2753 uint_t wldp_length, const void *wldp_buf) 2754 { 2755 int err; 2756 iwk_sc_t *sc = (iwk_sc_t *)arg; 2757 ieee80211com_t *ic = &sc->sc_ic; 2758 2759 err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length, 2760 wldp_buf); 2761 2762 if (err == ENETRESET) { 2763 if (ic->ic_des_esslen) { 2764 if (sc->sc_flags & IWK_F_RUNNING) { 2765 iwk_m_stop(sc); 2766 (void) iwk_m_start(sc); 2767 (void) ieee80211_new_state(ic, 2768 IEEE80211_S_SCAN, -1); 2769 } 2770 } 2771 err = 0; 2772 } 2773 2774 return (err); 2775 } 2776 2777 /*ARGSUSED*/ 2778 static int 2779 iwk_m_stat(void *arg, uint_t stat, uint64_t *val) 2780 { 2781 iwk_sc_t *sc = (iwk_sc_t *)arg; 2782 ieee80211com_t *ic = &sc->sc_ic; 2783 ieee80211_node_t *in = ic->ic_bss; 2784 struct ieee80211_rateset *rs = &in->in_rates; 2785 2786 mutex_enter(&sc->sc_glock); 2787 switch (stat) { 2788 case MAC_STAT_IFSPEED: 2789 *val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ? 
2790 (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) 2791 : ic->ic_fixed_rate) /2 * 1000000; 2792 break; 2793 case MAC_STAT_NOXMTBUF: 2794 *val = sc->sc_tx_nobuf; 2795 break; 2796 case MAC_STAT_NORCVBUF: 2797 *val = sc->sc_rx_nobuf; 2798 break; 2799 case MAC_STAT_IERRORS: 2800 *val = sc->sc_rx_err; 2801 break; 2802 case MAC_STAT_RBYTES: 2803 *val = ic->ic_stats.is_rx_bytes; 2804 break; 2805 case MAC_STAT_IPACKETS: 2806 *val = ic->ic_stats.is_rx_frags; 2807 break; 2808 case MAC_STAT_OBYTES: 2809 *val = ic->ic_stats.is_tx_bytes; 2810 break; 2811 case MAC_STAT_OPACKETS: 2812 *val = ic->ic_stats.is_tx_frags; 2813 break; 2814 case MAC_STAT_OERRORS: 2815 case WIFI_STAT_TX_FAILED: 2816 *val = sc->sc_tx_err; 2817 break; 2818 case WIFI_STAT_TX_RETRANS: 2819 *val = sc->sc_tx_retries; 2820 break; 2821 case WIFI_STAT_FCS_ERRORS: 2822 case WIFI_STAT_WEP_ERRORS: 2823 case WIFI_STAT_TX_FRAGS: 2824 case WIFI_STAT_MCAST_TX: 2825 case WIFI_STAT_RTS_SUCCESS: 2826 case WIFI_STAT_RTS_FAILURE: 2827 case WIFI_STAT_ACK_FAILURE: 2828 case WIFI_STAT_RX_FRAGS: 2829 case WIFI_STAT_MCAST_RX: 2830 case WIFI_STAT_RX_DUPS: 2831 mutex_exit(&sc->sc_glock); 2832 return (ieee80211_stat(ic, stat, val)); 2833 default: 2834 mutex_exit(&sc->sc_glock); 2835 return (ENOTSUP); 2836 } 2837 mutex_exit(&sc->sc_glock); 2838 2839 return (IWK_SUCCESS); 2840 2841 } 2842 2843 static int 2844 iwk_m_start(void *arg) 2845 { 2846 iwk_sc_t *sc = (iwk_sc_t *)arg; 2847 ieee80211com_t *ic = &sc->sc_ic; 2848 int err; 2849 2850 err = iwk_init(sc); 2851 2852 if (err != IWK_SUCCESS) { 2853 /* 2854 * The hw init err(eg. RF is OFF). Return Success to make 2855 * the 'plumb' succeed. The iwk_thread() tries to re-init 2856 * background. 2857 */ 2858 cmn_err(CE_WARN, "iwk_m_start(): failed to initialize " 2859 "hardware\n"); 2860 mutex_enter(&sc->sc_glock); 2861 sc->sc_flags |= IWK_F_HW_ERR_RECOVER; 2862 mutex_exit(&sc->sc_glock); 2863 return (IWK_SUCCESS); 2864 } 2865 2866 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 2867 2868 mutex_enter(&sc->sc_glock); 2869 sc->sc_flags |= IWK_F_RUNNING; 2870 mutex_exit(&sc->sc_glock); 2871 2872 return (IWK_SUCCESS); 2873 } 2874 2875 static void 2876 iwk_m_stop(void *arg) 2877 { 2878 iwk_sc_t *sc = (iwk_sc_t *)arg; 2879 ieee80211com_t *ic = &sc->sc_ic; 2880 2881 iwk_stop(sc); 2882 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 2883 mutex_enter(&sc->sc_mt_lock); 2884 sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER; 2885 sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL; 2886 mutex_exit(&sc->sc_mt_lock); 2887 mutex_enter(&sc->sc_glock); 2888 sc->sc_flags &= ~IWK_F_RUNNING; 2889 sc->sc_flags &= ~IWK_F_SCANNING; 2890 mutex_exit(&sc->sc_glock); 2891 } 2892 2893 /*ARGSUSED*/ 2894 static int 2895 iwk_m_unicst(void *arg, const uint8_t *macaddr) 2896 { 2897 iwk_sc_t *sc = (iwk_sc_t *)arg; 2898 ieee80211com_t *ic = &sc->sc_ic; 2899 int err; 2900 2901 if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) { 2902 IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr); 2903 mutex_enter(&sc->sc_glock); 2904 err = iwk_config(sc); 2905 mutex_exit(&sc->sc_glock); 2906 if (err != IWK_SUCCESS) { 2907 cmn_err(CE_WARN, 2908 "iwk_m_unicst(): " 2909 "failed to configure device\n"); 2910 goto fail; 2911 } 2912 } 2913 return (IWK_SUCCESS); 2914 fail: 2915 return (err); 2916 } 2917 2918 /*ARGSUSED*/ 2919 static int 2920 iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m) 2921 { 2922 return (IWK_SUCCESS); 2923 } 2924 2925 /*ARGSUSED*/ 2926 static int 2927 iwk_m_promisc(void *arg, boolean_t on) 2928 { 2929 return (IWK_SUCCESS); 2930 } 2931 2932 static void 2933 
iwk_thread(iwk_sc_t *sc) 2934 { 2935 ieee80211com_t *ic = &sc->sc_ic; 2936 clock_t clk; 2937 int times = 0, err, n = 0, timeout = 0; 2938 uint32_t tmp; 2939 2940 mutex_enter(&sc->sc_mt_lock); 2941 while (sc->sc_mf_thread_switch) { 2942 tmp = IWK_READ(sc, CSR_GP_CNTRL); 2943 if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) { 2944 sc->sc_flags &= ~IWK_F_RADIO_OFF; 2945 } else { 2946 sc->sc_flags |= IWK_F_RADIO_OFF; 2947 } 2948 /* 2949 * If in SUSPEND or the RF is OFF, do nothing 2950 */ 2951 if ((sc->sc_flags & IWK_F_SUSPEND) || 2952 (sc->sc_flags & IWK_F_RADIO_OFF)) { 2953 mutex_exit(&sc->sc_mt_lock); 2954 delay(drv_usectohz(100000)); 2955 mutex_enter(&sc->sc_mt_lock); 2956 continue; 2957 } 2958 2959 /* 2960 * recovery fatal error 2961 */ 2962 if (ic->ic_mach && 2963 (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) { 2964 2965 IWK_DBG((IWK_DEBUG_FW, 2966 "iwk_thread(): " 2967 "try to recover fatal hw error: %d\n", times++)); 2968 2969 iwk_stop(sc); 2970 2971 mutex_exit(&sc->sc_mt_lock); 2972 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 2973 delay(drv_usectohz(2000000 + n*500000)); 2974 mutex_enter(&sc->sc_mt_lock); 2975 2976 err = iwk_init(sc); 2977 if (err != IWK_SUCCESS) { 2978 n++; 2979 if (n < 20) 2980 continue; 2981 } 2982 n = 0; 2983 if (!err) 2984 sc->sc_flags |= IWK_F_RUNNING; 2985 sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER; 2986 mutex_exit(&sc->sc_mt_lock); 2987 delay(drv_usectohz(2000000)); 2988 if (sc->sc_ostate != IEEE80211_S_INIT) 2989 ieee80211_new_state(ic, IEEE80211_S_SCAN, 0); 2990 mutex_enter(&sc->sc_mt_lock); 2991 } 2992 2993 if (ic->ic_mach && 2994 (sc->sc_flags & IWK_F_SCANNING) && sc->sc_scan_pending) { 2995 2996 IWK_DBG((IWK_DEBUG_SCAN, 2997 "iwk_thread(): " 2998 "wait for probe response\n")); 2999 3000 sc->sc_scan_pending--; 3001 mutex_exit(&sc->sc_mt_lock); 3002 delay(drv_usectohz(200000)); 3003 ieee80211_next_scan(ic); 3004 mutex_enter(&sc->sc_mt_lock); 3005 } 3006 3007 /* 3008 * rate ctl 3009 */ 3010 if (ic->ic_mach && 3011 (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) { 3012 clk = ddi_get_lbolt(); 3013 if (clk > sc->sc_clk + drv_usectohz(500000)) { 3014 iwk_amrr_timeout(sc); 3015 } 3016 } 3017 3018 mutex_exit(&sc->sc_mt_lock); 3019 delay(drv_usectohz(100000)); 3020 mutex_enter(&sc->sc_mt_lock); 3021 3022 if (sc->sc_tx_timer) { 3023 timeout++; 3024 if (timeout == 10) { 3025 sc->sc_tx_timer--; 3026 if (sc->sc_tx_timer == 0) { 3027 sc->sc_flags |= IWK_F_HW_ERR_RECOVER; 3028 sc->sc_ostate = IEEE80211_S_RUN; 3029 IWK_DBG((IWK_DEBUG_FW, 3030 "iwk_thread(): try to recover from" 3031 " 'send fail\n")); 3032 } 3033 timeout = 0; 3034 } 3035 } 3036 3037 } 3038 sc->sc_mf_thread = NULL; 3039 cv_signal(&sc->sc_mt_cv); 3040 mutex_exit(&sc->sc_mt_lock); 3041 } 3042 3043 3044 /* 3045 * Send a command to the firmware. 
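 *
 * The command travels through the dedicated command tx ring
 * (IWK_CMD_QUEUE_NUM): the body is copied into the ring slot, a
 * single-segment descriptor is built for it and the hardware is kicked
 * by advancing HBUS_TARG_WRPTR.  For a synchronous call (async == 0)
 * the caller then sleeps on sc_cmd_cv with a two second deadline until
 * iwk_cmd_intr() sees the reply on the command queue and sets
 * IWK_F_CMD_DONE.  Callers must hold sc_glock.  A minimal usage
 * sketch, mirroring the REPLY_BT_CONFIG call made from iwk_config():
 *
 *	iwk_bt_cmd_t bt;
 *
 *	(void) memset(&bt, 0, sizeof (bt));
 *	mutex_enter(&sc->sc_glock);
 *	err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt, sizeof (bt), 0);
 *	mutex_exit(&sc->sc_glock);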
3046 */ 3047 static int 3048 iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async) 3049 { 3050 iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM]; 3051 iwk_tx_desc_t *desc; 3052 iwk_cmd_t *cmd; 3053 clock_t clk; 3054 3055 ASSERT(size <= sizeof (cmd->data)); 3056 ASSERT(mutex_owned(&sc->sc_glock)); 3057 3058 IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code)); 3059 desc = ring->data[ring->cur].desc; 3060 cmd = ring->data[ring->cur].cmd; 3061 3062 cmd->hdr.type = (uint8_t)code; 3063 cmd->hdr.flags = 0; 3064 cmd->hdr.qid = ring->qid; 3065 cmd->hdr.idx = ring->cur; 3066 (void) memcpy(cmd->data, buf, size); 3067 (void) memset(desc, 0, sizeof (*desc)); 3068 3069 desc->val0 = LE_32(1 << 24); 3070 desc->pa[0].tb1_addr = 3071 (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff); 3072 desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0; 3073 3074 /* kick cmd ring XXX */ 3075 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 3076 tfd_offset[ring->cur].val = 8; 3077 if (ring->cur < IWK_MAX_WIN_SIZE) { 3078 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 3079 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8; 3080 } 3081 ring->cur = (ring->cur + 1) % ring->count; 3082 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3083 3084 if (async) 3085 return (IWK_SUCCESS); 3086 else { 3087 sc->sc_flags &= ~IWK_F_CMD_DONE; 3088 clk = ddi_get_lbolt() + drv_usectohz(2000000); 3089 while (!(sc->sc_flags & IWK_F_CMD_DONE)) { 3090 if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk) < 3091 0) 3092 break; 3093 } 3094 if (sc->sc_flags & IWK_F_CMD_DONE) 3095 return (IWK_SUCCESS); 3096 else 3097 return (IWK_FAIL); 3098 } 3099 } 3100 3101 static void 3102 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on) 3103 { 3104 iwk_led_cmd_t led; 3105 3106 led.interval = LE_32(100000); /* unit: 100ms */ 3107 led.id = id; 3108 led.off = off; 3109 led.on = on; 3110 3111 (void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1); 3112 } 3113 3114 static int 3115 iwk_hw_set_before_auth(iwk_sc_t *sc) 3116 { 3117 ieee80211com_t *ic = &sc->sc_ic; 3118 ieee80211_node_t *in = ic->ic_bss; 3119 iwk_add_sta_t node; 3120 iwk_link_quality_cmd_t link_quality; 3121 struct ieee80211_rateset rs; 3122 uint16_t masks = 0, rate; 3123 int i, err; 3124 3125 /* update adapter's configuration according the info of target AP */ 3126 IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid); 3127 sc->sc_config.chan = ieee80211_chan2ieee(ic, in->in_chan); 3128 if (ic->ic_curmode == IEEE80211_MODE_11B) { 3129 sc->sc_config.cck_basic_rates = 0x03; 3130 sc->sc_config.ofdm_basic_rates = 0; 3131 } else if ((in->in_chan != IEEE80211_CHAN_ANYC) && 3132 (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) { 3133 sc->sc_config.cck_basic_rates = 0; 3134 sc->sc_config.ofdm_basic_rates = 0x15; 3135 } else { /* assume 802.11b/g */ 3136 sc->sc_config.cck_basic_rates = 0x0f; 3137 sc->sc_config.ofdm_basic_rates = 0xff; 3138 } 3139 3140 sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK | 3141 RXON_FLG_SHORT_SLOT_MSK); 3142 3143 if (ic->ic_flags & IEEE80211_F_SHSLOT) 3144 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK); 3145 else 3146 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK); 3147 3148 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 3149 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK); 3150 else 3151 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK); 3152 3153 IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x " 3154 "filter_flags %x cck %x ofdm %x" 3155 " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n", 3156 sc->sc_config.chan, 
sc->sc_config.flags, 3157 sc->sc_config.filter_flags, 3158 sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates, 3159 sc->sc_config.bssid[0], sc->sc_config.bssid[1], 3160 sc->sc_config.bssid[2], sc->sc_config.bssid[3], 3161 sc->sc_config.bssid[4], sc->sc_config.bssid[5])); 3162 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 3163 sizeof (iwk_rxon_cmd_t), 1); 3164 if (err != IWK_SUCCESS) { 3165 cmn_err(CE_WARN, "iwk_hw_set_before_auth():" 3166 " failed to config chan%d\n", 3167 sc->sc_config.chan); 3168 return (err); 3169 } 3170 3171 /* obtain current temperature of chipset */ 3172 sc->sc_tempera = iwk_curr_tempera(sc); 3173 3174 /* make Tx power calibration to determine the gains of DSP and radio */ 3175 err = iwk_tx_power_calibration(sc); 3176 if (err) { 3177 cmn_err(CE_WARN, "iwk_hw_set_before_auth():" 3178 "failed to set tx power table\n"); 3179 return (err); 3180 } 3181 3182 /* add default AP node */ 3183 (void) memset(&node, 0, sizeof (node)); 3184 IEEE80211_ADDR_COPY(node.bssid, in->in_bssid); 3185 node.id = IWK_AP_ID; 3186 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1); 3187 if (err != IWK_SUCCESS) { 3188 cmn_err(CE_WARN, "iwk_hw_set_before_auth(): " 3189 "failed to add BSS node\n"); 3190 return (err); 3191 } 3192 3193 /* TX_LINK_QUALITY cmd ? */ 3194 (void) memset(&link_quality, 0, sizeof (link_quality)); 3195 rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)]; 3196 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 3197 if (i < rs.ir_nrates) 3198 rate = rs.ir_rates[rs.ir_nrates - i]; 3199 else 3200 rate = 2; 3201 if (rate == 2 || rate == 4 || rate == 11 || rate == 22) 3202 masks |= RATE_MCS_CCK_MSK; 3203 masks |= RATE_MCS_ANT_B_MSK; 3204 masks &= ~RATE_MCS_ANT_A_MSK; 3205 link_quality.rate_n_flags[i] = 3206 iwk_rate_to_plcp(rate) | masks; 3207 } 3208 3209 link_quality.general_params.single_stream_ant_msk = 2; 3210 link_quality.general_params.dual_stream_ant_msk = 3; 3211 link_quality.agg_params.agg_dis_start_th = 3; 3212 link_quality.agg_params.agg_time_limit = LE_16(4000); 3213 link_quality.sta_id = IWK_AP_ID; 3214 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality, 3215 sizeof (link_quality), 1); 3216 if (err != IWK_SUCCESS) { 3217 cmn_err(CE_WARN, "iwk_hw_set_before_auth(): " 3218 "failed to config link quality table\n"); 3219 return (err); 3220 } 3221 3222 return (IWK_SUCCESS); 3223 } 3224 3225 /* 3226 * Send a scan request(assembly scan cmd) to the firmware. 
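 *
 * The scan command is assembled as one contiguous blob in the command
 * ring slot's DMA buffer:
 *
 *	iwk_scan_hdr_t           scan header (channel count, quiet/dwell
 *	                         times, embedded tx cmd, direct-scan SSID)
 *	struct ieee80211_frame   probe request header
 *	IEs                      SSID, RATES, XRATES and any optional IE
 *	                         (e.g. WPA)
 *	iwk_scan_chan_t[nchan]   per-channel attributes (type, channel,
 *	                         tx power, dwell times)
 *
 * hdr->tx_cmd.len covers just the probe request frame, while hdr->len
 * covers the header, the frame and the channel array together.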
3227 */ 3228 static int 3229 iwk_scan(iwk_sc_t *sc) 3230 { 3231 ieee80211com_t *ic = &sc->sc_ic; 3232 iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM]; 3233 iwk_tx_desc_t *desc; 3234 iwk_tx_data_t *data; 3235 iwk_cmd_t *cmd; 3236 iwk_scan_hdr_t *hdr; 3237 iwk_scan_chan_t *chan; 3238 struct ieee80211_frame *wh; 3239 ieee80211_node_t *in = ic->ic_bss; 3240 uint8_t essid[IEEE80211_NWID_LEN+1]; 3241 struct ieee80211_rateset *rs; 3242 enum ieee80211_phymode mode; 3243 uint8_t *frm; 3244 int i, pktlen, nrates; 3245 3246 data = &ring->data[ring->cur]; 3247 desc = data->desc; 3248 cmd = (iwk_cmd_t *)data->dma_data.mem_va; 3249 3250 cmd->hdr.type = REPLY_SCAN_CMD; 3251 cmd->hdr.flags = 0; 3252 cmd->hdr.qid = ring->qid; 3253 cmd->hdr.idx = ring->cur | 0x40; 3254 3255 hdr = (iwk_scan_hdr_t *)cmd->data; 3256 (void) memset(hdr, 0, sizeof (iwk_scan_hdr_t)); 3257 hdr->nchan = 1; 3258 hdr->quiet_time = LE_16(50); 3259 hdr->quiet_plcp_th = LE_16(1); 3260 3261 hdr->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 3262 hdr->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK | 3263 LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) | 3264 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) | 3265 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS)); 3266 3267 hdr->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 3268 hdr->tx_cmd.sta_id = IWK_BROADCAST_ID; 3269 hdr->tx_cmd.stop_time.life_time = 0xffffffff; 3270 hdr->tx_cmd.tx_flags |= (0x200); 3271 hdr->tx_cmd.rate.r.rate_n_flags = iwk_rate_to_plcp(2); 3272 hdr->tx_cmd.rate.r.rate_n_flags |= 3273 (RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK); 3274 hdr->direct_scan[0].len = ic->ic_des_esslen; 3275 hdr->direct_scan[0].id = IEEE80211_ELEMID_SSID; 3276 3277 if (ic->ic_des_esslen) { 3278 bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen); 3279 essid[ic->ic_des_esslen] = '\0'; 3280 IWK_DBG((IWK_DEBUG_SCAN, "directed scan %s\n", essid)); 3281 3282 bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid, 3283 ic->ic_des_esslen); 3284 } else { 3285 bzero(hdr->direct_scan[0].ssid, 3286 sizeof (hdr->direct_scan[0].ssid)); 3287 } 3288 /* 3289 * a probe request frame is required after the REPLY_SCAN_CMD 3290 */ 3291 wh = (struct ieee80211_frame *)(hdr + 1); 3292 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 3293 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 3294 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 3295 (void) memset(wh->i_addr1, 0xff, 6); 3296 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr); 3297 (void) memset(wh->i_addr3, 0xff, 6); 3298 *(uint16_t *)&wh->i_dur[0] = 0; 3299 *(uint16_t *)&wh->i_seq[0] = 0; 3300 3301 frm = (uint8_t *)(wh + 1); 3302 3303 /* essid IE */ 3304 if (in->in_esslen) { 3305 bcopy(in->in_essid, essid, in->in_esslen); 3306 essid[in->in_esslen] = '\0'; 3307 IWK_DBG((IWK_DEBUG_SCAN, "probe with ESSID %s\n", 3308 essid)); 3309 } 3310 *frm++ = IEEE80211_ELEMID_SSID; 3311 *frm++ = in->in_esslen; 3312 (void) memcpy(frm, in->in_essid, in->in_esslen); 3313 frm += in->in_esslen; 3314 3315 mode = ieee80211_chan2mode(ic, ic->ic_curchan); 3316 rs = &ic->ic_sup_rates[mode]; 3317 3318 /* supported rates IE */ 3319 *frm++ = IEEE80211_ELEMID_RATES; 3320 nrates = rs->ir_nrates; 3321 if (nrates > IEEE80211_RATE_SIZE) 3322 nrates = IEEE80211_RATE_SIZE; 3323 *frm++ = (uint8_t)nrates; 3324 (void) memcpy(frm, rs->ir_rates, nrates); 3325 frm += nrates; 3326 3327 /* supported xrates IE */ 3328 if (rs->ir_nrates > IEEE80211_RATE_SIZE) { 3329 nrates = rs->ir_nrates - IEEE80211_RATE_SIZE; 3330 *frm++ = IEEE80211_ELEMID_XRATES; 3331 *frm++ = (uint8_t)nrates; 3332 (void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates); 
3333 frm += nrates; 3334 } 3335 3336 /* optionnal IE (usually for wpa) */ 3337 if (ic->ic_opt_ie != NULL) { 3338 (void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len); 3339 frm += ic->ic_opt_ie_len; 3340 } 3341 3342 /* setup length of probe request */ 3343 hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh)); 3344 hdr->len = hdr->nchan * sizeof (iwk_scan_chan_t) + 3345 hdr->tx_cmd.len + sizeof (iwk_scan_hdr_t); 3346 3347 /* 3348 * the attribute of the scan channels are required after the probe 3349 * request frame. 3350 */ 3351 chan = (iwk_scan_chan_t *)frm; 3352 for (i = 1; i <= hdr->nchan; i++, chan++) { 3353 if (ic->ic_des_esslen) { 3354 chan->type = 3; 3355 } else { 3356 chan->type = 1; 3357 } 3358 3359 chan->chan = ieee80211_chan2ieee(ic, ic->ic_curchan); 3360 chan->tpc.tx_gain = 0x3f; 3361 chan->tpc.dsp_atten = 110; 3362 chan->active_dwell = LE_16(50); 3363 chan->passive_dwell = LE_16(120); 3364 3365 frm += sizeof (iwk_scan_chan_t); 3366 } 3367 3368 pktlen = _PTRDIFF(frm, cmd); 3369 3370 (void) memset(desc, 0, sizeof (*desc)); 3371 desc->val0 = LE_32(1 << 24); 3372 desc->pa[0].tb1_addr = 3373 (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff); 3374 desc->pa[0].val1 = (pktlen << 4) & 0xfff0; 3375 3376 /* 3377 * maybe for cmd, filling the byte cnt table is not necessary. 3378 * anyway, we fill it here. 3379 */ 3380 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 3381 tfd_offset[ring->cur].val = 8; 3382 if (ring->cur < IWK_MAX_WIN_SIZE) { 3383 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 3384 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8; 3385 } 3386 3387 /* kick cmd ring */ 3388 ring->cur = (ring->cur + 1) % ring->count; 3389 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3390 3391 return (IWK_SUCCESS); 3392 } 3393 3394 static int 3395 iwk_config(iwk_sc_t *sc) 3396 { 3397 ieee80211com_t *ic = &sc->sc_ic; 3398 iwk_powertable_cmd_t powertable; 3399 iwk_bt_cmd_t bt; 3400 iwk_add_sta_t node; 3401 iwk_link_quality_cmd_t link_quality; 3402 int i, err; 3403 uint16_t masks = 0; 3404 3405 /* 3406 * set power mode. 
Disable power management at present, do it later 3407 */ 3408 (void) memset(&powertable, 0, sizeof (powertable)); 3409 powertable.flags = LE_16(0x8); 3410 err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable, 3411 sizeof (powertable), 0); 3412 if (err != IWK_SUCCESS) { 3413 cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n"); 3414 return (err); 3415 } 3416 3417 /* configure bt coexistence */ 3418 (void) memset(&bt, 0, sizeof (bt)); 3419 bt.flags = 3; 3420 bt.lead_time = 0xaa; 3421 bt.max_kill = 1; 3422 err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt, 3423 sizeof (bt), 0); 3424 if (err != IWK_SUCCESS) { 3425 cmn_err(CE_WARN, 3426 "iwk_config(): " 3427 "failed to configurate bt coexistence\n"); 3428 return (err); 3429 } 3430 3431 /* configure rxon */ 3432 (void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t)); 3433 IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr); 3434 IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr); 3435 sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan); 3436 sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK | 3437 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_BAND_24G_MSK); 3438 sc->sc_config.flags &= (~RXON_FLG_CCK_MSK); 3439 switch (ic->ic_opmode) { 3440 case IEEE80211_M_STA: 3441 sc->sc_config.dev_type = RXON_DEV_TYPE_ESS; 3442 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 3443 RXON_FILTER_DIS_DECRYPT_MSK | 3444 RXON_FILTER_DIS_GRP_DECRYPT_MSK); 3445 break; 3446 case IEEE80211_M_AHDEMO: 3447 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS; 3448 sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 3449 sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 3450 RXON_FILTER_DIS_DECRYPT_MSK | 3451 RXON_FILTER_DIS_GRP_DECRYPT_MSK); 3452 break; 3453 case IEEE80211_M_HOSTAP: 3454 sc->sc_config.dev_type = RXON_DEV_TYPE_AP; 3455 break; 3456 case IEEE80211_M_MONITOR: 3457 sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER; 3458 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 3459 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); 3460 break; 3461 } 3462 sc->sc_config.cck_basic_rates = 0x0f; 3463 sc->sc_config.ofdm_basic_rates = 0xff; 3464 3465 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff; 3466 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff; 3467 3468 /* set antenna */ 3469 3470 sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK | 3471 LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) | 3472 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) | 3473 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS)); 3474 3475 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 3476 sizeof (iwk_rxon_cmd_t), 0); 3477 if (err != IWK_SUCCESS) { 3478 cmn_err(CE_WARN, "iwk_config(): " 3479 "failed to set configure command\n"); 3480 return (err); 3481 } 3482 /* obtain current temperature of chipset */ 3483 sc->sc_tempera = iwk_curr_tempera(sc); 3484 3485 /* make Tx power calibration to determine the gains of DSP and radio */ 3486 err = iwk_tx_power_calibration(sc); 3487 if (err) { 3488 cmn_err(CE_WARN, "iwk_config(): " 3489 "failed to set tx power table\n"); 3490 return (err); 3491 } 3492 3493 /* add broadcast node so that we can send broadcast frame */ 3494 (void) memset(&node, 0, sizeof (node)); 3495 (void) memset(node.bssid, 0xff, 6); 3496 node.id = IWK_BROADCAST_ID; 3497 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0); 3498 if (err != IWK_SUCCESS) { 3499 cmn_err(CE_WARN, "iwk_config(): " 3500 "failed to add broadcast node\n"); 3501 return (err); 3502 } 3503 3504 /* TX_LINK_QUALITY cmd ? 
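 *
 * REPLY_TX_LINK_QUALITY_CMD installs the per-station retry table:
 * rate_n_flags[i] appears to be the rate the firmware falls back to on
 * the i-th (re)transmission attempt of a frame.  For the broadcast
 * station every one of the LINK_QUAL_MAX_RETRY_NUM entries is simply
 * the 1 Mbps CCK rate (iwk_rate_to_plcp(2) == 0xa) on antenna B.  The
 * corresponding loop in iwk_hw_set_before_auth() walks the AP's
 * supported rates from highest to lowest instead; note that its index
 * expression rs.ir_rates[rs.ir_nrates - i] reads one slot past the
 * last rate when i == 0 (valid indices run 0 .. ir_nrates - 1), so
 * rs.ir_nrates - 1 - i may have been intended there.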
*/ 3505 (void) memset(&link_quality, 0, sizeof (link_quality)); 3506 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 3507 masks |= RATE_MCS_CCK_MSK; 3508 masks |= RATE_MCS_ANT_B_MSK; 3509 masks &= ~RATE_MCS_ANT_A_MSK; 3510 link_quality.rate_n_flags[i] = iwk_rate_to_plcp(2) | masks; 3511 } 3512 3513 link_quality.general_params.single_stream_ant_msk = 2; 3514 link_quality.general_params.dual_stream_ant_msk = 3; 3515 link_quality.agg_params.agg_dis_start_th = 3; 3516 link_quality.agg_params.agg_time_limit = LE_16(4000); 3517 link_quality.sta_id = IWK_BROADCAST_ID; 3518 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality, 3519 sizeof (link_quality), 0); 3520 if (err != IWK_SUCCESS) { 3521 cmn_err(CE_WARN, "iwk_config(): " 3522 "failed to config link quality table\n"); 3523 return (err); 3524 } 3525 3526 return (IWK_SUCCESS); 3527 } 3528 3529 static void 3530 iwk_stop_master(iwk_sc_t *sc) 3531 { 3532 uint32_t tmp; 3533 int n; 3534 3535 tmp = IWK_READ(sc, CSR_RESET); 3536 IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER); 3537 3538 tmp = IWK_READ(sc, CSR_GP_CNTRL); 3539 if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) == 3540 CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) 3541 return; 3542 3543 for (n = 0; n < 2000; n++) { 3544 if (IWK_READ(sc, CSR_RESET) & 3545 CSR_RESET_REG_FLAG_MASTER_DISABLED) 3546 break; 3547 DELAY(1000); 3548 } 3549 if (n == 2000) 3550 IWK_DBG((IWK_DEBUG_HW, 3551 "timeout waiting for master stop\n")); 3552 } 3553 3554 static int 3555 iwk_power_up(iwk_sc_t *sc) 3556 { 3557 uint32_t tmp; 3558 3559 iwk_mac_access_enter(sc); 3560 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL); 3561 tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC; 3562 tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN; 3563 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp); 3564 iwk_mac_access_exit(sc); 3565 3566 DELAY(5000); 3567 return (IWK_SUCCESS); 3568 } 3569 3570 static int 3571 iwk_preinit(iwk_sc_t *sc) 3572 { 3573 uint32_t tmp; 3574 int n; 3575 uint8_t vlink; 3576 3577 /* clear any pending interrupts */ 3578 IWK_WRITE(sc, CSR_INT, 0xffffffff); 3579 3580 tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS); 3581 IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS, 3582 tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 3583 3584 tmp = IWK_READ(sc, CSR_GP_CNTRL); 3585 IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 3586 3587 /* wait for clock ready */ 3588 for (n = 0; n < 1000; n++) { 3589 if (IWK_READ(sc, CSR_GP_CNTRL) & 3590 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) 3591 break; 3592 DELAY(10); 3593 } 3594 if (n == 1000) { 3595 cmn_err(CE_WARN, 3596 "iwk_preinit(): timeout waiting for clock ready\n"); 3597 return (ETIMEDOUT); 3598 } 3599 iwk_mac_access_enter(sc); 3600 tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG); 3601 iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp | 3602 APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT); 3603 3604 DELAY(20); 3605 tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT); 3606 iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp | 3607 APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE); 3608 iwk_mac_access_exit(sc); 3609 3610 IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? 
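 * The value appears to be an interrupt-coalescing hold-off expressed
 * in 32-microsecond units, so 512 / 32 == 16 would ask the NIC to
 * batch interrupts for roughly half a millisecond before raising one.
 * That reading is an assumption based on how the expression is
 * written; it is not spelled out elsewhere in this file.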
*/ 3611 3612 (void) iwk_power_up(sc); 3613 3614 if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) { 3615 tmp = ddi_get32(sc->sc_cfg_handle, 3616 (uint32_t *)(sc->sc_cfg_base + 0xe8)); 3617 ddi_put32(sc->sc_cfg_handle, 3618 (uint32_t *)(sc->sc_cfg_base + 0xe8), 3619 tmp & ~(1 << 11)); 3620 } 3621 3622 3623 vlink = ddi_get8(sc->sc_cfg_handle, 3624 (uint8_t *)(sc->sc_cfg_base + 0xf0)); 3625 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0), 3626 vlink & ~2); 3627 3628 tmp = IWK_READ(sc, CSR_SW_VER); 3629 tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | 3630 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI | 3631 CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R; 3632 IWK_WRITE(sc, CSR_SW_VER, tmp); 3633 3634 /* make sure power supply on each part of the hardware */ 3635 iwk_mac_access_enter(sc); 3636 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL); 3637 tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ; 3638 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp); 3639 DELAY(5); 3640 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL); 3641 tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ; 3642 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp); 3643 iwk_mac_access_exit(sc); 3644 return (IWK_SUCCESS); 3645 } 3646 3647 /* 3648 * set up semphore flag to own EEPROM 3649 */ 3650 static int iwk_eep_sem_down(iwk_sc_t *sc) 3651 { 3652 int count1, count2; 3653 uint32_t tmp; 3654 3655 for (count1 = 0; count1 < 1000; count1++) { 3656 tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG); 3657 IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG, 3658 tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM); 3659 3660 for (count2 = 0; count2 < 2; count2++) { 3661 if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) & 3662 CSR_HW_IF_CONFIG_REG_EEP_SEM) 3663 return (IWK_SUCCESS); 3664 DELAY(10000); 3665 } 3666 } 3667 return (IWK_FAIL); 3668 } 3669 3670 /* 3671 * reset semphore flag to release EEPROM 3672 */ 3673 static void iwk_eep_sem_up(iwk_sc_t *sc) 3674 { 3675 uint32_t tmp; 3676 3677 tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG); 3678 IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG, 3679 tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM)); 3680 } 3681 3682 /* 3683 * This function load all infomation in eeprom into iwk_eep 3684 * structure in iwk_sc_t structure 3685 */ 3686 static int iwk_eep_load(iwk_sc_t *sc) 3687 { 3688 int i, rr; 3689 uint32_t rv, tmp, eep_gp; 3690 uint16_t addr, eep_sz = sizeof (sc->sc_eep_map); 3691 uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map; 3692 3693 /* read eeprom gp register in CSR */ 3694 eep_gp = IWK_READ(sc, CSR_EEPROM_GP); 3695 if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) == 3696 CSR_EEPROM_GP_BAD_SIGNATURE) { 3697 cmn_err(CE_WARN, "EEPROM not found\n"); 3698 return (IWK_FAIL); 3699 } 3700 3701 rr = iwk_eep_sem_down(sc); 3702 if (rr != 0) { 3703 cmn_err(CE_WARN, "failed to own EEPROM\n"); 3704 return (IWK_FAIL); 3705 } 3706 3707 for (addr = 0; addr < eep_sz; addr += 2) { 3708 IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1); 3709 tmp = IWK_READ(sc, CSR_EEPROM_REG); 3710 IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2)); 3711 3712 for (i = 0; i < 10; i++) { 3713 rv = IWK_READ(sc, CSR_EEPROM_REG); 3714 if (rv & 1) 3715 break; 3716 DELAY(10); 3717 } 3718 3719 if (!(rv & 1)) { 3720 cmn_err(CE_WARN, "time out when read EEPROM\n"); 3721 iwk_eep_sem_up(sc); 3722 return (IWK_FAIL); 3723 } 3724 3725 eep_p[addr/2] = rv >> 16; 3726 } 3727 3728 iwk_eep_sem_up(sc); 3729 return (IWK_SUCCESS); 3730 } 3731 3732 /* 3733 * init mac address in ieee80211com_t struct 3734 */ 3735 static void iwk_get_mac_from_eep(iwk_sc_t *sc) 3736 { 3737 ieee80211com_t *ic = &sc->sc_ic; 3738 struct iwk_eep *ep = &sc->sc_eep_map; 3739 3740 IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address); 
3741 3742 IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n", 3743 ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2], 3744 ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5])); 3745 } 3746 3747 static int 3748 iwk_init(iwk_sc_t *sc) 3749 { 3750 int qid, n, err; 3751 clock_t clk; 3752 uint32_t tmp; 3753 3754 mutex_enter(&sc->sc_glock); 3755 sc->sc_flags &= ~IWK_F_FW_INIT; 3756 3757 (void) iwk_preinit(sc); 3758 3759 tmp = IWK_READ(sc, CSR_GP_CNTRL); 3760 if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) { 3761 cmn_err(CE_WARN, "iwk_init(): Radio transmitter is off\n"); 3762 goto fail1; 3763 } 3764 3765 /* init Rx ring */ 3766 iwk_mac_access_enter(sc); 3767 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 3768 3769 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); 3770 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG, 3771 sc->sc_rxq.dma_desc.cookie.dmac_address >> 8); 3772 3773 IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG, 3774 ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address + 3775 offsetof(struct iwk_shared, val0)) >> 4)); 3776 3777 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 3778 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 3779 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 3780 IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K | 3781 (RX_QUEUE_SIZE_LOG << 3782 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT)); 3783 iwk_mac_access_exit(sc); 3784 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 3785 (RX_QUEUE_SIZE - 1) & ~0x7); 3786 3787 /* init Tx rings */ 3788 iwk_mac_access_enter(sc); 3789 iwk_reg_write(sc, SCD_TXFACT, 0); 3790 3791 /* keep warm page */ 3792 iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG, 3793 sc->sc_dma_kw.cookie.dmac_address >> 4); 3794 3795 for (qid = 0; qid < IWK_NUM_QUEUES; qid++) { 3796 IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid), 3797 sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8); 3798 IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid), 3799 IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 3800 IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL); 3801 } 3802 iwk_mac_access_exit(sc); 3803 3804 /* clear "radio off" and "disable command" bits */ 3805 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 3806 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, 3807 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 3808 3809 /* clear any pending interrupts */ 3810 IWK_WRITE(sc, CSR_INT, 0xffffffff); 3811 3812 /* enable interrupts */ 3813 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK); 3814 3815 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 3816 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 3817 3818 /* 3819 * backup ucode data part for future use. 
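 *
 * sc_dma_fw_data_bak is the buffer that iwk_ucode_alive() later
 * registers with the BSM (BSM_DRAM_DATA_PTR_REG), while sc_dma_fw_data
 * keeps the unmodified data image taken from the firmware file;
 * refreshing the backup copy here presumably lets every
 * (re)initialization hand the BSM a clean data segment.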
3820 */ 3821 (void) memcpy(sc->sc_dma_fw_data_bak.mem_va, 3822 sc->sc_dma_fw_data.mem_va, 3823 sc->sc_dma_fw_data.alength); 3824 3825 for (n = 0; n < 2; n++) { 3826 /* load firmware init segment into NIC */ 3827 err = iwk_load_firmware(sc); 3828 if (err != IWK_SUCCESS) { 3829 cmn_err(CE_WARN, "iwk_init(): " 3830 "failed to setup boot firmware\n"); 3831 continue; 3832 } 3833 3834 /* now press "execute" start running */ 3835 IWK_WRITE(sc, CSR_RESET, 0); 3836 break; 3837 } 3838 if (n == 2) { 3839 cmn_err(CE_WARN, "iwk_init(): failed to load firmware\n"); 3840 goto fail1; 3841 } 3842 /* ..and wait at most one second for adapter to initialize */ 3843 clk = ddi_get_lbolt() + drv_usectohz(2000000); 3844 while (!(sc->sc_flags & IWK_F_FW_INIT)) { 3845 if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0) 3846 break; 3847 } 3848 if (!(sc->sc_flags & IWK_F_FW_INIT)) { 3849 cmn_err(CE_WARN, 3850 "iwk_init(): timeout waiting for firmware init\n"); 3851 goto fail1; 3852 } 3853 3854 /* 3855 * at this point, the firmware is loaded OK, then config the hardware 3856 * with the ucode API, including rxon, txpower, etc. 3857 */ 3858 err = iwk_config(sc); 3859 if (err) { 3860 cmn_err(CE_WARN, "iwk_init(): failed to configure device\n"); 3861 goto fail1; 3862 } 3863 3864 /* at this point, hardware may receive beacons :) */ 3865 mutex_exit(&sc->sc_glock); 3866 return (IWK_SUCCESS); 3867 3868 fail1: 3869 err = IWK_FAIL; 3870 mutex_exit(&sc->sc_glock); 3871 return (err); 3872 } 3873 3874 static void 3875 iwk_stop(iwk_sc_t *sc) 3876 { 3877 uint32_t tmp; 3878 int i; 3879 3880 3881 mutex_enter(&sc->sc_glock); 3882 3883 IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 3884 /* disable interrupts */ 3885 IWK_WRITE(sc, CSR_INT_MASK, 0); 3886 IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK); 3887 IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff); 3888 3889 /* reset all Tx rings */ 3890 for (i = 0; i < IWK_NUM_QUEUES; i++) 3891 iwk_reset_tx_ring(sc, &sc->sc_txq[i]); 3892 3893 /* reset Rx ring */ 3894 iwk_reset_rx_ring(sc); 3895 3896 iwk_mac_access_enter(sc); 3897 iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT); 3898 iwk_mac_access_exit(sc); 3899 3900 DELAY(5); 3901 3902 iwk_stop_master(sc); 3903 3904 sc->sc_tx_timer = 0; 3905 tmp = IWK_READ(sc, CSR_RESET); 3906 IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET); 3907 mutex_exit(&sc->sc_glock); 3908 } 3909 3910 /* 3911 * Naive implementation of the Adaptive Multi Rate Retry algorithm: 3912 * "IEEE 802.11 Rate Adaptation: A Practical Approach" 3913 * Mathieu Lacage, Hossein Manshaei, Thierry Turletti 3914 * INRIA Sophia - Projet Planete 3915 * http://www-sop.inria.fr/rapports/sophia/RR-5208.html 3916 */ 3917 #define is_success(amrr) \ 3918 ((amrr)->retrycnt < (amrr)->txcnt / 10) 3919 #define is_failure(amrr) \ 3920 ((amrr)->retrycnt > (amrr)->txcnt / 3) 3921 #define is_enough(amrr) \ 3922 ((amrr)->txcnt > 100) 3923 #define is_min_rate(in) \ 3924 ((in)->in_txrate == 0) 3925 #define is_max_rate(in) \ 3926 ((in)->in_txrate == (in)->in_rates.ir_nrates - 1) 3927 #define increase_rate(in) \ 3928 ((in)->in_txrate++) 3929 #define decrease_rate(in) \ 3930 ((in)->in_txrate--) 3931 #define reset_cnt(amrr) \ 3932 { (amrr)->txcnt = (amrr)->retrycnt = 0; } 3933 3934 #define IWK_AMRR_MIN_SUCCESS_THRESHOLD 1 3935 #define IWK_AMRR_MAX_SUCCESS_THRESHOLD 15 3936 3937 static void 3938 iwk_amrr_init(iwk_amrr_t *amrr) 3939 { 3940 amrr->success = 0; 3941 amrr->recovery = 0; 3942 amrr->txcnt = amrr->retrycnt = 0; 3943 amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD; 
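/*
 * How these fields are used by iwk_amrr_ratectl() below: a sampling
 * period is "enough" once more than 100 frames have been sent; it
 * counts as a success when fewer than 10% of them needed retries and
 * as a failure when more than a third did.  success_threshold is the
 * number of consecutive successful periods required before the rate is
 * raised; it starts at IWK_AMRR_MIN_SUCCESS_THRESHOLD and, whenever a
 * rate increase is immediately followed by a failing period (a failed
 * "recovery"), it is bumped by one, up to
 * IWK_AMRR_MAX_SUCCESS_THRESHOLD, making the next upgrade attempt more
 * cautious.  A short worked trace:
 *
 *	period 1: 120 tx,  3 retries -> success, threshold met, rate++
 *	period 2: 110 tx, 50 retries -> failure during recovery,
 *	                                rate--, threshold becomes 2
 *	periods 3-4: clean           -> two clean periods now needed
 *	                                before the next rate++
 */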
3944 }
3945
3946 static void
3947 iwk_amrr_timeout(iwk_sc_t *sc)
3948 {
3949     ieee80211com_t *ic = &sc->sc_ic;
3950
3951     IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n"));
3952     if (ic->ic_opmode == IEEE80211_M_STA)
3953         iwk_amrr_ratectl(NULL, ic->ic_bss);
3954     else
3955         ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL);
3956     sc->sc_clk = ddi_get_lbolt();
3957 }
3958
3959 /* ARGSUSED */
3960 static void
3961 iwk_amrr_ratectl(void *arg, ieee80211_node_t *in)
3962 {
3963     iwk_amrr_t *amrr = (iwk_amrr_t *)in;
3964     int need_change = 0;
3965
3966     if (is_success(amrr) && is_enough(amrr)) {
3967         amrr->success++;
3968         if (amrr->success >= amrr->success_threshold &&
3969             !is_max_rate(in)) {
3970             amrr->recovery = 1;
3971             amrr->success = 0;
3972             increase_rate(in);
3973             IWK_DBG((IWK_DEBUG_RATECTL,
3974                 "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n",
3975                 in->in_txrate, amrr->txcnt, amrr->retrycnt));
3976             need_change = 1;
3977         } else {
3978             amrr->recovery = 0;
3979         }
3980     } else if (is_failure(amrr)) {
3981         amrr->success = 0;
3982         if (!is_min_rate(in)) {
3983             if (amrr->recovery) {
3984                 amrr->success_threshold++;
3985                 if (amrr->success_threshold >
3986                     IWK_AMRR_MAX_SUCCESS_THRESHOLD)
3987                     amrr->success_threshold =
3988                         IWK_AMRR_MAX_SUCCESS_THRESHOLD;
3989             } else {
3990                 amrr->success_threshold =
3991                     IWK_AMRR_MIN_SUCCESS_THRESHOLD;
3992             }
3993             decrease_rate(in);
3994             IWK_DBG((IWK_DEBUG_RATECTL,
3995                 "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n",
3996                 in->in_txrate, amrr->txcnt, amrr->retrycnt));
3997             need_change = 1;
3998         }
3999         amrr->recovery = 0;	/* paper is incorrect */
4000     }
4001
4002     if (is_enough(amrr) || need_change)
4003         reset_cnt(amrr);
4004 }
4005
4006 /*
4007  * calculate the 4965 chipset's Kelvin temperature according to
4008  * the data in the init alive and statistics notifications.
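 * The computation below implements:
 *     temperature = TEMPERATURE_CALIB_A_VAL * (R4 - R2) / (R3 - R1),
 * scaled by 97/100 and shifted by TEMPERATURE_CALIB_KELVIN_OFFSET into Kelvin.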
4009  * The details are described in the iwk_calibration.h file
4010  */
4011 static int32_t iwk_curr_tempera(iwk_sc_t *sc)
4012 {
4013     int32_t tempera;
4014     int32_t r1, r2, r3;
4015     uint32_t r4_u;
4016     int32_t r4_s;
4017
4018     if (iwk_is_fat_channel(sc)) {
4019         r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[1]);
4020         r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[1]);
4021         r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[1]);
4022         r4_u = sc->sc_card_alive_init.therm_r4[1];
4023     } else {
4024         r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[0]);
4025         r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[0]);
4026         r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[0]);
4027         r4_u = sc->sc_card_alive_init.therm_r4[0];
4028     }
4029
4030     if (sc->sc_flags & IWK_F_STATISTICS) {
4031         r4_s = (int32_t)(sc->sc_statistics.general.temperature <<
4032             (31-23)) >> (31-23);
4033     } else {
4034         r4_s = (int32_t)(r4_u << (31-23)) >> (31-23);
4035     }
4036
4037     IWK_DBG((IWK_DEBUG_CALIBRATION, "temperature R[1-4]: %d %d %d %d\n",
4038         r1, r2, r3, r4_s));
4039
4040     if (r3 == r1) {
4041         cmn_err(CE_WARN, "iwk_curr_tempera(): "
4042             "failed to calculate temperature "
4043             "because r3 = r1\n");
4044         return (DDI_FAILURE);
4045     }
4046
4047     tempera = TEMPERATURE_CALIB_A_VAL * (r4_s - r2);
4048     tempera /= (r3 - r1);
4049     tempera = (tempera*97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
4050
4051     IWK_DBG((IWK_DEBUG_CALIBRATION, "calculated temperature: %dK, %dC\n",
4052         tempera, KELVIN_TO_CELSIUS(tempera)));
4053
4054     return (tempera);
4055 }
4056
4057 /* Determine whether the 4965 is using the 2.4 GHz band */
4058 static inline int iwk_is_24G_band(iwk_sc_t *sc)
4059 {
4060     return (sc->sc_config.flags & RXON_FLG_BAND_24G_MSK);
4061 }
4062
4063 /* Determine whether the 4965 is using a fat channel */
4064 static inline int iwk_is_fat_channel(iwk_sc_t *sc)
4065 {
4066     return ((sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
4067         (sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK));
4068 }
4069
4070 /*
4071  * In MIMO mode, determine which group the 4965's current channel belongs to.
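 * (each group is later given its own thermal-compensation ratio,
 * see txpower_tempera_comp_table below)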
4072 * For more infomation about "channel group", 4073 * please refer to iwk_calibration.h file 4074 */ 4075 static int iwk_txpower_grp(uint16_t channel) 4076 { 4077 if (channel >= CALIB_IWK_TX_ATTEN_GR5_FCH && 4078 channel <= CALIB_IWK_TX_ATTEN_GR5_LCH) { 4079 return (CALIB_CH_GROUP_5); 4080 } 4081 4082 if (channel >= CALIB_IWK_TX_ATTEN_GR1_FCH && 4083 channel <= CALIB_IWK_TX_ATTEN_GR1_LCH) { 4084 return (CALIB_CH_GROUP_1); 4085 } 4086 4087 if (channel >= CALIB_IWK_TX_ATTEN_GR2_FCH && 4088 channel <= CALIB_IWK_TX_ATTEN_GR2_LCH) { 4089 return (CALIB_CH_GROUP_2); 4090 } 4091 4092 if (channel >= CALIB_IWK_TX_ATTEN_GR3_FCH && 4093 channel <= CALIB_IWK_TX_ATTEN_GR3_LCH) { 4094 return (CALIB_CH_GROUP_3); 4095 } 4096 4097 if (channel >= CALIB_IWK_TX_ATTEN_GR4_FCH && 4098 channel <= CALIB_IWK_TX_ATTEN_GR4_LCH) { 4099 return (CALIB_CH_GROUP_4); 4100 } 4101 4102 cmn_err(CE_WARN, "iwk_txpower_grp(): " 4103 "can't find txpower group for channel %d.\n", channel); 4104 4105 return (DDI_FAILURE); 4106 } 4107 4108 /* 2.4 GHz */ 4109 static uint16_t iwk_eep_band_1[14] = { 4110 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 4111 }; 4112 4113 /* 5.2 GHz bands */ 4114 static uint16_t iwk_eep_band_2[13] = { 4115 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 4116 }; 4117 4118 static uint16_t iwk_eep_band_3[12] = { 4119 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 4120 }; 4121 4122 static uint16_t iwk_eep_band_4[11] = { 4123 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 4124 }; 4125 4126 static uint16_t iwk_eep_band_5[6] = { 4127 145, 149, 153, 157, 161, 165 4128 }; 4129 4130 static uint16_t iwk_eep_band_6[7] = { 4131 1, 2, 3, 4, 5, 6, 7 4132 }; 4133 4134 static uint16_t iwk_eep_band_7[11] = { 4135 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 4136 }; 4137 4138 /* Get regulatory data from eeprom for a given channel */ 4139 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc, 4140 uint16_t channel, 4141 int is_24G, int is_fat, int is_hi_chan) 4142 { 4143 int32_t i; 4144 uint16_t chan; 4145 4146 if (is_fat) { /* 11n mode */ 4147 4148 if (is_hi_chan) { 4149 chan = channel - 4; 4150 } else { 4151 chan = channel; 4152 } 4153 4154 for (i = 0; i < 7; i++) { 4155 if (iwk_eep_band_6[i] == chan) { 4156 return (&sc->sc_eep_map.band_24_channels[i]); 4157 } 4158 } 4159 for (i = 0; i < 11; i++) { 4160 if (iwk_eep_band_7[i] == chan) { 4161 return (&sc->sc_eep_map.band_52_channels[i]); 4162 } 4163 } 4164 } else if (is_24G) { /* 2.4 GHz band */ 4165 for (i = 0; i < 14; i++) { 4166 if (iwk_eep_band_1[i] == channel) { 4167 return (&sc->sc_eep_map.band_1_channels[i]); 4168 } 4169 } 4170 } else { /* 5 GHz band */ 4171 for (i = 0; i < 13; i++) { 4172 if (iwk_eep_band_2[i] == channel) { 4173 return (&sc->sc_eep_map.band_2_channels[i]); 4174 } 4175 } 4176 for (i = 0; i < 12; i++) { 4177 if (iwk_eep_band_3[i] == channel) { 4178 return (&sc->sc_eep_map.band_3_channels[i]); 4179 } 4180 } 4181 for (i = 0; i < 11; i++) { 4182 if (iwk_eep_band_4[i] == channel) { 4183 return (&sc->sc_eep_map.band_4_channels[i]); 4184 } 4185 } 4186 for (i = 0; i < 6; i++) { 4187 if (iwk_eep_band_5[i] == channel) { 4188 return (&sc->sc_eep_map.band_5_channels[i]); 4189 } 4190 } 4191 } 4192 4193 return (NULL); 4194 } 4195 4196 /* 4197 * Determine which subband a given channel belongs 4198 * to in 2.4 GHz or 5 GHz band 4199 */ 4200 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel) 4201 { 4202 int32_t b_n = -1; 4203 4204 for (b_n = 0; b_n < EEP_TX_POWER_BANDS; b_n++) { 4205 if (0 == 
sc->sc_eep_map.calib_info.band_info_tbl[b_n].ch_from) { 4206 continue; 4207 } 4208 4209 if ((channel >= 4210 (uint16_t)sc->sc_eep_map.calib_info. 4211 band_info_tbl[b_n].ch_from) && 4212 (channel <= 4213 (uint16_t)sc->sc_eep_map.calib_info. 4214 band_info_tbl[b_n].ch_to)) { 4215 break; 4216 } 4217 } 4218 4219 return (b_n); 4220 } 4221 4222 /* Make a special division for interpolation operation */ 4223 static int iwk_division(int32_t num, int32_t denom, int32_t *res) 4224 { 4225 int32_t sign = 1; 4226 4227 if (num < 0) { 4228 sign = -sign; 4229 num = -num; 4230 } 4231 4232 if (denom < 0) { 4233 sign = -sign; 4234 denom = -denom; 4235 } 4236 4237 *res = ((num*2 + denom) / (denom*2)) * sign; 4238 4239 return (IWK_SUCCESS); 4240 } 4241 4242 /* Make interpolation operation */ 4243 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1, 4244 int32_t x2, int32_t y2) 4245 { 4246 int32_t val; 4247 4248 if (x2 == x1) { 4249 return (y1); 4250 } else { 4251 (void) iwk_division((x2-x)*(y1-y2), (x2-x1), &val); 4252 return (val + y2); 4253 } 4254 } 4255 4256 /* Get interpolation measurement data of a given channel for all chains. */ 4257 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel, 4258 struct iwk_eep_calib_channel_info *chan_info) 4259 { 4260 int32_t ban_n; 4261 uint32_t ch1_n, ch2_n; 4262 int32_t c, m; 4263 struct iwk_eep_calib_measure *m1_p, *m2_p, *m_p; 4264 4265 /* determine subband number */ 4266 ban_n = iwk_band_number(sc, channel); 4267 if (ban_n >= EEP_TX_POWER_BANDS) { 4268 return (DDI_FAILURE); 4269 } 4270 4271 ch1_n = 4272 (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch1.ch_num; 4273 ch2_n = 4274 (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch2.ch_num; 4275 4276 chan_info->ch_num = (uint8_t)channel; /* given channel number */ 4277 4278 /* 4279 * go through all chains on chipset 4280 */ 4281 for (c = 0; c < EEP_TX_POWER_TX_CHAINS; c++) { 4282 /* 4283 * go through all factory measurements 4284 */ 4285 for (m = 0; m < EEP_TX_POWER_MEASUREMENTS; m++) { 4286 m1_p = 4287 &(sc->sc_eep_map.calib_info. 4288 band_info_tbl[ban_n].ch1.measure[c][m]); 4289 m2_p = 4290 &(sc->sc_eep_map.calib_info.band_info_tbl[ban_n]. 4291 ch2.measure[c][m]); 4292 m_p = &(chan_info->measure[c][m]); 4293 4294 /* 4295 * make interpolation to get actual 4296 * Tx power for given channel 4297 */ 4298 m_p->actual_pow = iwk_interpolate_value(channel, 4299 ch1_n, m1_p->actual_pow, 4300 ch2_n, m2_p->actual_pow); 4301 4302 /* make interpolation to get index into gain table */ 4303 m_p->gain_idx = iwk_interpolate_value(channel, 4304 ch1_n, m1_p->gain_idx, 4305 ch2_n, m2_p->gain_idx); 4306 4307 /* make interpolation to get chipset temperature */ 4308 m_p->temperature = iwk_interpolate_value(channel, 4309 ch1_n, m1_p->temperature, 4310 ch2_n, m2_p->temperature); 4311 4312 /* 4313 * make interpolation to get power 4314 * amp detector level 4315 */ 4316 m_p->pa_det = iwk_interpolate_value(channel, ch1_n, 4317 m1_p->pa_det, 4318 ch2_n, m2_p->pa_det); 4319 } 4320 } 4321 4322 return (IWK_SUCCESS); 4323 } 4324 4325 /* 4326 * Calculate voltage compensation for Tx power. 
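 * The result is the voltage difference expressed in
 * TX_POWER_IWK_VOLTAGE_CODES_PER_03V units; it is doubled when the current
 * voltage exceeds the EEPROM value and zeroed when it falls outside [-2, 2].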
For more information,
4327  * please refer to the iwk_calibration.h file
4328  */
4329 static int32_t iwk_voltage_compensation(int32_t eep_voltage,
4330     int32_t curr_voltage)
4331 {
4332     int32_t vol_comp = 0;
4333
4334     if ((TX_POWER_IWK_ILLEGAL_VOLTAGE == eep_voltage) ||
4335         (TX_POWER_IWK_ILLEGAL_VOLTAGE == curr_voltage)) {
4336         return (vol_comp);
4337     }
4338
4339     (void) iwk_division(curr_voltage-eep_voltage,
4340         TX_POWER_IWK_VOLTAGE_CODES_PER_03V, &vol_comp);
4341
4342     if (curr_voltage > eep_voltage) {
4343         vol_comp *= 2;
4344     }
4345     if ((vol_comp < -2) || (vol_comp > 2)) {
4346         vol_comp = 0;
4347     }
4348
4349     return (vol_comp);
4350 }
4351
4352 /*
4353  * Thermal compensation values for txpower for various frequency ranges ...
4354  * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust
4355  */
4356 static struct iwk_txpower_tempera_comp {
4357     int32_t degrees_per_05db_a;
4358     int32_t degrees_per_05db_a_denom;
4359 } txpower_tempera_comp_table[CALIB_CH_GROUP_MAX] = {
4360     {9, 2},		/* group 0 5.2, ch 34-43 */
4361     {4, 1},		/* group 1 5.2, ch 44-70 */
4362     {4, 1},		/* group 2 5.2, ch 71-124 */
4363     {4, 1},		/* group 3 5.2, ch 125-200 */
4364     {3, 1}		/* group 4 2.4, ch all */
4365 };
4366
4367 /*
4368  * bit-rate-dependent table to prevent Tx distortion, in half-dB units,
4369  * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
4370  */
4371 static int32_t back_off_table[] = {
4372     10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
4373     10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
4374     10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
4375     10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
4376     10					/* CCK */
4377 };
4378
4379 /* determine minimum Tx power index in gain table */
4380 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G)
4381 {
4382     if ((!is_24G) && ((rate_pow_idx & 7) <= 4)) {
4383         return (MIN_TX_GAIN_INDEX_52GHZ_EXT);
4384     }
4385
4386     return (MIN_TX_GAIN_INDEX);
4387 }
4388
4389 /*
4390  * Determine DSP and radio gain according to temperature and other factors.
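 * For each rate entry, a gain-table index is derived from the interpolated
 * factory measurements and then corrected for temperature, voltage and,
 * in MIMO mode, per-chain attenuation.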
4391  * This function does the bulk of the Tx power calibration work
4392  */
4393 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc,
4394     struct iwk_tx_power_db *tp_db)
4395 {
4396     int is_24G, is_fat, is_high_chan = 0, is_mimo;
4397     int c, r;
4398     int32_t target_power;
4399     int32_t tx_grp = CALIB_CH_GROUP_MAX;
4400     uint16_t channel;
4401     uint8_t saturation_power;
4402     int32_t regu_power;
4403     int32_t curr_regu_power;
4404     struct iwk_eep_channel *eep_chan_p;
4405     struct iwk_eep_calib_channel_info eep_chan_calib;
4406     int32_t eep_voltage, init_voltage;
4407     int32_t voltage_compensation;
4408     int32_t temperature;
4409     int32_t degrees_per_05db_num;
4410     int32_t degrees_per_05db_denom;
4411     struct iwk_eep_calib_measure *measure_p;
4412     int32_t interpo_temp;
4413     int32_t power_limit;
4414     int32_t atten_value;
4415     int32_t tempera_comp[2];
4416     int32_t interpo_gain_idx[2];
4417     int32_t interpo_actual_pow[2];
4418     union iwk_tx_power_dual_stream txpower_gains;
4419     int32_t txpower_gains_idx;
4420
4421     channel = sc->sc_config.chan;
4422
4423     /* 2.4 GHz or 5 GHz band */
4424     is_24G = iwk_is_24G_band(sc);
4425
4426     /* fat channel or not */
4427     is_fat = iwk_is_fat_channel(sc);
4428
4429     /*
4430      * determine whether a fat channel uses the low or the high
4431      * half channel number
4432      */
4433     if (is_fat && (sc->sc_config.flags &
4434         RXON_FLG_CONTROL_CHANNEL_LOC_HIGH_MSK)) {
4435         is_high_chan = 1;
4436     }
4437
4438     if ((channel > 0) && (channel < 200)) {
4439         /* get regulatory channel data from eeprom */
4440         eep_chan_p = iwk_get_eep_channel(sc, channel, is_24G,
4441             is_fat, is_high_chan);
4442         if (NULL == eep_chan_p) {
4443             cmn_err(CE_WARN,
4444                 "iwk_txpower_table_cmd_init(): "
4445                 "can't get channel information\n");
4446             return (DDI_FAILURE);
4447         }
4448     } else {
4449         cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4450             "channel (%d) isn't in the proper range\n",
4451             channel);
4452         return (DDI_FAILURE);
4453     }
4454
4455     /* initial value of Tx power */
4456     sc->sc_user_txpower = (int32_t)eep_chan_p->max_power_avg;
4457     if (sc->sc_user_txpower < IWK_TX_POWER_TARGET_POWER_MIN) {
4458         cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4459             "user TX power is too weak\n");
4460         return (DDI_FAILURE);
4461     } else if (sc->sc_user_txpower > IWK_TX_POWER_TARGET_POWER_MAX) {
4462         cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4463             "user TX power is too strong\n");
4464         return (DDI_FAILURE);
4465     }
4466
4467     target_power = 2 * sc->sc_user_txpower;
4468
4469     /* determine which group the current channel belongs to */
4470     tx_grp = iwk_txpower_grp(channel);
4471     if (tx_grp < 0) {
4472         return (tx_grp);
4473     }
4474
4475
4476     if (is_fat) {
4477         if (is_high_chan) {
4478             channel -= 2;
4479         } else {
4480             channel += 2;
4481         }
4482     }
4483
4484     /* determine saturation power */
4485     if (is_24G) {
4486         saturation_power =
4487             sc->sc_eep_map.calib_info.saturation_power24;
4488     } else {
4489         saturation_power =
4490             sc->sc_eep_map.calib_info.saturation_power52;
4491     }
4492
4493     if (saturation_power < IWK_TX_POWER_SATURATION_MIN ||
4494         saturation_power > IWK_TX_POWER_SATURATION_MAX) {
4495         if (is_24G) {
4496             saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_24;
4497         } else {
4498             saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_52;
4499         }
4500     }
4501
4502     /* determine regulatory power */
4503     regu_power = (int32_t)eep_chan_p->max_power_avg * 2;
4504     if ((regu_power < IWK_TX_POWER_REGULATORY_MIN) ||
4505         (regu_power > IWK_TX_POWER_REGULATORY_MAX)) {
4506         if (is_24G) {
4507             regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_24;
4508         } else {
4509
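            /* out-of-range EEPROM value, fall back to the 5 GHz default */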
regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_52;
4510         }
4511     }
4512
4513     /*
4514      * get measurement data for the current channel,
4515      * such as temperature, index into the gain table and actual Tx power
4516      */
4517     (void) iwk_channel_interpolate(sc, channel, &eep_chan_calib);
4518
4519     eep_voltage = (int32_t)sc->sc_eep_map.calib_info.voltage;
4520     init_voltage = (int32_t)sc->sc_card_alive_init.voltage;
4521
4522     /* calculate voltage compensation for Tx power */
4523     voltage_compensation =
4524         iwk_voltage_compensation(eep_voltage, init_voltage);
4525
4526     /* clamp the chip temperature into the supported range */
4527     temperature = sc->sc_tempera;
4528     if (temperature < IWK_TX_POWER_TEMPERATURE_MIN) {
4529         temperature = IWK_TX_POWER_TEMPERATURE_MIN;
4530     }
4531     if (temperature > IWK_TX_POWER_TEMPERATURE_MAX) {
4532         temperature = IWK_TX_POWER_TEMPERATURE_MAX;
4533     }
4534
4535
4536     temperature = KELVIN_TO_CELSIUS(temperature);
4537
4538     degrees_per_05db_num =
4539         txpower_tempera_comp_table[tx_grp].degrees_per_05db_a;
4540     degrees_per_05db_denom =
4541         txpower_tempera_comp_table[tx_grp].degrees_per_05db_a_denom;
4542
4543     for (c = 0; c < 2; c++) {	/* go through all chains */
4544         measure_p = &eep_chan_calib.measure[c][1];
4545         interpo_temp = measure_p->temperature;
4546
4547         /* determine temperature compensation for Tx power */
4548         (void) iwk_division(
4549             (temperature-interpo_temp)*degrees_per_05db_denom,
4550             degrees_per_05db_num, &tempera_comp[c]);
4551
4552         interpo_gain_idx[c] = measure_p->gain_idx;
4553         interpo_actual_pow[c] = measure_p->actual_pow;
4554     }
4555
4556     /*
4557      * go through all rate entries in the Tx power table
4558      */
4559     for (r = 0; r < POWER_TABLE_NUM_ENTRIES; r++) {
4560         if (r & 0x8) {
4561             /* need to lower regulatory power for MIMO mode */
4562             curr_regu_power = regu_power -
4563                 IWK_TX_POWER_MIMO_REGULATORY_COMPENSATION;
4564             is_mimo = 1;
4565         } else {
4566             curr_regu_power = regu_power;
4567             is_mimo = 0;
4568         }
4569
4570         power_limit = saturation_power - back_off_table[r];
4571         if (power_limit > curr_regu_power) {
4572             /* final Tx power limit */
4573             power_limit = curr_regu_power;
4574         }
4575
4576         if (target_power > power_limit) {
4577             target_power = power_limit; /* final target Tx power */
4578         }
4579
4580         for (c = 0; c < 2; c++) {	/* go through all Tx chains */
4581             if (is_mimo) {
4582                 atten_value =
4583                     sc->sc_card_alive_init.tx_atten[tx_grp][c];
4584             } else {
4585                 atten_value = 0;
4586             }
4587
4588             /*
4589              * calculate the index into the gain table;
4590              * this step is very important
4591              */
4592             txpower_gains_idx = interpo_gain_idx[c] -
4593                 (target_power - interpo_actual_pow[c]) -
4594                 tempera_comp[c] - voltage_compensation +
4595                 atten_value;
4596
4597             if (txpower_gains_idx <
4598                 iwk_min_power_index(r, is_24G)) {
4599                 txpower_gains_idx =
4600                     iwk_min_power_index(r, is_24G);
4601             }
4602
4603             if (!is_24G) {
4604                 /*
4605                  * support negative index for the 5 GHz
4606                  * band
4607                  */
4608                 txpower_gains_idx += 9;
4609             }
4610
4611             if (POWER_TABLE_CCK_ENTRY == r) {
4612                 /* for CCK mode, apply the necessary attenuation */
4613                 txpower_gains_idx +=
4614                     IWK_TX_POWER_CCK_COMPENSATION_C_STEP;
4615             }
4616
4617             if (txpower_gains_idx > 107) {
4618                 txpower_gains_idx = 107;
4619             } else if (txpower_gains_idx < 0) {
4620                 txpower_gains_idx = 0;
4621             }
4622
4623             /* look up DSP and radio gains in the gain table */
4624             txpower_gains.s.radio_tx_gain[c] =
4625                 gains_table[is_24G][txpower_gains_idx].radio;
4626             txpower_gains.s.dsp_predis_atten[c] =
4627                 gains_table[is_24G][txpower_gains_idx].dsp;
4628
4629
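            /* log the gain-table index chosen for this rate/chain pair */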
IWK_DBG((IWK_DEBUG_CALIBRATION,
4630                 "rate_index: %d, "
4631                 "gain_index: %d, c: %d, is_mimo: %d\n",
4632                 r, txpower_gains_idx, c, is_mimo));
4633         }
4634
4635         /* initialize Tx power table */
4636         if (r < POWER_TABLE_NUM_HT_OFDM_ENTRIES) {
4637             tp_db->ht_ofdm_power[r].dw = txpower_gains.dw;
4638         } else {
4639             tp_db->legacy_cck_power.dw = txpower_gains.dw;
4640         }
4641     }
4642
4643     return (IWK_SUCCESS);
4644 }
4645
4646 /*
4647  * perform Tx power calibration to adjust Tx power.
4648  * This is done by sending out the Tx power table command.
4649  */
4650 static int iwk_tx_power_calibration(iwk_sc_t *sc)
4651 {
4652     iwk_tx_power_table_cmd_t cmd;
4653     int rv;
4654
4655     if (sc->sc_flags & IWK_F_SCANNING) {
4656         return (IWK_SUCCESS);
4657     }
4658
4659     /* necessary initialization of the Tx power table command */
4660     cmd.band = (uint8_t)iwk_is_24G_band(sc);
4661     cmd.channel = sc->sc_config.chan;
4662     cmd.channel_normal_width = 0;
4663
4664     /* initialize Tx power table */
4665     rv = iwk_txpower_table_cmd_init(sc, &cmd.tx_power);
4666     if (rv) {
4667         cmn_err(CE_NOTE, "iwk_tx_power_calibration(): rv = %d\n", rv);
4668         return (rv);
4669     }
4670
4671     /* send out the Tx power table command */
4672     rv = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &cmd, sizeof (cmd), 1);
4673     if (rv) {
4674         return (rv);
4675     }
4676
4677     /* record current temperature */
4678     sc->sc_last_tempera = sc->sc_tempera;
4679
4680     return (IWK_SUCCESS);
4681 }
4682
4683 /* This function handles the statistics notification from the uCode */
4684 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc)
4685 {
4686     int is_diff;
4687     struct iwk_notif_statistics *statistics_p =
4688         (struct iwk_notif_statistics *)(desc + 1);
4689
4690     mutex_enter(&sc->sc_glock);
4691
4692     is_diff = (sc->sc_statistics.general.temperature !=
4693         statistics_p->general.temperature) ||
4694         ((sc->sc_statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
4695         (statistics_p->flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK));
4696
4697     /* update statistics data */
4698     (void) memcpy(&sc->sc_statistics, statistics_p,
4699         sizeof (struct iwk_notif_statistics));
4700
4701     sc->sc_flags |= IWK_F_STATISTICS;
4702
4703     if (!(sc->sc_flags & IWK_F_SCANNING)) {
4704         /* perform Receiver gain balance calibration */
4705         (void) iwk_rxgain_diff(sc);
4706
4707         /* perform Receiver sensitivity calibration */
4708         (void) iwk_rx_sens(sc);
4709     }
4710
4711
4712     if (!is_diff) {
4713         mutex_exit(&sc->sc_glock);
4714         return;
4715     }
4716
4717     /* recalculate the current temperature of the 4965 chipset */
4718     sc->sc_tempera = iwk_curr_tempera(sc);
4719
4720     /* a distinct temperature change will trigger Tx power calibration */
4721     if (((sc->sc_tempera - sc->sc_last_tempera) >= 3) ||
4722         ((sc->sc_last_tempera - sc->sc_tempera) >= 3)) {
4723         /* perform Tx power calibration */
4724         (void) iwk_tx_power_calibration(sc);
4725     }
4726
4727     mutex_exit(&sc->sc_glock);
4728 }
4729
4730 /* Determine whether this station is associated */
4731 static int iwk_is_associated(iwk_sc_t *sc)
4732 {
4733     return (sc->sc_config.filter_flags & RXON_FILTER_ASSOC_MSK);
4734 }
4735
4736 /* Make the necessary preparations for Receiver gain balance calibration */
4737 static int iwk_rxgain_diff_init(iwk_sc_t *sc)
4738 {
4739     int i, rv;
4740     struct iwk_calibration_cmd cmd;
4741     struct iwk_rx_gain_diff *gain_diff_p;
4742
4743     gain_diff_p = &sc->sc_rxgain_diff;
4744
4745     (void) memset(gain_diff_p, 0, sizeof (struct iwk_rx_gain_diff));
4746     (void) memset(&cmd, 0, sizeof (struct iwk_calibration_cmd));
4747
4748     for (i = 0; i < RX_CHAINS_NUM; i++) {
4749
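        /* mark each chain as not yet calibrated */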
gain_diff_p->gain_diff_chain[i] = CHAIN_GAIN_DIFF_INIT_VAL;
4750     }
4751
4752     if (iwk_is_associated(sc)) {
4753         cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
4754         cmd.diff_gain_a = 0;
4755         cmd.diff_gain_b = 0;
4756         cmd.diff_gain_c = 0;
4757
4758         /* assume the gains of all Rx chains start out balanced */
4759         rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &cmd,
4760             sizeof (cmd), 1);
4761         if (rv) {
4762             return (rv);
4763         }
4764
4765         gain_diff_p->state = IWK_GAIN_DIFF_ACCUMULATE;
4766     }
4767
4768     return (IWK_SUCCESS);
4769 }
4770
4771 /*
4772  * perform Receiver gain balance calibration to balance the Rx gain
4773  * between Rx chains and determine which chain is disconnected
4774  */
4775 static int iwk_rxgain_diff(iwk_sc_t *sc)
4776 {
4777     int i, is_24G, rv;
4778     int max_beacon_chain_n;
4779     int min_noise_chain_n;
4780     uint16_t channel_n;
4781     int32_t beacon_diff;
4782     int32_t noise_diff;
4783     uint32_t noise_chain_a, noise_chain_b, noise_chain_c;
4784     uint32_t beacon_chain_a, beacon_chain_b, beacon_chain_c;
4785     struct iwk_calibration_cmd cmd;
4786     uint32_t beacon_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
4787     uint32_t noise_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
4788     struct statistics_rx_non_phy *rx_general_p =
4789         &sc->sc_statistics.rx.general;
4790     struct iwk_rx_gain_diff *gain_diff_p = &sc->sc_rxgain_diff;
4791
4792     if (INTERFERENCE_DATA_AVAILABLE !=
4793         rx_general_p->interference_data_flag) {
4794         return (IWK_SUCCESS);
4795     }
4796
4797     if (IWK_GAIN_DIFF_ACCUMULATE != gain_diff_p->state) {
4798         return (IWK_SUCCESS);
4799     }
4800
4801     is_24G = iwk_is_24G_band(sc);
4802     channel_n = sc->sc_config.chan;	/* channel number */
4803
4804     if ((channel_n != (sc->sc_statistics.flag >> 16)) ||
4805         ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
4806         (sc->sc_statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) &&
4807         !is_24G)) {
4808         return (IWK_SUCCESS);
4809     }
4810
4811     /* Rx chain's noise strength from statistics notification */
4812     noise_chain_a = rx_general_p->beacon_silence_rssi_a & 0xFF;
4813     noise_chain_b = rx_general_p->beacon_silence_rssi_b & 0xFF;
4814     noise_chain_c = rx_general_p->beacon_silence_rssi_c & 0xFF;
4815
4816     /* Rx chain's beacon strength from statistics notification */
4817     beacon_chain_a = rx_general_p->beacon_rssi_a & 0xFF;
4818     beacon_chain_b = rx_general_p->beacon_rssi_b & 0xFF;
4819     beacon_chain_c = rx_general_p->beacon_rssi_c & 0xFF;
4820
4821     gain_diff_p->beacon_count++;
4822
4823     /* accumulate chain's noise strength */
4824     gain_diff_p->noise_stren_a += noise_chain_a;
4825     gain_diff_p->noise_stren_b += noise_chain_b;
4826     gain_diff_p->noise_stren_c += noise_chain_c;
4827
4828     /* accumulate chain's beacon strength */
4829     gain_diff_p->beacon_stren_a += beacon_chain_a;
4830     gain_diff_p->beacon_stren_b += beacon_chain_b;
4831     gain_diff_p->beacon_stren_c += beacon_chain_c;
4832
4833     if (BEACON_NUM_20 == gain_diff_p->beacon_count) {
4834         /* calculate average beacon strength */
4835         beacon_aver[0] = (gain_diff_p->beacon_stren_a) / BEACON_NUM_20;
4836         beacon_aver[1] = (gain_diff_p->beacon_stren_b) / BEACON_NUM_20;
4837         beacon_aver[2] = (gain_diff_p->beacon_stren_c) / BEACON_NUM_20;
4838
4839         /* calculate average noise strength */
4840         noise_aver[0] = (gain_diff_p->noise_stren_a) / BEACON_NUM_20;
4841         noise_aver[1] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20;
4842         noise_aver[2] = (gain_diff_p->noise_stren_c) / BEACON_NUM_20;
4843
4844         /* determine maximum beacon strength among 3 chains */
4845         if ((beacon_aver[0] >= beacon_aver[1]) &&
4846             (beacon_aver[0] >= beacon_aver[2])) {
4847             max_beacon_chain_n = 0;
4848
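            /* chain A has the strongest average beacon signal */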
gain_diff_p->connected_chains = 1 << 0; 4849 } else if (beacon_aver[1] >= beacon_aver[2]) { 4850 max_beacon_chain_n = 1; 4851 gain_diff_p->connected_chains = 1 << 1; 4852 } else { 4853 max_beacon_chain_n = 2; 4854 gain_diff_p->connected_chains = 1 << 2; 4855 } 4856 4857 /* determine which chain is disconnected */ 4858 for (i = 0; i < RX_CHAINS_NUM; i++) { 4859 if (i != max_beacon_chain_n) { 4860 beacon_diff = beacon_aver[max_beacon_chain_n] - 4861 beacon_aver[i]; 4862 if (beacon_diff > MAX_ALLOWED_DIFF) { 4863 gain_diff_p->disconnect_chain[i] = 1; 4864 } else { 4865 gain_diff_p->connected_chains |= 4866 (1 << i); 4867 } 4868 } 4869 } 4870 4871 /* 4872 * if chain A and B are both disconnected, 4873 * assume the stronger in beacon strength is connected 4874 */ 4875 if (gain_diff_p->disconnect_chain[0] && 4876 gain_diff_p->disconnect_chain[1]) { 4877 if (beacon_aver[0] >= beacon_aver[1]) { 4878 gain_diff_p->disconnect_chain[0] = 0; 4879 gain_diff_p->connected_chains |= (1 << 0); 4880 } else { 4881 gain_diff_p->disconnect_chain[1] = 0; 4882 gain_diff_p->connected_chains |= (1 << 1); 4883 } 4884 } 4885 4886 /* determine minimum noise strength among 3 chains */ 4887 if (!gain_diff_p->disconnect_chain[0]) { 4888 min_noise_chain_n = 0; 4889 4890 for (i = 0; i < RX_CHAINS_NUM; i++) { 4891 if (!gain_diff_p->disconnect_chain[i] && 4892 (noise_aver[i] <= 4893 noise_aver[min_noise_chain_n])) { 4894 min_noise_chain_n = i; 4895 } 4896 4897 } 4898 } else { 4899 min_noise_chain_n = 1; 4900 4901 for (i = 0; i < RX_CHAINS_NUM; i++) { 4902 if (!gain_diff_p->disconnect_chain[i] && 4903 (noise_aver[i] <= 4904 noise_aver[min_noise_chain_n])) { 4905 min_noise_chain_n = i; 4906 } 4907 } 4908 } 4909 4910 gain_diff_p->gain_diff_chain[min_noise_chain_n] = 0; 4911 4912 /* determine gain difference between chains */ 4913 for (i = 0; i < RX_CHAINS_NUM; i++) { 4914 if (!gain_diff_p->disconnect_chain[i] && 4915 (CHAIN_GAIN_DIFF_INIT_VAL == 4916 gain_diff_p->gain_diff_chain[i])) { 4917 4918 noise_diff = noise_aver[i] - 4919 noise_aver[min_noise_chain_n]; 4920 gain_diff_p->gain_diff_chain[i] = 4921 (uint8_t)((noise_diff * 10) / 15); 4922 4923 if (gain_diff_p->gain_diff_chain[i] > 3) { 4924 gain_diff_p->gain_diff_chain[i] = 3; 4925 } 4926 4927 gain_diff_p->gain_diff_chain[i] |= (1 << 2); 4928 } else { 4929 gain_diff_p->gain_diff_chain[i] = 0; 4930 } 4931 } 4932 4933 if (!gain_diff_p->gain_diff_send) { 4934 gain_diff_p->gain_diff_send = 1; 4935 4936 (void) memset(&cmd, 0, sizeof (cmd)); 4937 4938 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD; 4939 cmd.diff_gain_a = gain_diff_p->gain_diff_chain[0]; 4940 cmd.diff_gain_b = gain_diff_p->gain_diff_chain[1]; 4941 cmd.diff_gain_c = gain_diff_p->gain_diff_chain[2]; 4942 4943 /* 4944 * send out PHY calibration command to 4945 * adjust every chain's Rx gain 4946 */ 4947 rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, 4948 &cmd, sizeof (cmd), 1); 4949 if (rv) { 4950 return (rv); 4951 } 4952 4953 gain_diff_p->state = IWK_GAIN_DIFF_CALIBRATED; 4954 } 4955 4956 gain_diff_p->beacon_stren_a = 0; 4957 gain_diff_p->beacon_stren_b = 0; 4958 gain_diff_p->beacon_stren_c = 0; 4959 4960 gain_diff_p->noise_stren_a = 0; 4961 gain_diff_p->noise_stren_b = 0; 4962 gain_diff_p->noise_stren_c = 0; 4963 } 4964 4965 return (IWK_SUCCESS); 4966 } 4967 4968 /* Make necessary preparation for Receiver sensitivity calibration */ 4969 static int iwk_rx_sens_init(iwk_sc_t *sc) 4970 { 4971 int i, rv; 4972 struct iwk_rx_sensitivity_cmd cmd; 4973 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens; 4974 4975 (void) 
memset(&cmd, 0, sizeof (struct iwk_rx_sensitivity_cmd)); 4976 (void) memset(rx_sens_p, 0, sizeof (struct iwk_rx_sensitivity)); 4977 4978 rx_sens_p->auto_corr_ofdm_x4 = 90; 4979 rx_sens_p->auto_corr_mrc_ofdm_x4 = 170; 4980 rx_sens_p->auto_corr_ofdm_x1 = 105; 4981 rx_sens_p->auto_corr_mrc_ofdm_x1 = 220; 4982 4983 rx_sens_p->auto_corr_cck_x4 = 125; 4984 rx_sens_p->auto_corr_mrc_cck_x4 = 200; 4985 rx_sens_p->min_energy_det_cck = 100; 4986 4987 rx_sens_p->flags &= (~IWK_SENSITIVITY_CALIB_ALLOW_MSK); 4988 rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK); 4989 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK); 4990 4991 rx_sens_p->last_bad_plcp_cnt_ofdm = 0; 4992 rx_sens_p->last_false_alarm_cnt_ofdm = 0; 4993 rx_sens_p->last_bad_plcp_cnt_cck = 0; 4994 rx_sens_p->last_false_alarm_cnt_cck = 0; 4995 4996 rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM; 4997 rx_sens_p->cck_prev_state = IWK_TOO_MANY_FALSE_ALARM; 4998 rx_sens_p->cck_no_false_alarm_num = 0; 4999 rx_sens_p->cck_beacon_idx = 0; 5000 5001 for (i = 0; i < 10; i++) { 5002 rx_sens_p->cck_beacon_min[i] = 0; 5003 } 5004 5005 rx_sens_p->cck_noise_idx = 0; 5006 rx_sens_p->cck_noise_ref = 0; 5007 5008 for (i = 0; i < 20; i++) { 5009 rx_sens_p->cck_noise_max[i] = 0; 5010 } 5011 5012 rx_sens_p->cck_noise_diff = 0; 5013 rx_sens_p->cck_no_false_alarm_num = 0; 5014 5015 cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE; 5016 5017 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] = 5018 rx_sens_p->auto_corr_ofdm_x4; 5019 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] = 5020 rx_sens_p->auto_corr_mrc_ofdm_x4; 5021 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] = 5022 rx_sens_p->auto_corr_ofdm_x1; 5023 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] = 5024 rx_sens_p->auto_corr_mrc_ofdm_x1; 5025 5026 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] = 5027 rx_sens_p->auto_corr_cck_x4; 5028 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] = 5029 rx_sens_p->auto_corr_mrc_cck_x4; 5030 cmd.table[MIN_ENERGY_CCK_DET_IDX] = rx_sens_p->min_energy_det_cck; 5031 5032 cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100; 5033 cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190; 5034 cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390; 5035 cmd.table[PTAM_ENERGY_TH_IDX] = 62; 5036 5037 /* at first, set up Rx to maximum sensitivity */ 5038 rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1); 5039 if (rv) { 5040 cmn_err(CE_WARN, "iwk_rx_sens_init(): " 5041 "in the process of initialization, " 5042 "failed to send rx sensitivity command\n"); 5043 return (rv); 5044 } 5045 5046 rx_sens_p->flags |= IWK_SENSITIVITY_CALIB_ALLOW_MSK; 5047 5048 return (IWK_SUCCESS); 5049 } 5050 5051 /* 5052 * make Receiver sensitivity calibration to adjust every chain's Rx sensitivity. 
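 * The OFDM and CCK thresholds are updated separately (iwk_ofdm_sens() and
 * iwk_cck_sens()) and the merged table is then pushed to the uCode with
 * SENSITIVITY_CMD.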
5053  * for more information, please refer to the iwk_calibration.h file
5054  */
5055 static int iwk_rx_sens(iwk_sc_t *sc)
5056 {
5057     int rv;
5058     uint32_t actual_rx_time;
5059     struct statistics_rx_non_phy *rx_general_p =
5060         &sc->sc_statistics.rx.general;
5061     struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5062     struct iwk_rx_sensitivity_cmd cmd;
5063
5064     if (!(rx_sens_p->flags & IWK_SENSITIVITY_CALIB_ALLOW_MSK)) {
5065         cmn_err(CE_WARN, "iwk_rx_sens(): "
5066             "sensitivity initialization has not finished.\n");
5067         return (DDI_FAILURE);
5068     }
5069
5070     if (INTERFERENCE_DATA_AVAILABLE !=
5071         rx_general_p->interference_data_flag) {
5072         cmn_err(CE_WARN, "iwk_rx_sens(): "
5073             "can't perform rx sensitivity calibration "
5074             "because of invalid statistics\n");
5075         return (DDI_FAILURE);
5076     }
5077
5078     actual_rx_time = rx_general_p->channel_load;
5079     if (!actual_rx_time) {
5080         cmn_err(CE_WARN, "iwk_rx_sens(): "
5081             "can't perform rx sensitivity calibration "
5082             "because there is not enough rx time\n");
5083         return (DDI_FAILURE);
5084     }
5085
5086     /* perform Rx sensitivity calibration for OFDM mode */
5087     rv = iwk_ofdm_sens(sc, actual_rx_time);
5088     if (rv) {
5089         return (rv);
5090     }
5091
5092     /* perform Rx sensitivity calibration for CCK mode */
5093     rv = iwk_cck_sens(sc, actual_rx_time);
5094     if (rv) {
5095         return (rv);
5096     }
5097
5098     /*
5099      * if neither the OFDM nor the CCK thresholds changed, there is nothing to do
5100      */
5101     if ((!(rx_sens_p->flags & IWK_SENSITIVITY_OFDM_UPDATE_MSK)) &&
5102         (!(rx_sens_p->flags & IWK_SENSITIVITY_CCK_UPDATE_MSK))) {
5103         return (IWK_SUCCESS);
5104     }
5105
5106     cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
5107
5108     cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5109         rx_sens_p->auto_corr_ofdm_x4;
5110     cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5111         rx_sens_p->auto_corr_mrc_ofdm_x4;
5112     cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5113         rx_sens_p->auto_corr_ofdm_x1;
5114     cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5115         rx_sens_p->auto_corr_mrc_ofdm_x1;
5116
5117     cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5118         rx_sens_p->auto_corr_cck_x4;
5119     cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5120         rx_sens_p->auto_corr_mrc_cck_x4;
5121     cmd.table[MIN_ENERGY_CCK_DET_IDX] =
5122         rx_sens_p->min_energy_det_cck;
5123
5124     cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5125     cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5126     cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5127     cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5128
5129     /*
5130      * send the sensitivity command to complete the actual calibration
5131      */
5132     rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5133     if (rv) {
5134         cmn_err(CE_WARN, "iwk_rx_sens(): "
5135             "failed to send rx sensitivity command\n");
5136         return (rv);
5137     }
5138
5139     return (IWK_SUCCESS);
5140
5141 }
5142
5143 /*
5144  * perform Rx sensitivity calibration for CCK mode.
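 * The thresholds are driven by the accumulated false-alarm and bad-PLCP
 * counters, weighed against the measured rx time.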
5145 * This is preparing parameters for Sensitivity command 5146 */ 5147 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time) 5148 { 5149 int i; 5150 uint8_t noise_a, noise_b, noise_c; 5151 uint8_t max_noise_abc, max_noise_20; 5152 uint32_t beacon_a, beacon_b, beacon_c; 5153 uint32_t min_beacon_abc, max_beacon_10; 5154 uint32_t cck_fa, cck_bp; 5155 uint32_t cck_sum_fa_bp; 5156 uint32_t temp; 5157 struct statistics_rx_non_phy *rx_general_p = 5158 &sc->sc_statistics.rx.general; 5159 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens; 5160 5161 cck_fa = sc->sc_statistics.rx.cck.false_alarm_cnt; 5162 cck_bp = sc->sc_statistics.rx.cck.plcp_err; 5163 5164 /* accumulate false alarm */ 5165 if (rx_sens_p->last_false_alarm_cnt_cck > cck_fa) { 5166 temp = rx_sens_p->last_false_alarm_cnt_cck; 5167 rx_sens_p->last_false_alarm_cnt_cck = cck_fa; 5168 cck_fa += (0xFFFFFFFF - temp); 5169 } else { 5170 cck_fa -= rx_sens_p->last_false_alarm_cnt_cck; 5171 rx_sens_p->last_false_alarm_cnt_cck += cck_fa; 5172 } 5173 5174 /* accumulate bad plcp */ 5175 if (rx_sens_p->last_bad_plcp_cnt_cck > cck_bp) { 5176 temp = rx_sens_p->last_bad_plcp_cnt_cck; 5177 rx_sens_p->last_bad_plcp_cnt_cck = cck_bp; 5178 cck_bp += (0xFFFFFFFF - temp); 5179 } else { 5180 cck_bp -= rx_sens_p->last_bad_plcp_cnt_cck; 5181 rx_sens_p->last_bad_plcp_cnt_cck += cck_bp; 5182 } 5183 5184 /* 5185 * calculate relative value 5186 */ 5187 cck_sum_fa_bp = (cck_fa + cck_bp) * 200 * 1024; 5188 rx_sens_p->cck_noise_diff = 0; 5189 5190 noise_a = 5191 (uint8_t)((rx_general_p->beacon_silence_rssi_a & 0xFF00) >> 8); 5192 noise_b = 5193 (uint8_t)((rx_general_p->beacon_silence_rssi_b & 0xFF00) >> 8); 5194 noise_c = 5195 (uint8_t)((rx_general_p->beacon_silence_rssi_c & 0xFF00) >> 8); 5196 5197 beacon_a = rx_general_p->beacon_energy_a; 5198 beacon_b = rx_general_p->beacon_energy_b; 5199 beacon_c = rx_general_p->beacon_energy_c; 5200 5201 /* determine maximum noise among 3 chains */ 5202 if ((noise_a >= noise_b) && (noise_a >= noise_c)) { 5203 max_noise_abc = noise_a; 5204 } else if (noise_b >= noise_c) { 5205 max_noise_abc = noise_b; 5206 } else { 5207 max_noise_abc = noise_c; 5208 } 5209 5210 /* record maximum noise among 3 chains */ 5211 rx_sens_p->cck_noise_max[rx_sens_p->cck_noise_idx] = max_noise_abc; 5212 rx_sens_p->cck_noise_idx++; 5213 if (rx_sens_p->cck_noise_idx >= 20) { 5214 rx_sens_p->cck_noise_idx = 0; 5215 } 5216 5217 /* determine maximum noise among 20 max noise */ 5218 max_noise_20 = rx_sens_p->cck_noise_max[0]; 5219 for (i = 0; i < 20; i++) { 5220 if (rx_sens_p->cck_noise_max[i] >= max_noise_20) { 5221 max_noise_20 = rx_sens_p->cck_noise_max[i]; 5222 } 5223 } 5224 5225 /* determine minimum beacon among 3 chains */ 5226 if ((beacon_a <= beacon_b) && (beacon_a <= beacon_c)) { 5227 min_beacon_abc = beacon_a; 5228 } else if (beacon_b <= beacon_c) { 5229 min_beacon_abc = beacon_b; 5230 } else { 5231 min_beacon_abc = beacon_c; 5232 } 5233 5234 /* record miminum beacon among 3 chains */ 5235 rx_sens_p->cck_beacon_min[rx_sens_p->cck_beacon_idx] = min_beacon_abc; 5236 rx_sens_p->cck_beacon_idx++; 5237 if (rx_sens_p->cck_beacon_idx >= 10) { 5238 rx_sens_p->cck_beacon_idx = 0; 5239 } 5240 5241 /* determine maximum beacon among 10 miminum beacon among 3 chains */ 5242 max_beacon_10 = rx_sens_p->cck_beacon_min[0]; 5243 for (i = 0; i < 10; i++) { 5244 if (rx_sens_p->cck_beacon_min[i] >= max_beacon_10) { 5245 max_beacon_10 = rx_sens_p->cck_beacon_min[i]; 5246 } 5247 } 5248 5249 /* add a little margin */ 5250 max_beacon_10 += 6; 5251 5252 /* 
record the count of having no false alarms */ 5253 if (cck_sum_fa_bp < (5 * actual_rx_time)) { 5254 rx_sens_p->cck_no_false_alarm_num++; 5255 } else { 5256 rx_sens_p->cck_no_false_alarm_num = 0; 5257 } 5258 5259 /* 5260 * adjust parameters in sensitivity command 5261 * according to different status. 5262 * for more infomation, please refer to iwk_calibration.h file 5263 */ 5264 if (cck_sum_fa_bp > (50 * actual_rx_time)) { 5265 rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM; 5266 5267 if (rx_sens_p->auto_corr_cck_x4 > 160) { 5268 rx_sens_p->cck_noise_ref = max_noise_20; 5269 5270 if (rx_sens_p->min_energy_det_cck > 2) { 5271 rx_sens_p->min_energy_det_cck -= 2; 5272 } 5273 } 5274 5275 if (rx_sens_p->auto_corr_cck_x4 < 160) { 5276 rx_sens_p->auto_corr_cck_x4 = 160 + 1; 5277 } else { 5278 if ((rx_sens_p->auto_corr_cck_x4 + 3) < 200) { 5279 rx_sens_p->auto_corr_cck_x4 += 3; 5280 } else { 5281 rx_sens_p->auto_corr_cck_x4 = 200; 5282 } 5283 } 5284 5285 if ((rx_sens_p->auto_corr_mrc_cck_x4 + 3) < 400) { 5286 rx_sens_p->auto_corr_mrc_cck_x4 += 3; 5287 } else { 5288 rx_sens_p->auto_corr_mrc_cck_x4 = 400; 5289 } 5290 5291 rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK; 5292 5293 } else if (cck_sum_fa_bp < (5 * actual_rx_time)) { 5294 rx_sens_p->cck_curr_state = IWK_TOO_FEW_FALSE_ALARM; 5295 5296 rx_sens_p->cck_noise_diff = (int32_t)rx_sens_p->cck_noise_ref - 5297 (int32_t)max_noise_20; 5298 5299 if ((rx_sens_p->cck_prev_state != IWK_TOO_MANY_FALSE_ALARM) && 5300 ((rx_sens_p->cck_noise_diff > 2) || 5301 (rx_sens_p->cck_no_false_alarm_num > 100))) { 5302 if ((rx_sens_p->min_energy_det_cck + 2) < 97) { 5303 rx_sens_p->min_energy_det_cck += 2; 5304 } else { 5305 rx_sens_p->min_energy_det_cck = 97; 5306 } 5307 5308 if ((rx_sens_p->auto_corr_cck_x4 - 3) > 125) { 5309 rx_sens_p->auto_corr_cck_x4 -= 3; 5310 } else { 5311 rx_sens_p->auto_corr_cck_x4 = 125; 5312 } 5313 5314 if ((rx_sens_p->auto_corr_mrc_cck_x4 -3) > 200) { 5315 rx_sens_p->auto_corr_mrc_cck_x4 -= 3; 5316 } else { 5317 rx_sens_p->auto_corr_mrc_cck_x4 = 200; 5318 } 5319 5320 rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK; 5321 } else { 5322 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK); 5323 } 5324 } else { 5325 rx_sens_p->cck_curr_state = IWK_GOOD_RANGE_FALSE_ALARM; 5326 5327 rx_sens_p->cck_noise_ref = max_noise_20; 5328 5329 if (IWK_TOO_MANY_FALSE_ALARM == rx_sens_p->cck_prev_state) { 5330 rx_sens_p->min_energy_det_cck -= 8; 5331 } 5332 5333 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK); 5334 } 5335 5336 if (rx_sens_p->min_energy_det_cck < max_beacon_10) { 5337 rx_sens_p->min_energy_det_cck = (uint16_t)max_beacon_10; 5338 } 5339 5340 rx_sens_p->cck_prev_state = rx_sens_p->cck_curr_state; 5341 5342 return (IWK_SUCCESS); 5343 } 5344 5345 /* 5346 * make Rx sensitivity calibration for OFDM mode. 
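 * The auto-correlation thresholds are nudged up when false alarms are too
 * frequent and down when they are too rare.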
5347 * This is preparing parameters for Sensitivity command 5348 */ 5349 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time) 5350 { 5351 uint32_t temp; 5352 uint16_t temp1; 5353 uint32_t ofdm_fa, ofdm_bp; 5354 uint32_t ofdm_sum_fa_bp; 5355 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens; 5356 5357 ofdm_fa = sc->sc_statistics.rx.ofdm.false_alarm_cnt; 5358 ofdm_bp = sc->sc_statistics.rx.ofdm.plcp_err; 5359 5360 /* accumulate false alarm */ 5361 if (rx_sens_p->last_false_alarm_cnt_ofdm > ofdm_fa) { 5362 temp = rx_sens_p->last_false_alarm_cnt_ofdm; 5363 rx_sens_p->last_false_alarm_cnt_ofdm = ofdm_fa; 5364 ofdm_fa += (0xFFFFFFFF - temp); 5365 } else { 5366 ofdm_fa -= rx_sens_p->last_false_alarm_cnt_ofdm; 5367 rx_sens_p->last_false_alarm_cnt_ofdm += ofdm_fa; 5368 } 5369 5370 /* accumulate bad plcp */ 5371 if (rx_sens_p->last_bad_plcp_cnt_ofdm > ofdm_bp) { 5372 temp = rx_sens_p->last_bad_plcp_cnt_ofdm; 5373 rx_sens_p->last_bad_plcp_cnt_ofdm = ofdm_bp; 5374 ofdm_bp += (0xFFFFFFFF - temp); 5375 } else { 5376 ofdm_bp -= rx_sens_p->last_bad_plcp_cnt_ofdm; 5377 rx_sens_p->last_bad_plcp_cnt_ofdm += ofdm_bp; 5378 } 5379 5380 ofdm_sum_fa_bp = (ofdm_fa + ofdm_bp) * 200 * 1024; /* relative value */ 5381 5382 /* 5383 * adjust parameter in sensitivity command according to different status 5384 */ 5385 if (ofdm_sum_fa_bp > (50 * actual_rx_time)) { 5386 temp1 = rx_sens_p->auto_corr_ofdm_x4 + 1; 5387 rx_sens_p->auto_corr_ofdm_x4 = (temp1 <= 120) ? temp1 : 120; 5388 5389 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 + 1; 5390 rx_sens_p->auto_corr_mrc_ofdm_x4 = 5391 (temp1 <= 210) ? temp1 : 210; 5392 5393 temp1 = rx_sens_p->auto_corr_ofdm_x1 + 1; 5394 rx_sens_p->auto_corr_ofdm_x1 = (temp1 <= 140) ? temp1 : 140; 5395 5396 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 + 1; 5397 rx_sens_p->auto_corr_mrc_ofdm_x1 = 5398 (temp1 <= 270) ? temp1 : 270; 5399 5400 rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK; 5401 5402 } else if (ofdm_sum_fa_bp < (5 * actual_rx_time)) { 5403 temp1 = rx_sens_p->auto_corr_ofdm_x4 - 1; 5404 rx_sens_p->auto_corr_ofdm_x4 = (temp1 >= 85) ? temp1 : 85; 5405 5406 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 - 1; 5407 rx_sens_p->auto_corr_mrc_ofdm_x4 = 5408 (temp1 >= 170) ? temp1 : 170; 5409 5410 temp1 = rx_sens_p->auto_corr_ofdm_x1 - 1; 5411 rx_sens_p->auto_corr_ofdm_x1 = (temp1 >= 105) ? temp1 : 105; 5412 5413 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 - 1; 5414 rx_sens_p->auto_corr_mrc_ofdm_x1 = 5415 (temp1 >= 220) ? temp1 : 220; 5416 5417 rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK; 5418 5419 } else { 5420 rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK); 5421 } 5422 5423 return (IWK_SUCCESS); 5424 } 5425 5426 /* 5427 * 1) log_event_table_ptr indicates base of the event log. This traces 5428 * a 256-entry history of uCode execution within a circular buffer. 5429 * Its header format is: 5430 * 5431 * uint32_t log_size; log capacity (in number of entries) 5432 * uint32_t type; (1) timestamp with each entry, (0) no timestamp 5433 * uint32_t wraps; # times uCode has wrapped to top of circular buffer 5434 * uint32_t write_index; next circular buffer entry that uCode would fill 5435 * 5436 * The header is followed by the circular buffer of log entries. Entries 5437 * with timestamps have the following format: 5438 * 5439 * uint32_t event_id; range 0 - 1500 5440 * uint32_t timestamp; low 32 bits of TSF (of network, if associated) 5441 * uint32_t data; event_id-specific data value 5442 * 5443 * Entries without timestamps contain only event_id and data. 
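 * (so an entry occupies three 32-bit words when timestamps are enabled,
 * or two words when they are not)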
5444  */
5445
5446 /*
5447  * iwk_write_event_log - Write event log to dmesg
5448  */
5449 static void iwk_write_event_log(iwk_sc_t *sc)
5450 {
5451     uint32_t log_event_table_ptr;	/* Start address of event table */
5452     uint32_t startptr;	/* Start address of log data */
5453     uint32_t logptr;	/* address of log data entry */
5454     uint32_t i, n, num_events;
5455     uint32_t event_id, data1, data2;	/* log data */
5456
5457     uint32_t log_size;	/* log capacity (in number of entries) */
5458     uint32_t type;	/* (1) timestamp with each entry, (0) no timestamp */
5459     uint32_t wraps;	/* # times uCode has wrapped to */
5460 			/* the top of circular buffer */
5461     uint32_t idx;	/* index of entry to be filled in next */
5462
5463     log_event_table_ptr = sc->sc_card_alive_run.log_event_table_ptr;
5464     if (!(log_event_table_ptr)) {
5465         IWK_DBG((IWK_DEBUG_EEPROM, "NULL event table pointer\n"));
5466         return;
5467     }
5468
5469     iwk_mac_access_enter(sc);
5470
5471     /* Read log header */
5472     log_size = iwk_mem_read(sc, log_event_table_ptr);
5473     log_event_table_ptr += sizeof (uint32_t);	/* addr of "type" */
5474     type = iwk_mem_read(sc, log_event_table_ptr);
5475     log_event_table_ptr += sizeof (uint32_t);	/* addr of "wraps" */
5476     wraps = iwk_mem_read(sc, log_event_table_ptr);
5477     log_event_table_ptr += sizeof (uint32_t);	/* addr of "idx" */
5478     idx = iwk_mem_read(sc, log_event_table_ptr);
5479     startptr = log_event_table_ptr +
5480         sizeof (uint32_t);	/* addr of start of log data */
5481     if (!log_size && !wraps) {
5482         IWK_DBG((IWK_DEBUG_EEPROM, "Empty log\n"));
5483         iwk_mac_access_exit(sc);
5484         return;
5485     }
5486
5487     if (!wraps) {
5488         num_events = idx;
5489         logptr = startptr;
5490     } else {
5491         num_events = log_size - idx;
5492         n = type ? 3 : 2;	/* 3 words per entry with timestamps, else 2 */
5493         logptr = startptr + (idx * n * sizeof (uint32_t));
5494     }
5495
5496     for (i = 0; i < num_events; i++) {
5497         event_id = iwk_mem_read(sc, logptr);
5498         logptr += sizeof (uint32_t);
5499         data1 = iwk_mem_read(sc, logptr);
5500         logptr += sizeof (uint32_t);
5501         if (type == 0) { /* no timestamp */
5502             IWK_DBG((IWK_DEBUG_EEPROM, "Event ID=%d, Data=0x%x",
5503                 event_id, data1));
5504         } else { /* timestamp */
5505             data2 = iwk_mem_read(sc, logptr);
5506             printf("Time=%d, Event ID=%d, Data=0x%x\n",
5507                 data1, event_id, data2);
5508             IWK_DBG((IWK_DEBUG_EEPROM,
5509                 "Time=%d, Event ID=%d, Data=0x%x\n",
5510                 data1, event_id, data2));
5511             logptr += sizeof (uint32_t);
5512         }
5513     }
5514
5515     /*
5516      * Print the wrapped-around entries, if any
5517      */
5518     if (wraps) {
5519         logptr = startptr;
5520         for (i = 0; i < idx; i++) {
5521             event_id = iwk_mem_read(sc, logptr);
5522             logptr += sizeof (uint32_t);
5523             data1 = iwk_mem_read(sc, logptr);
5524             logptr += sizeof (uint32_t);
5525             if (type == 0) { /* no timestamp */
5526                 IWK_DBG((IWK_DEBUG_EEPROM,
5527                     "Event ID=%d, Data=0x%x", event_id, data1));
5528             } else { /* timestamp */
5529                 data2 = iwk_mem_read(sc, logptr);
5530                 IWK_DBG((IWK_DEBUG_EEPROM,
5531                     "Time = %d, Event ID=%d, Data=0x%x\n",
5532                     data1, event_id, data2));
5533                 logptr += sizeof (uint32_t);
5534             }
5535         }
5536     }
5537
5538     iwk_mac_access_exit(sc);
5539 }
5540
5541 /*
5542  * error_event_table_ptr indicates base of the error log. This contains
5543  * information about any uCode error that occurs.
For 4965, the format is: 5544 * 5545 * uint32_t valid; (nonzero) valid, (0) log is empty 5546 * uint32_t error_id; type of error 5547 * uint32_t pc; program counter 5548 * uint32_t blink1; branch link 5549 * uint32_t blink2; branch link 5550 * uint32_t ilink1; interrupt link 5551 * uint32_t ilink2; interrupt link 5552 * uint32_t data1; error-specific data 5553 * uint32_t data2; error-specific data 5554 * uint32_t line; source code line of error 5555 * uint32_t bcon_time; beacon timer 5556 * uint32_t tsf_low; network timestamp function timer 5557 * uint32_t tsf_hi; network timestamp function timer 5558 */ 5559 /* 5560 * iwk_write_error_log - Write error log to dmesg 5561 */ 5562 static void iwk_write_error_log(iwk_sc_t *sc) 5563 { 5564 uint32_t err_ptr; /* Start address of error log */ 5565 uint32_t valid; /* is error log valid */ 5566 5567 err_ptr = sc->sc_card_alive_run.error_event_table_ptr; 5568 if (!(err_ptr)) { 5569 IWK_DBG((IWK_DEBUG_EEPROM, "NULL error table pointer\n")); 5570 return; 5571 } 5572 5573 iwk_mac_access_enter(sc); 5574 5575 valid = iwk_mem_read(sc, err_ptr); 5576 if (!(valid)) { 5577 IWK_DBG((IWK_DEBUG_EEPROM, "Error data not valid\n")); 5578 iwk_mac_access_exit(sc); 5579 return; 5580 } 5581 err_ptr += sizeof (uint32_t); 5582 IWK_DBG((IWK_DEBUG_EEPROM, "err=%d ", iwk_mem_read(sc, err_ptr))); 5583 err_ptr += sizeof (uint32_t); 5584 IWK_DBG((IWK_DEBUG_EEPROM, "pc=0x%X ", iwk_mem_read(sc, err_ptr))); 5585 err_ptr += sizeof (uint32_t); 5586 IWK_DBG((IWK_DEBUG_EEPROM, 5587 "branch link1=0x%X ", iwk_mem_read(sc, err_ptr))); 5588 err_ptr += sizeof (uint32_t); 5589 IWK_DBG((IWK_DEBUG_EEPROM, 5590 "branch link2=0x%X ", iwk_mem_read(sc, err_ptr))); 5591 err_ptr += sizeof (uint32_t); 5592 IWK_DBG((IWK_DEBUG_EEPROM, 5593 "interrupt link1=0x%X ", iwk_mem_read(sc, err_ptr))); 5594 err_ptr += sizeof (uint32_t); 5595 IWK_DBG((IWK_DEBUG_EEPROM, 5596 "interrupt link2=0x%X ", iwk_mem_read(sc, err_ptr))); 5597 err_ptr += sizeof (uint32_t); 5598 IWK_DBG((IWK_DEBUG_EEPROM, "data1=0x%X ", iwk_mem_read(sc, err_ptr))); 5599 err_ptr += sizeof (uint32_t); 5600 IWK_DBG((IWK_DEBUG_EEPROM, "data2=0x%X ", iwk_mem_read(sc, err_ptr))); 5601 err_ptr += sizeof (uint32_t); 5602 IWK_DBG((IWK_DEBUG_EEPROM, "line=%d ", iwk_mem_read(sc, err_ptr))); 5603 err_ptr += sizeof (uint32_t); 5604 IWK_DBG((IWK_DEBUG_EEPROM, "bcon_time=%d ", iwk_mem_read(sc, err_ptr))); 5605 err_ptr += sizeof (uint32_t); 5606 IWK_DBG((IWK_DEBUG_EEPROM, "tsf_low=%d ", iwk_mem_read(sc, err_ptr))); 5607 err_ptr += sizeof (uint32_t); 5608 IWK_DBG((IWK_DEBUG_EEPROM, "tsf_hi=%d\n", iwk_mem_read(sc, err_ptr))); 5609 5610 iwk_mac_access_exit(sc); 5611 } 5612