/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2007, Intel Corporation
 * All rights reserved.
 */

/*
 * Copyright (c) 2006
 * Copyright (c) 2007
 *	Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for Intel PRO/Wireless 4965AGN(kedron) 802.11 network adapters.
 */

#include <sys/types.h>
#include <sys/byteorder.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/strsubr.h>
#include <sys/ethernet.h>
#include <inet/common.h>
#include <inet/nd.h>
#include <inet/mi.h>
#include <sys/note.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/modctl.h>
#include <sys/devops.h>
#include <sys/dlpi.h>
#include <sys/mac_provider.h>
#include <sys/mac_wifi.h>
#include <sys/net80211.h>
#include <sys/net80211_proto.h>
#include <sys/varargs.h>
#include <sys/policy.h>
#include <sys/pci.h>

#include "iwk_calibration.h"
#include "iwk_hw.h"
#include "iwk_eeprom.h"
#include "iwk2_var.h"
#include <inet/wifi_ioctl.h>

#ifdef DEBUG
#define	IWK_DEBUG_80211		(1 << 0)
#define	IWK_DEBUG_CMD		(1 << 1)
#define	IWK_DEBUG_DMA		(1 << 2)
#define	IWK_DEBUG_EEPROM	(1 << 3)
#define	IWK_DEBUG_FW		(1 << 4)
#define	IWK_DEBUG_HW		(1 << 5)
#define	IWK_DEBUG_INTR		(1 << 6)
#define	IWK_DEBUG_MRR		(1 << 7)
#define	IWK_DEBUG_PIO		(1 << 8)
#define	IWK_DEBUG_RX		(1 << 9)
#define	IWK_DEBUG_SCAN		(1 << 10)
#define	IWK_DEBUG_TX		(1 << 11)
#define	IWK_DEBUG_RATECTL	(1 << 12)
#define	IWK_DEBUG_RADIO		(1 << 13)
#define	IWK_DEBUG_RESUME	(1 << 14)
#define	IWK_DEBUG_CALIBRATION	(1 << 15)
uint32_t iwk_dbg_flags = 0;
#define	IWK_DBG(x) \
	iwk_dbg x
#else
#define	IWK_DBG(x)
#endif

static void *iwk_soft_state_p = NULL;
static uint8_t iwk_fw_bin [] = {
#include "fw-iw/iw4965.ucode.hex"
};

/* DMA attributes for a shared page */
static ddi_dma_attr_t sh_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
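
/*
 * Note: each of the DMA attribute structures in this file allows only a
 * single DMA segment, so a successful bind in iwk_alloc_dma_mem() below
 * always yields exactly one cookie. The 0x1000 (4 KB) alignment used here
 * and for the keep warm descriptor matches the "must be aligned on a
 * 4K-page boundary" requirement noted at their allocation sites.
 */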
/* DMA attributes for a keep warm DRAM descriptor */
static ddi_dma_attr_t kw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a ring descriptor */
static ddi_dma_attr_t ring_desc_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a cmd */
static ddi_dma_attr_t cmd_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a rx buffer */
static ddi_dma_attr_t rx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a tx buffer.
 * The hardware supports up to 4 segments per descriptor. For now all the
 * wifi drivers put the whole frame into a single descriptor, so we set the
 * maximum number of segments to 1, the same as for the rx buffer. We may
 * leverage the hardware's multi-segment capability in the future, which is
 * why the rx and tx buffer_dma_attr are kept separate.
 */
static ddi_dma_attr_t tx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for text and data part in the firmware */
static ddi_dma_attr_t fw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0x7fffffff,	/* maximum DMAable byte count */
	0x10,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};


/* regs access attributes */
static ddi_device_acc_attr_t iwk_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/* DMA access attributes */
static ddi_device_acc_attr_t iwk_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

static int	iwk_ring_init(iwk_sc_t *);
static void	iwk_ring_free(iwk_sc_t *);
static int	iwk_alloc_shared(iwk_sc_t *);
static void	iwk_free_shared(iwk_sc_t *);
static int	iwk_alloc_kw(iwk_sc_t *);
static void	iwk_free_kw(iwk_sc_t *);
static int	iwk_alloc_fw_dma(iwk_sc_t *);
static void	iwk_free_fw_dma(iwk_sc_t *);
static int	iwk_alloc_rx_ring(iwk_sc_t *);
static void	iwk_reset_rx_ring(iwk_sc_t *);
static void	iwk_free_rx_ring(iwk_sc_t *);
static int	iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *,
    int, int);
static void	iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
static void	iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);

static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *);
static void	iwk_node_free(ieee80211_node_t *);
static int	iwk_newstate(ieee80211com_t *, enum ieee80211_state, int);
static int	iwk_key_set(ieee80211com_t *, const struct ieee80211_key *,
    const uint8_t mac[IEEE80211_ADDR_LEN]);
static void	iwk_mac_access_enter(iwk_sc_t *);
static void	iwk_mac_access_exit(iwk_sc_t *);
static uint32_t	iwk_reg_read(iwk_sc_t *, uint32_t);
static void	iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t);
static void	iwk_reg_write_region_4(iwk_sc_t *, uint32_t,
    uint32_t *, int);
static int	iwk_load_firmware(iwk_sc_t *);
static void	iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *,
    iwk_rx_data_t *);
static void	iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *,
    iwk_rx_data_t *);
static void	iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *);
static uint_t	iwk_intr(caddr_t, caddr_t);
static int	iwk_eep_load(iwk_sc_t *sc);
static void	iwk_get_mac_from_eep(iwk_sc_t *sc);
static int	iwk_eep_sem_down(iwk_sc_t *sc);
static void	iwk_eep_sem_up(iwk_sc_t *sc);
static uint_t	iwk_rx_softintr(caddr_t, caddr_t);
static uint8_t	iwk_rate_to_plcp(int);
static int	iwk_cmd(iwk_sc_t *, int, const void *, int, int);
static void	iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t);
static int	iwk_hw_set_before_auth(iwk_sc_t *);
static int	iwk_scan(iwk_sc_t *);
static int	iwk_config(iwk_sc_t *);
static void	iwk_stop_master(iwk_sc_t *);
static int	iwk_power_up(iwk_sc_t *);
static int	iwk_preinit(iwk_sc_t *);
static int	iwk_init(iwk_sc_t *);
static void	iwk_stop(iwk_sc_t *);
static void	iwk_amrr_init(iwk_amrr_t *);
static void	iwk_amrr_timeout(iwk_sc_t *);
static void	iwk_amrr_ratectl(void *, ieee80211_node_t *);
static int32_t	iwk_curr_tempera(iwk_sc_t *sc);
static int	iwk_tx_power_calibration(iwk_sc_t *sc);
static inline int	iwk_is_24G_band(iwk_sc_t *sc);
static inline int	iwk_is_fat_channel(iwk_sc_t *sc);
static int	iwk_txpower_grp(uint16_t channel);
static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
    uint16_t channel,
    int is_24G, int is_fat, int is_hi_chan);
static int32_t	iwk_band_number(iwk_sc_t *sc, uint16_t channel);
static int	iwk_division(int32_t num, int32_t denom, int32_t *res);
static int32_t	iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
    int32_t x2, int32_t y2);
static int	iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
    struct iwk_eep_calib_channel_info *chan_info);
static int32_t	iwk_voltage_compensation(int32_t eep_voltage,
    int32_t curr_voltage);
static int32_t	iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G);
static int	iwk_txpower_table_cmd_init(iwk_sc_t *sc,
    struct iwk_tx_power_db *tp_db);
static void	iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc);
static int	iwk_is_associated(iwk_sc_t *sc);
static int	iwk_rxgain_diff_init(iwk_sc_t *sc);
static int	iwk_rxgain_diff(iwk_sc_t *sc);
static int	iwk_rx_sens_init(iwk_sc_t *sc);
static int	iwk_rx_sens(iwk_sc_t *sc);
static int	iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
static int	iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time);

static void	iwk_write_event_log(iwk_sc_t *);
static void	iwk_write_error_log(iwk_sc_t *);

static int	iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int	iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int	iwk_quiesce(dev_info_t *dip);

/*
 * GLD specific operations
 */
static int	iwk_m_stat(void *arg, uint_t stat, uint64_t *val);
static int	iwk_m_start(void *arg);
static void	iwk_m_stop(void *arg);
static int	iwk_m_unicst(void *arg, const uint8_t *macaddr);
static int	iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m);
static int	iwk_m_promisc(void *arg, boolean_t on);
static mblk_t	*iwk_m_tx(void *arg, mblk_t *mp);
static void	iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
static int	iwk_m_setprop(void *arg, const char *pr_name,
    mac_prop_id_t wldp_pr_name, uint_t wldp_length, const void *wldp_buf);
static int	iwk_m_getprop(void *arg, const char *pr_name,
    mac_prop_id_t wldp_pr_name, uint_t pr_flags, uint_t wldp_length,
    void *wldp_buf, uint_t *perm);
static void	iwk_destroy_locks(iwk_sc_t *sc);
static int	iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type);
static void	iwk_thread(iwk_sc_t *sc);

/*
 * Supported rates for 802.11b/g modes (in 500Kbps unit).
 * 11a and 11n support will be added later.
 */
static const struct ieee80211_rateset iwk_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

static const struct ieee80211_rateset iwk_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
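
/*
 * In Mbps the 11b set above is 1/2/5.5/11, and the 11g set adds the OFDM
 * rates 6/9/12/18/24/36/48/54 (each entry is twice the rate in Mbps, per
 * the 500Kbps unit noted in the comment above).
 */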

/*
 * For mfthread only
 */
extern pri_t minclsyspri;

#define	DRV_NAME_4965	"iwk"

/*
 * Module Loading Data & Entry Points
 */
DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach,
    iwk_detach, nodev, NULL, D_MP, NULL, iwk_quiesce);

static struct modldrv iwk_modldrv = {
	&mod_driverops,
	"Intel(R) 4965AGN driver(N)",
	&iwk_devops
};

static struct modlinkage iwk_modlinkage = {
	MODREV_1,
	&iwk_modldrv,
	NULL
};

int
_init(void)
{
	int	status;

	status = ddi_soft_state_init(&iwk_soft_state_p,
	    sizeof (iwk_sc_t), 1);
	if (status != DDI_SUCCESS)
		return (status);

	mac_init_ops(&iwk_devops, DRV_NAME_4965);
	status = mod_install(&iwk_modlinkage);
	if (status != DDI_SUCCESS) {
		mac_fini_ops(&iwk_devops);
		ddi_soft_state_fini(&iwk_soft_state_p);
	}

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&iwk_modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&iwk_devops);
		ddi_soft_state_fini(&iwk_soft_state_p);
	}

	return (status);
}

int
_info(struct modinfo *mip)
{
	return (mod_info(&iwk_modlinkage, mip));
}

/*
 * Mac Call Back entries
 */
mac_callbacks_t	iwk_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP,
	iwk_m_stat,
	iwk_m_start,
	iwk_m_stop,
	iwk_m_promisc,
	iwk_m_multicst,
	iwk_m_unicst,
	iwk_m_tx,
	iwk_m_ioctl,
	NULL,
	NULL,
	NULL,
	iwk_m_setprop,
	iwk_m_getprop
};

#ifdef DEBUG
void
iwk_dbg(uint32_t flags, const char *fmt, ...)
{
	va_list	ap;

	if (flags & iwk_dbg_flags) {
		va_start(ap, fmt);
		vcmn_err(CE_NOTE, fmt, ap);
		va_end(ap);
	}
}
#endif

/*
 * device operations
 */
int
iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	iwk_sc_t	*sc;
	ieee80211com_t	*ic;
	int		instance, err, i;
	char		strbuf[32];
	wifi_data_t	wd = { 0 };
	mac_register_t	*macp;

	int		intr_type;
	int		intr_count;
	int		intr_actual;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		sc = ddi_get_soft_state(iwk_soft_state_p,
		    ddi_get_instance(dip));
		ASSERT(sc != NULL);
		mutex_enter(&sc->sc_glock);
		sc->sc_flags &= ~IWK_F_SUSPEND;
		mutex_exit(&sc->sc_glock);
		if (sc->sc_flags & IWK_F_RUNNING) {
			(void) iwk_init(sc);
			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
		}
		IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n"));
		return (DDI_SUCCESS);
	default:
		err = DDI_FAILURE;
		goto attach_fail1;
	}

	instance = ddi_get_instance(dip);
	err = ddi_soft_state_zalloc(iwk_soft_state_p, instance);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "iwk_attach(): failed to allocate soft state\n");
		goto attach_fail1;
	}
	sc = ddi_get_soft_state(iwk_soft_state_p, instance);
	sc->sc_dip = dip;

	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
	    &iwk_reg_accattr, &sc->sc_cfg_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "iwk_attach(): failed to map config space regs\n");
		goto attach_fail2;
	}
	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0);
	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
	if (!sc->sc_clsz)
		sc->sc_clsz = 16;
	sc->sc_clsz = (sc->sc_clsz << 2);
	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
	    IEEE80211_MTU + IEEE80211_CRC_LEN +
	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);
	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
	    0, 0, &iwk_reg_accattr, &sc->sc_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "iwk_attach(): failed to map device regs\n");
		goto attach_fail2a;
	}

	err = ddi_intr_get_supported_types(dip, &intr_type);
	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "Fixed type interrupt is not supported\n");
		goto attach_fail_intr_a;
	}

	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "No fixed interrupts\n");
		goto attach_fail_intr_a;
	}

	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);

	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
	    intr_count, &intr_actual, 0);
	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "ddi_intr_alloc() failed 0x%x\n", err);
		goto attach_fail_intr_b;
	}

	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "ddi_intr_get_pri() failed 0x%x\n", err);
		goto attach_fail_intr_c;
	}

	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));

	cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL);
	/*
	 * initialize the mfthread
	 */
	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
	sc->sc_mf_thread = NULL;
	sc->sc_mf_thread_switch = 0;

	/*
	 * Allocate shared page.
	 */
	err = iwk_alloc_shared(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "failed to allocate shared page\n");
		goto attach_fail3;
	}

	/*
	 * Allocate keep warm page.
	 */
	err = iwk_alloc_kw(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "failed to allocate keep warm page\n");
		goto attach_fail3a;
	}

	/*
	 * Do some necessary hardware initializations.
	 */
	err = iwk_preinit(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "failed to init hardware\n");
		goto attach_fail4;
	}

	/* initialize EEPROM */
	err = iwk_eep_load(sc);  /* get hardware configurations from eeprom */
	if (err != 0) {
		cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n");
		goto attach_fail4;
	}

	if (sc->sc_eep_map.calib_version < EEP_TX_POWER_VERSION_NEW) {
		cmn_err(CE_WARN, "older EEPROM detected\n");
		goto attach_fail4;
	}

	iwk_get_mac_from_eep(sc);

	err = iwk_ring_init(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "failed to allocate and initialize ring\n");
		goto attach_fail4;
	}

	sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin;

	err = iwk_alloc_fw_dma(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "failed to allocate firmware dma\n");
		goto attach_fail5;
	}

	/*
	 * Initialize the wifi part, which will be used by
	 * generic layer
	 */
	ic = &sc->sc_ic;
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;
	ic->ic_maxrssi = 100; /* experimental number */
	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
	/*
	 * use software WEP and TKIP, hardware CCMP;
	 */
	ic->ic_caps |= IEEE80211_C_AES_CCM;
	/*
	 * Support WPA/WPA2
	 */
	ic->ic_caps |= IEEE80211_C_WPA;

	/* set supported .11b and .11g rates */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g;

	/* set supported .11b and .11g channels (1 through 11) */
	for (i = 1; i <= 11; i++) {
		ic->ic_sup_channels[i].ich_freq =
		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
		ic->ic_sup_channels[i].ich_flags =
		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
		    IEEE80211_CHAN_PASSIVE;
	}

	ic->ic_xmit = iwk_send;
	/*
	 * init Wifi layer
	 */
	ieee80211_attach(ic);

	/*
	 * different instance has different WPA door
	 */
	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
	    ddi_driver_name(dip),
	    ddi_get_instance(dip));

	/*
	 * Override 80211 default routines
	 */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwk_newstate;
	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
	ic->ic_node_alloc = iwk_node_alloc;
	ic->ic_node_free = iwk_node_free;
	ic->ic_crypto.cs_key_set = iwk_key_set;
	ieee80211_media_init(ic);
	/*
	 * initialize default tx key
	 */
	ic->ic_def_txkey = 0;
	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
	    iwk_rx_softintr, (caddr_t)sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "add soft interrupt failed\n");
		goto attach_fail7;
	}

	/*
	 * Add the interrupt handler
	 */
	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwk_intr,
	    (caddr_t)sc, NULL);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "ddi_intr_add_handler() failed\n");
		goto attach_fail8;
	}

	err = ddi_intr_enable(sc->sc_intr_htable[0]);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_attach(): "
		    "ddi_intr_enable() failed\n");
		goto attach_fail_intr_d;
	}

	/*
	 * Initialize pointer to device specific functions
	 */
	wd.wd_secalloc = WIFI_SEC_NONE;
	wd.wd_opmode = ic->ic_opmode;
	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);

	macp = mac_alloc(MAC_VERSION);
	if (macp == NULL) {
		cmn_err(CE_WARN,
		    "iwk_attach(): failed to do mac_alloc()\n");
		err = DDI_FAILURE;
		goto attach_fail9;
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
	macp->m_driver = sc;
	macp->m_dip = dip;
	macp->m_src_addr = ic->ic_macaddr;
	macp->m_callbacks = &iwk_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = IEEE80211_MTU;
	macp->m_pdata = &wd;
	macp->m_pdata_size = sizeof (wd);

	/*
	 * Register the macp to mac
	 */
	err = mac_register(macp, &ic->ic_mach);
	mac_free(macp);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "iwk_attach(): failed to do mac_register()\n");
		goto attach_fail9;
	}

	/*
	 * Create minor node of type DDI_NT_NET_WIFI
	 */
	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
	    instance + 1, DDI_NT_NET_WIFI, 0);
	if (err != DDI_SUCCESS)
		cmn_err(CE_WARN,
		    "iwk_attach(): failed to do ddi_create_minor_node()\n");

	/*
	 * Notify link is down now
	 */
	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);

	/*
	 * create the mf thread to handle the link status,
	 * recover from fatal errors, etc.
774 */ 775 sc->sc_mf_thread_switch = 1; 776 if (sc->sc_mf_thread == NULL) 777 sc->sc_mf_thread = thread_create((caddr_t)NULL, 0, 778 iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri); 779 780 sc->sc_flags |= IWK_F_ATTACHED; 781 782 return (DDI_SUCCESS); 783 attach_fail9: 784 (void) ddi_intr_disable(sc->sc_intr_htable[0]); 785 attach_fail_intr_d: 786 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]); 787 788 attach_fail8: 789 (void) ddi_intr_remove_softint(sc->sc_soft_hdl); 790 sc->sc_soft_hdl = NULL; 791 attach_fail7: 792 ieee80211_detach(ic); 793 attach_fail6: 794 iwk_free_fw_dma(sc); 795 attach_fail5: 796 iwk_ring_free(sc); 797 attach_fail4: 798 iwk_free_kw(sc); 799 attach_fail3a: 800 iwk_free_shared(sc); 801 attach_fail3: 802 iwk_destroy_locks(sc); 803 attach_fail_intr_c: 804 (void) ddi_intr_free(sc->sc_intr_htable[0]); 805 attach_fail_intr_b: 806 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t)); 807 attach_fail_intr_a: 808 ddi_regs_map_free(&sc->sc_handle); 809 attach_fail2a: 810 ddi_regs_map_free(&sc->sc_cfg_handle); 811 attach_fail2: 812 ddi_soft_state_free(iwk_soft_state_p, instance); 813 attach_fail1: 814 return (err); 815 } 816 817 int 818 iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 819 { 820 iwk_sc_t *sc; 821 int err; 822 823 sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip)); 824 ASSERT(sc != NULL); 825 826 switch (cmd) { 827 case DDI_DETACH: 828 break; 829 case DDI_SUSPEND: 830 if (sc->sc_flags & IWK_F_RUNNING) { 831 iwk_stop(sc); 832 } 833 mutex_enter(&sc->sc_glock); 834 sc->sc_flags |= IWK_F_SUSPEND; 835 mutex_exit(&sc->sc_glock); 836 IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n")); 837 return (DDI_SUCCESS); 838 default: 839 return (DDI_FAILURE); 840 } 841 842 if (!(sc->sc_flags & IWK_F_ATTACHED)) 843 return (DDI_FAILURE); 844 845 err = mac_disable(sc->sc_ic.ic_mach); 846 if (err != DDI_SUCCESS) 847 return (err); 848 849 /* 850 * Destroy the mf_thread 851 */ 852 mutex_enter(&sc->sc_mt_lock); 853 sc->sc_mf_thread_switch = 0; 854 while (sc->sc_mf_thread != NULL) { 855 if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) 856 break; 857 } 858 mutex_exit(&sc->sc_mt_lock); 859 860 iwk_stop(sc); 861 DELAY(500000); 862 863 /* 864 * Unregiste from the MAC layer subsystem 865 */ 866 (void) mac_unregister(sc->sc_ic.ic_mach); 867 868 mutex_enter(&sc->sc_glock); 869 iwk_free_fw_dma(sc); 870 iwk_ring_free(sc); 871 iwk_free_kw(sc); 872 iwk_free_shared(sc); 873 mutex_exit(&sc->sc_glock); 874 875 (void) ddi_intr_disable(sc->sc_intr_htable[0]); 876 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]); 877 (void) ddi_intr_free(sc->sc_intr_htable[0]); 878 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t)); 879 880 (void) ddi_intr_remove_softint(sc->sc_soft_hdl); 881 sc->sc_soft_hdl = NULL; 882 883 /* 884 * detach ieee80211 885 */ 886 ieee80211_detach(&sc->sc_ic); 887 888 iwk_destroy_locks(sc); 889 890 ddi_regs_map_free(&sc->sc_handle); 891 ddi_regs_map_free(&sc->sc_cfg_handle); 892 ddi_remove_minor_node(dip, NULL); 893 ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip)); 894 895 return (DDI_SUCCESS); 896 } 897 898 /* 899 * quiesce(9E) entry point. 900 * 901 * This function is called when the system is single-threaded at high 902 * PIL with preemption disabled. Therefore, this function must not be 903 * blocked. 904 * 905 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 906 * DDI_FAILURE indicates an error condition and should almost never happen. 
907 */ 908 int 909 iwk_quiesce(dev_info_t *dip) 910 { 911 iwk_sc_t *sc; 912 913 sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip)); 914 ASSERT(sc != NULL); 915 916 /* no message prints and no lock accquisition */ 917 #ifdef DEBUG 918 iwk_dbg_flags = 0; 919 #endif 920 sc->sc_flags |= IWK_F_QUIESCED; 921 922 iwk_stop(sc); 923 924 return (DDI_SUCCESS); 925 } 926 927 static void 928 iwk_destroy_locks(iwk_sc_t *sc) 929 { 930 cv_destroy(&sc->sc_mt_cv); 931 mutex_destroy(&sc->sc_mt_lock); 932 cv_destroy(&sc->sc_tx_cv); 933 cv_destroy(&sc->sc_cmd_cv); 934 cv_destroy(&sc->sc_fw_cv); 935 mutex_destroy(&sc->sc_tx_lock); 936 mutex_destroy(&sc->sc_glock); 937 } 938 939 /* 940 * Allocate an area of memory and a DMA handle for accessing it 941 */ 942 static int 943 iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize, 944 ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p, 945 uint_t dma_flags, iwk_dma_t *dma_p) 946 { 947 caddr_t vaddr; 948 int err; 949 950 /* 951 * Allocate handle 952 */ 953 err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p, 954 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl); 955 if (err != DDI_SUCCESS) { 956 dma_p->dma_hdl = NULL; 957 return (DDI_FAILURE); 958 } 959 960 /* 961 * Allocate memory 962 */ 963 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p, 964 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING), 965 DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl); 966 if (err != DDI_SUCCESS) { 967 ddi_dma_free_handle(&dma_p->dma_hdl); 968 dma_p->dma_hdl = NULL; 969 dma_p->acc_hdl = NULL; 970 return (DDI_FAILURE); 971 } 972 973 /* 974 * Bind the two together 975 */ 976 dma_p->mem_va = vaddr; 977 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL, 978 vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL, 979 &dma_p->cookie, &dma_p->ncookies); 980 if (err != DDI_DMA_MAPPED) { 981 ddi_dma_mem_free(&dma_p->acc_hdl); 982 ddi_dma_free_handle(&dma_p->dma_hdl); 983 dma_p->acc_hdl = NULL; 984 dma_p->dma_hdl = NULL; 985 return (DDI_FAILURE); 986 } 987 988 dma_p->nslots = ~0U; 989 dma_p->size = ~0U; 990 dma_p->token = ~0U; 991 dma_p->offset = 0; 992 return (DDI_SUCCESS); 993 } 994 995 /* 996 * Free one allocated area of DMAable memory 997 */ 998 static void 999 iwk_free_dma_mem(iwk_dma_t *dma_p) 1000 { 1001 if (dma_p->dma_hdl != NULL) { 1002 if (dma_p->ncookies) { 1003 (void) ddi_dma_unbind_handle(dma_p->dma_hdl); 1004 dma_p->ncookies = 0; 1005 } 1006 ddi_dma_free_handle(&dma_p->dma_hdl); 1007 dma_p->dma_hdl = NULL; 1008 } 1009 1010 if (dma_p->acc_hdl != NULL) { 1011 ddi_dma_mem_free(&dma_p->acc_hdl); 1012 dma_p->acc_hdl = NULL; 1013 } 1014 } 1015 1016 /* 1017 * 1018 */ 1019 static int 1020 iwk_alloc_fw_dma(iwk_sc_t *sc) 1021 { 1022 int err = DDI_SUCCESS; 1023 iwk_dma_t *dma_p; 1024 char *t; 1025 1026 /* 1027 * firmware image layout: 1028 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->| 1029 */ 1030 t = (char *)(sc->sc_hdr + 1); 1031 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz), 1032 &fw_dma_attr, &iwk_dma_accattr, 1033 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1034 &sc->sc_dma_fw_text); 1035 dma_p = &sc->sc_dma_fw_text; 1036 IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n", 1037 dma_p->ncookies, dma_p->cookie.dmac_address, 1038 dma_p->cookie.dmac_size)); 1039 if (err != DDI_SUCCESS) { 1040 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc" 1041 " text dma memory"); 1042 goto fail; 1043 } 1044 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz)); 1045 1046 t += LE_32(sc->sc_hdr->textsz); 1047 err = iwk_alloc_dma_mem(sc, 
	    LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data);
	dma_p = &sc->sc_dma_fw_data;
	IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " data dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data_bak);
	dma_p = &sc->sc_dma_fw_data_bak;
	IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " data backup dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	t += LE_32(sc->sc_hdr->datasz);
	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
	    &fw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_text);
	dma_p = &sc->sc_dma_fw_init_text;
	IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " init text dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));

	t += LE_32(sc->sc_hdr->init_textsz);
	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
	    &fw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_data);
	dma_p = &sc->sc_dma_fw_init_data;
	IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " init data dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));

	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
fail:
	return (err);
}

static void
iwk_free_fw_dma(iwk_sc_t *sc)
{
	iwk_free_dma_mem(&sc->sc_dma_fw_text);
	iwk_free_dma_mem(&sc->sc_dma_fw_data);
	iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
	iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
	iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
}

/*
 * Allocate a shared page between host and NIC.
 */
static int
iwk_alloc_shared(iwk_sc_t *sc)
{
	iwk_dma_t *dma_p;
	int err = DDI_SUCCESS;

	/* must be aligned on a 4K-page boundary */
	err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
	    &sh_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_sh);
	if (err != DDI_SUCCESS)
		goto fail;
	sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;

	dma_p = &sc->sc_dma_sh;
	IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);
fail:
	iwk_free_shared(sc);
	return (err);
}

static void
iwk_free_shared(iwk_sc_t *sc)
{
	iwk_free_dma_mem(&sc->sc_dma_sh);
}

/*
 * Allocate a keep warm page.
 */
static int
iwk_alloc_kw(iwk_sc_t *sc)
{
	iwk_dma_t *dma_p;
	int err = DDI_SUCCESS;

	/* must be aligned on a 4K-page boundary */
	err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE,
	    &kw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_kw);
	if (err != DDI_SUCCESS)
		goto fail;

	dma_p = &sc->sc_dma_kw;
	IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);
fail:
	iwk_free_kw(sc);
	return (err);
}

static void
iwk_free_kw(iwk_sc_t *sc)
{
	iwk_free_dma_mem(&sc->sc_dma_kw);
}

static int
iwk_alloc_rx_ring(iwk_sc_t *sc)
{
	iwk_rx_ring_t *ring;
	iwk_rx_data_t *data;
	iwk_dma_t *dma_p;
	int i, err = DDI_SUCCESS;

	ring = &sc->sc_rxq;
	ring->cur = 0;

	err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
	    &ring_desc_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "dma alloc rx ring desc failed\n");
		goto fail;
	}
	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
	dma_p = &ring->dma_desc;
	IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/*
	 * Allocate Rx buffers.
	 */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		data = &ring->data[i];
		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &rx_buffer_dma_attr, &iwk_dma_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			cmn_err(CE_WARN, "dma alloc rx ring buf[%d] "
			    "failed\n", i);
			goto fail;
		}
		/*
		 * the physical address bits [8-36] are used,
		 * instead of bits [0-31] as in the 3945.
1238 */ 1239 ring->desc[i] = LE_32((uint32_t) 1240 (data->dma_data.cookie.dmac_address >> 8)); 1241 } 1242 dma_p = &ring->data[0].dma_data; 1243 IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx " 1244 "size:%lx]\n", 1245 dma_p->ncookies, dma_p->cookie.dmac_address, 1246 dma_p->cookie.dmac_size)); 1247 1248 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV); 1249 1250 return (err); 1251 1252 fail: 1253 iwk_free_rx_ring(sc); 1254 return (err); 1255 } 1256 1257 static void 1258 iwk_reset_rx_ring(iwk_sc_t *sc) 1259 { 1260 int n; 1261 1262 iwk_mac_access_enter(sc); 1263 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 1264 for (n = 0; n < 2000; n++) { 1265 if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) 1266 break; 1267 DELAY(1000); 1268 } 1269 1270 if (n == 2000) 1271 IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n")); 1272 1273 iwk_mac_access_exit(sc); 1274 1275 sc->sc_rxq.cur = 0; 1276 } 1277 1278 static void 1279 iwk_free_rx_ring(iwk_sc_t *sc) 1280 { 1281 int i; 1282 1283 for (i = 0; i < RX_QUEUE_SIZE; i++) { 1284 if (sc->sc_rxq.data[i].dma_data.dma_hdl) 1285 IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data, 1286 DDI_DMA_SYNC_FORCPU); 1287 iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data); 1288 } 1289 1290 if (sc->sc_rxq.dma_desc.dma_hdl) 1291 IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV); 1292 iwk_free_dma_mem(&sc->sc_rxq.dma_desc); 1293 } 1294 1295 static int 1296 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring, 1297 int slots, int qid) 1298 { 1299 iwk_tx_data_t *data; 1300 iwk_tx_desc_t *desc_h; 1301 uint32_t paddr_desc_h; 1302 iwk_cmd_t *cmd_h; 1303 uint32_t paddr_cmd_h; 1304 iwk_dma_t *dma_p; 1305 int i, err = DDI_SUCCESS; 1306 1307 ring->qid = qid; 1308 ring->count = TFD_QUEUE_SIZE_MAX; 1309 ring->window = slots; 1310 ring->queued = 0; 1311 ring->cur = 0; 1312 1313 err = iwk_alloc_dma_mem(sc, 1314 TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t), 1315 &ring_desc_dma_attr, &iwk_dma_accattr, 1316 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1317 &ring->dma_desc); 1318 if (err != DDI_SUCCESS) { 1319 cmn_err(CE_WARN, "dma alloc tx ring desc[%d] " 1320 "failed\n", qid); 1321 goto fail; 1322 } 1323 dma_p = &ring->dma_desc; 1324 IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n", 1325 dma_p->ncookies, dma_p->cookie.dmac_address, 1326 dma_p->cookie.dmac_size)); 1327 1328 desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va; 1329 paddr_desc_h = ring->dma_desc.cookie.dmac_address; 1330 1331 err = iwk_alloc_dma_mem(sc, 1332 TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t), 1333 &cmd_dma_attr, &iwk_dma_accattr, 1334 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1335 &ring->dma_cmd); 1336 if (err != DDI_SUCCESS) { 1337 cmn_err(CE_WARN, "dma alloc tx ring cmd[%d] " 1338 "failed\n", qid); 1339 goto fail; 1340 } 1341 dma_p = &ring->dma_cmd; 1342 IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n", 1343 dma_p->ncookies, dma_p->cookie.dmac_address, 1344 dma_p->cookie.dmac_size)); 1345 1346 cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va; 1347 paddr_cmd_h = ring->dma_cmd.cookie.dmac_address; 1348 1349 /* 1350 * Allocate Tx buffers. 
1351 */ 1352 ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX, 1353 KM_NOSLEEP); 1354 if (ring->data == NULL) { 1355 cmn_err(CE_WARN, "could not allocate tx data slots\n"); 1356 goto fail; 1357 } 1358 1359 for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) { 1360 data = &ring->data[i]; 1361 err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz, 1362 &tx_buffer_dma_attr, &iwk_dma_accattr, 1363 DDI_DMA_WRITE | DDI_DMA_STREAMING, 1364 &data->dma_data); 1365 if (err != DDI_SUCCESS) { 1366 cmn_err(CE_WARN, "dma alloc tx ring " 1367 "buf[%d] failed\n", i); 1368 goto fail; 1369 } 1370 1371 data->desc = desc_h + i; 1372 data->paddr_desc = paddr_desc_h + 1373 _PTRDIFF(data->desc, desc_h); 1374 data->cmd = cmd_h + i; /* (i % slots); */ 1375 /* ((i % slots) * sizeof (iwk_cmd_t)); */ 1376 data->paddr_cmd = paddr_cmd_h + 1377 _PTRDIFF(data->cmd, cmd_h); 1378 } 1379 dma_p = &ring->data[0].dma_data; 1380 IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx " 1381 "size:%lx]\n", 1382 dma_p->ncookies, dma_p->cookie.dmac_address, 1383 dma_p->cookie.dmac_size)); 1384 1385 return (err); 1386 1387 fail: 1388 if (ring->data) 1389 kmem_free(ring->data, 1390 sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX); 1391 iwk_free_tx_ring(sc, ring); 1392 return (err); 1393 } 1394 1395 static void 1396 iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring) 1397 { 1398 iwk_tx_data_t *data; 1399 int i, n; 1400 1401 iwk_mac_access_enter(sc); 1402 1403 IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0); 1404 for (n = 0; n < 200; n++) { 1405 if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) & 1406 IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) 1407 break; 1408 DELAY(10); 1409 } 1410 if (n == 200) { 1411 IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n", 1412 ring->qid)); 1413 } 1414 iwk_mac_access_exit(sc); 1415 1416 for (i = 0; i < ring->count; i++) { 1417 data = &ring->data[i]; 1418 IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV); 1419 } 1420 1421 ring->queued = 0; 1422 ring->cur = 0; 1423 } 1424 1425 /*ARGSUSED*/ 1426 static void 1427 iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring) 1428 { 1429 int i; 1430 1431 if (ring->dma_desc.dma_hdl != NULL) 1432 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV); 1433 iwk_free_dma_mem(&ring->dma_desc); 1434 1435 if (ring->dma_cmd.dma_hdl != NULL) 1436 IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV); 1437 iwk_free_dma_mem(&ring->dma_cmd); 1438 1439 if (ring->data != NULL) { 1440 for (i = 0; i < ring->count; i++) { 1441 if (ring->data[i].dma_data.dma_hdl) 1442 IWK_DMA_SYNC(ring->data[i].dma_data, 1443 DDI_DMA_SYNC_FORDEV); 1444 iwk_free_dma_mem(&ring->data[i].dma_data); 1445 } 1446 kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t)); 1447 } 1448 } 1449 1450 static int 1451 iwk_ring_init(iwk_sc_t *sc) 1452 { 1453 int i, err = DDI_SUCCESS; 1454 1455 for (i = 0; i < IWK_NUM_QUEUES; i++) { 1456 if (i == IWK_CMD_QUEUE_NUM) 1457 continue; 1458 err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS, 1459 i); 1460 if (err != DDI_SUCCESS) 1461 goto fail; 1462 } 1463 err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM], 1464 TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM); 1465 if (err != DDI_SUCCESS) 1466 goto fail; 1467 err = iwk_alloc_rx_ring(sc); 1468 if (err != DDI_SUCCESS) 1469 goto fail; 1470 return (err); 1471 1472 fail: 1473 return (err); 1474 } 1475 1476 static void 1477 iwk_ring_free(iwk_sc_t *sc) 1478 { 1479 int i = IWK_NUM_QUEUES; 1480 1481 iwk_free_rx_ring(sc); 1482 while (--i >= 0) { 1483 iwk_free_tx_ring(sc, &sc->sc_txq[i]); 1484 } 1485 } 1486 1487 /* ARGSUSED 
 */
static ieee80211_node_t *
iwk_node_alloc(ieee80211com_t *ic)
{
	iwk_amrr_t *amrr;

	amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP);
	if (amrr != NULL)
		iwk_amrr_init(amrr);
	return (&amrr->in);
}

static void
iwk_node_free(ieee80211_node_t *in)
{
	ieee80211com_t *ic = in->in_ic;

	ic->ic_node_cleanup(in);
	if (in->in_wpa_ie != NULL)
		ieee80211_free(in->in_wpa_ie);
	kmem_free(in, sizeof (iwk_amrr_t));
}

/*ARGSUSED*/
static int
iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
{
	iwk_sc_t *sc = (iwk_sc_t *)ic;
	ieee80211_node_t *in = ic->ic_bss;
	enum ieee80211_state ostate = ic->ic_state;
	int i, err = IWK_SUCCESS;

	mutex_enter(&sc->sc_glock);
	switch (nstate) {
	case IEEE80211_S_SCAN:
		switch (ostate) {
		case IEEE80211_S_INIT:
		{
			iwk_add_sta_t node;

			sc->sc_flags |= IWK_F_SCANNING;
			iwk_set_led(sc, 2, 10, 2);

			/*
			 * clear association to receive beacons from
			 * all BSS'es
			 */
			sc->sc_config.assoc_id = 0;
			sc->sc_config.filter_flags &=
			    ~LE_32(RXON_FILTER_ASSOC_MSK);

			IWK_DBG((IWK_DEBUG_80211, "config chan %d "
			    "flags %x filter_flags %x\n", sc->sc_config.chan,
			    sc->sc_config.flags, sc->sc_config.filter_flags));

			err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
			    sizeof (iwk_rxon_cmd_t), 1);
			if (err != IWK_SUCCESS) {
				cmn_err(CE_WARN,
				    "could not clear association\n");
				sc->sc_flags &= ~IWK_F_SCANNING;
				mutex_exit(&sc->sc_glock);
				return (err);
			}

			/* add broadcast node to send probe request */
			(void) memset(&node, 0, sizeof (node));
			(void) memset(&node.bssid, 0xff, IEEE80211_ADDR_LEN);
			node.id = IWK_BROADCAST_ID;
			err = iwk_cmd(sc, REPLY_ADD_STA, &node,
			    sizeof (node), 1);
			if (err != IWK_SUCCESS) {
				cmn_err(CE_WARN, "could not add "
				    "broadcast node\n");
				sc->sc_flags &= ~IWK_F_SCANNING;
				mutex_exit(&sc->sc_glock);
				return (err);
			}
			break;
		}
		case IEEE80211_S_SCAN:
			mutex_exit(&sc->sc_glock);
			/* step to next channel before actual FW scan */
			err = sc->sc_newstate(ic, nstate, arg);
			mutex_enter(&sc->sc_glock);
			if ((err != 0) || ((err = iwk_scan(sc)) != 0)) {
				cmn_err(CE_WARN,
				    "could not initiate scan\n");
				sc->sc_flags &= ~IWK_F_SCANNING;
				ieee80211_cancel_scan(ic);
			}
			mutex_exit(&sc->sc_glock);
			return (err);
		default:
			break;

		}
		sc->sc_clk = 0;
		break;

	case IEEE80211_S_AUTH:
		if (ostate == IEEE80211_S_SCAN) {
			sc->sc_flags &= ~IWK_F_SCANNING;
		}

		/* reset state to handle reassociations correctly */
		sc->sc_config.assoc_id = 0;
		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);

		/*
		 * before sending the authentication and association request
		 * frames, we need to set up the hardware, e.g. tune to the
		 * same channel as the target AP...
1600 */ 1601 if ((err = iwk_hw_set_before_auth(sc)) != 0) { 1602 cmn_err(CE_WARN, "could not setup firmware for " 1603 "authentication\n"); 1604 mutex_exit(&sc->sc_glock); 1605 return (err); 1606 } 1607 break; 1608 1609 case IEEE80211_S_RUN: 1610 if (ostate == IEEE80211_S_SCAN) { 1611 sc->sc_flags &= ~IWK_F_SCANNING; 1612 } 1613 1614 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 1615 /* let LED blink when monitoring */ 1616 iwk_set_led(sc, 2, 10, 10); 1617 break; 1618 } 1619 IWK_DBG((IWK_DEBUG_80211, "iwk: associated.")); 1620 1621 /* none IBSS mode */ 1622 if (ic->ic_opmode != IEEE80211_M_IBSS) { 1623 /* update adapter's configuration */ 1624 if (sc->sc_assoc_id != in->in_associd) { 1625 cmn_err(CE_WARN, 1626 "associate ID mismatch: expected %d, " 1627 "got %d\n", 1628 in->in_associd, sc->sc_assoc_id); 1629 } 1630 sc->sc_config.assoc_id = in->in_associd & 0x3fff; 1631 /* 1632 * short preamble/slot time are 1633 * negotiated when associating 1634 */ 1635 sc->sc_config.flags &= 1636 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK | 1637 RXON_FLG_SHORT_SLOT_MSK); 1638 1639 if (ic->ic_flags & IEEE80211_F_SHSLOT) 1640 sc->sc_config.flags |= 1641 LE_32(RXON_FLG_SHORT_SLOT_MSK); 1642 1643 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 1644 sc->sc_config.flags |= 1645 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK); 1646 1647 sc->sc_config.filter_flags |= 1648 LE_32(RXON_FILTER_ASSOC_MSK); 1649 1650 if (ic->ic_opmode != IEEE80211_M_STA) 1651 sc->sc_config.filter_flags |= 1652 LE_32(RXON_FILTER_BCON_AWARE_MSK); 1653 1654 IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x" 1655 " filter_flags %x\n", 1656 sc->sc_config.chan, sc->sc_config.flags, 1657 sc->sc_config.filter_flags)); 1658 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 1659 sizeof (iwk_rxon_cmd_t), 1); 1660 if (err != IWK_SUCCESS) { 1661 cmn_err(CE_WARN, "could not update " 1662 "configuration\n"); 1663 mutex_exit(&sc->sc_glock); 1664 return (err); 1665 } 1666 } 1667 1668 /* obtain current temperature of chipset */ 1669 sc->sc_tempera = iwk_curr_tempera(sc); 1670 1671 /* 1672 * make Tx power calibration to determine 1673 * the gains of DSP and radio 1674 */ 1675 err = iwk_tx_power_calibration(sc); 1676 if (err) { 1677 cmn_err(CE_WARN, "iwk_newstate(): " 1678 "failed to set tx power table\n"); 1679 return (err); 1680 } 1681 1682 /* start automatic rate control */ 1683 mutex_enter(&sc->sc_mt_lock); 1684 if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) { 1685 sc->sc_flags |= IWK_F_RATE_AUTO_CTL; 1686 /* set rate to some reasonable initial value */ 1687 i = in->in_rates.ir_nrates - 1; 1688 while (i > 0 && IEEE80211_RATE(i) > 72) 1689 i--; 1690 in->in_txrate = i; 1691 } else { 1692 sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL; 1693 } 1694 mutex_exit(&sc->sc_mt_lock); 1695 1696 /* set LED on after associated */ 1697 iwk_set_led(sc, 2, 0, 1); 1698 break; 1699 1700 case IEEE80211_S_INIT: 1701 if (ostate == IEEE80211_S_SCAN) { 1702 sc->sc_flags &= ~IWK_F_SCANNING; 1703 } 1704 1705 /* set LED off after init */ 1706 iwk_set_led(sc, 2, 1, 0); 1707 break; 1708 case IEEE80211_S_ASSOC: 1709 if (ostate == IEEE80211_S_SCAN) { 1710 sc->sc_flags &= ~IWK_F_SCANNING; 1711 } 1712 1713 break; 1714 } 1715 1716 mutex_exit(&sc->sc_glock); 1717 1718 err = sc->sc_newstate(ic, nstate, arg); 1719 1720 if (nstate == IEEE80211_S_RUN) { 1721 1722 mutex_enter(&sc->sc_glock); 1723 1724 /* 1725 * make initialization for Receiver 1726 * sensitivity calibration 1727 */ 1728 err = iwk_rx_sens_init(sc); 1729 if (err) { 1730 cmn_err(CE_WARN, "iwk_newstate(): " 1731 "failed to init RX sensitivity\n"); 1732 
			mutex_exit(&sc->sc_glock);
			return (err);
		}

		/* initialize the Receiver gain balance calibration */
		err = iwk_rxgain_diff_init(sc);
		if (err) {
			cmn_err(CE_WARN, "iwk_newstate(): "
			    "failed to init phy calibration\n");
			mutex_exit(&sc->sc_glock);
			return (err);
		}

		mutex_exit(&sc->sc_glock);

	}

	return (err);
}

/*ARGSUSED*/
static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	iwk_sc_t *sc = (iwk_sc_t *)ic;
	iwk_add_sta_t node;
	int err;

	switch (k->wk_cipher->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
	case IEEE80211_CIPHER_TKIP:
		return (1); /* software handles these ciphers */
	case IEEE80211_CIPHER_AES_CCM:
		break;
	default:
		return (0);
	}
	sc->sc_config.filter_flags &= ~(RXON_FILTER_DIS_DECRYPT_MSK |
	    RXON_FILTER_DIS_GRP_DECRYPT_MSK);

	mutex_enter(&sc->sc_glock);

	/* update ap/multicast node */
	(void) memset(&node, 0, sizeof (node));
	if (IEEE80211_IS_MULTICAST(mac)) {
		(void) memset(node.bssid, 0xff, 6);
		node.id = IWK_BROADCAST_ID;
	} else {
		IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid);
		node.id = IWK_AP_ID;
	}
	if (k->wk_flags & IEEE80211_KEY_XMIT) {
		node.key_flags = 0;
		node.keyp = k->wk_keyix;
	} else {
		node.key_flags = (1 << 14);
		node.keyp = k->wk_keyix + 4;
	}
	(void) memcpy(node.key, k->wk_key, k->wk_keylen);
	node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
	node.sta_mask = STA_MODIFY_KEY_MASK;
	node.control = 1;
	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_key_set():"
		    "failed to update ap node\n");
		mutex_exit(&sc->sc_glock);
		return (0);
	}
	mutex_exit(&sc->sc_glock);
	return (1);
}

/*
 * exclusive access to mac begin.
 */
static void
iwk_mac_access_enter(iwk_sc_t *sc)
{
	uint32_t tmp;
	int n;

	tmp = IWK_READ(sc, CSR_GP_CNTRL);
	IWK_WRITE(sc, CSR_GP_CNTRL,
	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* wait until we succeed */
	for (n = 0; n < 1000; n++) {
		if ((IWK_READ(sc, CSR_GP_CNTRL) &
		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN)
			break;
		DELAY(10);
	}
	if (n == 1000)
		IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n"));
}

/*
 * exclusive access to mac end.
1833 */ 1834 static void 1835 iwk_mac_access_exit(iwk_sc_t *sc) 1836 { 1837 uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL); 1838 IWK_WRITE(sc, CSR_GP_CNTRL, 1839 tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1840 } 1841 1842 static uint32_t 1843 iwk_mem_read(iwk_sc_t *sc, uint32_t addr) 1844 { 1845 IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr); 1846 return (IWK_READ(sc, HBUS_TARG_MEM_RDAT)); 1847 } 1848 1849 static void 1850 iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data) 1851 { 1852 IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr); 1853 IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data); 1854 } 1855 1856 static uint32_t 1857 iwk_reg_read(iwk_sc_t *sc, uint32_t addr) 1858 { 1859 IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24)); 1860 return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT)); 1861 } 1862 1863 static void 1864 iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data) 1865 { 1866 IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24)); 1867 IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data); 1868 } 1869 1870 static void 1871 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr, 1872 uint32_t *data, int wlen) 1873 { 1874 for (; wlen > 0; wlen--, data++, addr += 4) 1875 iwk_reg_write(sc, addr, *data); 1876 } 1877 1878 1879 /* 1880 * ucode load/initialization steps: 1881 * 1) load Bootstrap State Machine (BSM) with "bootstrap" uCode image. 1882 * BSM contains a small memory that *always* stays powered up, so it can 1883 * retain the bootstrap program even when the card is in a power-saving 1884 * power-down state. The BSM loads the small program into ARC processor's 1885 * instruction memory when triggered by power-up. 1886 * 2) load Initialize image via bootstrap program. 1887 * The Initialize image sets up regulatory and calibration data for the 1888 * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed. 1889 * The 4965 reply contains calibration data for temperature, voltage and tx gain 1890 * correction. 1891 */ 1892 static int 1893 iwk_load_firmware(iwk_sc_t *sc) 1894 { 1895 uint32_t *boot_fw = (uint32_t *)sc->sc_boot; 1896 uint32_t size = sc->sc_hdr->bootsz; 1897 int n, err = IWK_SUCCESS; 1898 1899 /* 1900 * The physical address bit [4-35] of the initialize uCode. 1901 * In the initialize alive notify interrupt the physical address of 1902 * the runtime ucode will be set for loading. 
1903 */ 1904 iwk_mac_access_enter(sc); 1905 1906 iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG, 1907 sc->sc_dma_fw_init_text.cookie.dmac_address >> 4); 1908 iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG, 1909 sc->sc_dma_fw_init_data.cookie.dmac_address >> 4); 1910 iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG, 1911 sc->sc_dma_fw_init_text.cookie.dmac_size); 1912 iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG, 1913 sc->sc_dma_fw_init_data.cookie.dmac_size); 1914 1915 /* load bootstrap code into BSM memory */ 1916 iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw, 1917 size / sizeof (uint32_t)); 1918 1919 iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0); 1920 iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND); 1921 iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t)); 1922 1923 /* 1924 * prepare to load initialize uCode 1925 */ 1926 iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); 1927 1928 /* wait while the adapter is busy loading the firmware */ 1929 for (n = 0; n < 1000; n++) { 1930 if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) & 1931 BSM_WR_CTRL_REG_BIT_START)) 1932 break; 1933 DELAY(10); 1934 } 1935 if (n == 1000) { 1936 cmn_err(CE_WARN, "timeout transferring firmware\n"); 1937 err = ETIMEDOUT; 1938 return (err); 1939 } 1940 1941 /* for future power-save mode use */ 1942 iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); 1943 1944 iwk_mac_access_exit(sc); 1945 1946 return (err); 1947 } 1948 1949 /*ARGSUSED*/ 1950 static void 1951 iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data) 1952 { 1953 ieee80211com_t *ic = &sc->sc_ic; 1954 iwk_rx_ring_t *ring = &sc->sc_rxq; 1955 iwk_rx_phy_res_t *stat; 1956 ieee80211_node_t *in; 1957 uint32_t *tail; 1958 struct ieee80211_frame *wh; 1959 mblk_t *mp; 1960 uint16_t len, rssi, mrssi, agc; 1961 int16_t t; 1962 uint32_t ants, i; 1963 struct iwk_rx_non_cfg_phy *phyinfo; 1964 1965 /* assuming not 11n here. cope with 11n in phase-II */ 1966 stat = (iwk_rx_phy_res_t *)(desc + 1); 1967 if (stat->cfg_phy_cnt > 20) { 1968 return; 1969 } 1970 1971 phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy; 1972 agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS; 1973 mrssi = 0; 1974 ants = (stat->phy_flags & RX_PHY_FLAGS_ANTENNAE_MASK) >> 1975 RX_PHY_FLAGS_ANTENNAE_OFFSET; 1976 for (i = 0; i < 3; i++) { 1977 if (ants & (1 << i)) 1978 mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]); 1979 } 1980 t = mrssi - agc - 44; /* t is the dBM value */ 1981 /* 1982 * convert dBm to percentage ??? 
1983 */ 1984 rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t))) / 1985 (75 * 75); 1986 if (rssi > 100) 1987 rssi = 100; 1988 if (rssi < 1) 1989 rssi = 1; 1990 len = stat->byte_count; 1991 tail = (uint32_t *)((uint8_t *)(stat + 1) + stat->cfg_phy_cnt + len); 1992 1993 IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d " 1994 "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x " 1995 "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat), 1996 len, stat->rate.r.s.rate, stat->channel, 1997 LE_32(stat->timestampl), stat->non_cfg_phy_cnt, 1998 stat->cfg_phy_cnt, LE_32(*tail))); 1999 2000 if ((len < 16) || (len > sc->sc_dmabuf_sz)) { 2001 IWK_DBG((IWK_DEBUG_RX, "rx frame oversize\n")); 2002 return; 2003 } 2004 2005 /* 2006 * discard Rx frames with bad CRC 2007 */ 2008 if ((LE_32(*tail) & 2009 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) != 2010 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) { 2011 IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n", 2012 LE_32(*tail))); 2013 sc->sc_rx_err++; 2014 return; 2015 } 2016 2017 wh = (struct ieee80211_frame *) 2018 ((uint8_t *)(stat + 1)+ stat->cfg_phy_cnt); 2019 if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) { 2020 sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2); 2021 IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n", 2022 sc->sc_assoc_id)); 2023 } 2024 #ifdef DEBUG 2025 if (iwk_dbg_flags & IWK_DEBUG_RX) 2026 ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0); 2027 #endif 2028 in = ieee80211_find_rxnode(ic, wh); 2029 mp = allocb(len, BPRI_MED); 2030 if (mp) { 2031 (void) memcpy(mp->b_wptr, wh, len); 2032 mp->b_wptr += len; 2033 2034 /* send the frame to the 802.11 layer */ 2035 (void) ieee80211_input(ic, mp, in, rssi, 0); 2036 } else { 2037 sc->sc_rx_nobuf++; 2038 IWK_DBG((IWK_DEBUG_RX, 2039 "iwk_rx_intr(): alloc rx buf failed\n")); 2040 } 2041 /* release node reference */ 2042 ieee80211_free_node(in); 2043 } 2044 2045 /*ARGSUSED*/ 2046 static void 2047 iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data) 2048 { 2049 ieee80211com_t *ic = &sc->sc_ic; 2050 iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3]; 2051 iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1); 2052 iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss; 2053 2054 IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d" 2055 " retries=%d frame_count=%x nkill=%d " 2056 "rate=%x duration=%d status=%x\n", 2057 desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count, 2058 stat->bt_kill_count, stat->rate.r.s.rate, 2059 LE_32(stat->duration), LE_32(stat->status))); 2060 2061 amrr->txcnt++; 2062 IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt)); 2063 if (stat->ntries > 0) { 2064 amrr->retrycnt++; 2065 sc->sc_tx_retries++; 2066 IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n", 2067 sc->sc_tx_retries)); 2068 } 2069 2070 sc->sc_tx_timer = 0; 2071 2072 mutex_enter(&sc->sc_tx_lock); 2073 ring->queued--; 2074 if (ring->queued < 0) 2075 ring->queued = 0; 2076 if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) { 2077 sc->sc_need_reschedule = 0; 2078 mutex_exit(&sc->sc_tx_lock); 2079 mac_tx_update(ic->ic_mach); 2080 mutex_enter(&sc->sc_tx_lock); 2081 } 2082 mutex_exit(&sc->sc_tx_lock); 2083 } 2084 2085 static void 2086 iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc) 2087 { 2088 if ((desc->hdr.qid & 7) != 4) { 2089 return; 2090 } 2091 mutex_enter(&sc->sc_glock); 2092 sc->sc_flags |= IWK_F_CMD_DONE; 2093 cv_signal(&sc->sc_cmd_cv); 2094 mutex_exit(&sc->sc_glock); 2095 IWK_DBG((IWK_DEBUG_CMD, "rx cmd: " 2096 "qid=%x idx=%d flags=%x 
type=0x%x\n",
	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
	    desc->hdr.type));
}

static void
iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc)
{
	uint32_t base, i;
	struct iwk_alive_resp *ar =
	    (struct iwk_alive_resp *)(desc + 1);

	/* the microcontroller is ready */
	IWK_DBG((IWK_DEBUG_FW,
	    "microcode alive notification minor: %x major: %x type:"
	    " %x subtype: %x\n",
	    ar->ucode_minor, ar->ucode_major, ar->ver_type, ar->ver_subtype));

	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
		IWK_DBG((IWK_DEBUG_FW,
		    "microcontroller initialization failed\n"));
	}
	if (ar->ver_subtype == INITIALIZE_SUBTYPE) {
		IWK_DBG((IWK_DEBUG_FW,
		    "initialization alive received.\n"));
		(void) memcpy(&sc->sc_card_alive_init, ar,
		    sizeof (struct iwk_init_alive_resp));
		/* XXX get temperature */
		iwk_mac_access_enter(sc);
		iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
		    sc->sc_dma_fw_text.cookie.dmac_address >> 4);
		iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
		    sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4);
		iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
		    sc->sc_dma_fw_data.cookie.dmac_size);
		iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
		    sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000);
		iwk_mac_access_exit(sc);
	} else {
		IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n"));
		(void) memcpy(&sc->sc_card_alive_run, ar,
		    sizeof (struct iwk_alive_resp));

		/*
		 * Init SCD related registers to make Tx work. XXX
		 */
		iwk_mac_access_enter(sc);

		/* read the SRAM base address of the SCD data area */
		sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR);

		/* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */
		for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0;
		    i < 128; i += 4)
			iwk_mem_write(sc, base + i, 0);

		/* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */
		for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET;
		    i < 256; i += 4)
			iwk_mem_write(sc, base + i, 0);

		/* clear and init SCD_TRANSLATE_TBL_OFFSET area. 32 bytes */
		for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET;
		    i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
			iwk_mem_write(sc, base + i, 0);

		iwk_reg_write(sc, SCD_DRAM_BASE_ADDR,
		    sc->sc_dma_sh.cookie.dmac_address >> 10);
		iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0);

		/* initiate the tx queues */
		for (i = 0; i < IWK_NUM_QUEUES; i++) {
			iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0);
			IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8));
			iwk_mem_write(sc, sc->sc_scd_base +
			    SCD_CONTEXT_QUEUE_OFFSET(i),
			    (SCD_WIN_SIZE & 0x7f));
			iwk_mem_write(sc, sc->sc_scd_base +
			    SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t),
			    (SCD_FRAME_LIMIT & 0x7f) << 16);
		}
		/* enable interrupts on every queue */
		iwk_reg_write(sc, SCD_INTERRUPT_MASK,
		    (1 << IWK_NUM_QUEUES) - 1);
		/* enable each channel 0-7 */
		iwk_reg_write(sc, SCD_TXFACT,
		    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
		/*
		 * queue 0-7 maps to FIFO 0-7 and
		 * all queues work under FIFO mode (non-scheduler-ack)
		 */
		for (i = 0; i < 7; i++) {
			iwk_reg_write(sc,
			    SCD_QUEUE_STATUS_BITS(i),
			    (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
			    (i << SCD_QUEUE_STTS_REG_POS_TXF)|
			    SCD_QUEUE_STTS_REG_MSK);
		}
		iwk_mac_access_exit(sc);

		sc->sc_flags |= IWK_F_FW_INIT;
		cv_signal(&sc->sc_fw_cv);
	}

}

static uint_t
/* LINTED: argument unused in function: unused */
iwk_rx_softintr(caddr_t arg, caddr_t unused)
{
	iwk_sc_t *sc = (iwk_sc_t *)arg;
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_rx_desc_t *desc;
	iwk_rx_data_t *data;
	uint32_t index;

	mutex_enter(&sc->sc_glock);
	if (sc->sc_rx_softint_pending != 1) {
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_UNCLAIMED);
	}
	/* disable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, 0);
	mutex_exit(&sc->sc_glock);

	/*
	 * The firmware has advanced the write index of the rx queue;
	 * fetch it and process the newly filled entries.
	 */
	index = LE_32(sc->sc_shared->val0) & 0xfff;

	while (sc->sc_rxq.cur != index) {
		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
		desc = (iwk_rx_desc_t *)data->dma_data.mem_va;

		IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d"
		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));

		/*
		 * A reply to a command, rather than an Rx/Tx notification,
		 * needs to be acknowledged.
		 */
		if (!(desc->hdr.qid & 0x80) &&
		    (desc->hdr.type != REPLY_RX_PHY_CMD) &&
		    (desc->hdr.type != REPLY_TX) &&
		    (desc->hdr.type != REPLY_TX_PWR_TABLE_CMD) &&
		    (desc->hdr.type != REPLY_PHY_CALIBRATION_CMD) &&
		    (desc->hdr.type != SENSITIVITY_CMD))
			iwk_cmd_intr(sc, desc);

		switch (desc->hdr.type) {
		case REPLY_4965_RX:
			iwk_rx_intr(sc, desc, data);
			break;

		case REPLY_TX:
			iwk_tx_intr(sc, desc, data);
			break;

		case REPLY_ALIVE:
			iwk_ucode_alive(sc, desc);
			break;

		case CARD_STATE_NOTIFICATION:
		{
			uint32_t *status = (uint32_t *)(desc + 1);

			IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n",
			    LE_32(*status)));

			if (LE_32(*status) & 1) {
				/*
				 * The radio (RF kill) button has been pushed
				 * to OFF.  Treat this as a hardware error;
				 * iwk_thread() recovers once the button is
				 * pushed back to ON.
				 */
				cmn_err(CE_NOTE,
				    "iwk_rx_softintr(): "
				    "Radio transmitter is off\n");
				sc->sc_ostate = sc->sc_ic.ic_state;
				ieee80211_new_state(&sc->sc_ic,
				    IEEE80211_S_INIT, -1);
				sc->sc_flags |=
				    (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF);
			}
			break;
		}
		case SCAN_START_NOTIFICATION:
		{
			iwk_start_scan_t *scan =
			    (iwk_start_scan_t *)(desc + 1);

			IWK_DBG((IWK_DEBUG_SCAN,
			    "scanning channel %d status %x\n",
			    scan->chan, LE_32(scan->status)));

			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
			break;
		}
		case SCAN_COMPLETE_NOTIFICATION:
		{
			iwk_stop_scan_t *scan =
			    (iwk_stop_scan_t *)(desc + 1);

			IWK_DBG((IWK_DEBUG_SCAN,
			    "completed channel %d (burst of %d) status %02x\n",
			    scan->chan, scan->nchan, scan->status));

			sc->sc_scan_pending++;
			break;
		}
		case STATISTICS_NOTIFICATION:
			/* handle statistics notification */
			iwk_statistics_notify(sc, desc);
			break;
		}

		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
	}

	/*
	 * The driver has dealt with everything received in the rx queue;
	 * tell the firmware how far it has read.
	 */
	index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));

	mutex_enter(&sc->sc_glock);
	/* re-enable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
	sc->sc_rx_softint_pending = 0;
	mutex_exit(&sc->sc_glock);

	return (DDI_INTR_CLAIMED);
}

static uint_t
/* LINTED: argument unused in function: unused */
iwk_intr(caddr_t arg, caddr_t unused)
{
	iwk_sc_t *sc = (iwk_sc_t *)arg;
	uint32_t r, rfh;

	mutex_enter(&sc->sc_glock);

	if (sc->sc_flags & IWK_F_SUSPEND) {
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_UNCLAIMED);
	}

	r = IWK_READ(sc, CSR_INT);
	if (r == 0 || r == 0xffffffff) {
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_UNCLAIMED);
	}

	IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));

	rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
	IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
	/* disable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, 0);
	/* ack interrupts */
	IWK_WRITE(sc, CSR_INT, r);
	IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);

	if (sc->sc_soft_hdl == NULL) {
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_CLAIMED);
	}
	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
		cmn_err(CE_WARN, "fatal firmware error\n");
		mutex_exit(&sc->sc_glock);
#ifdef DEBUG
		/* dump event and error logs to dmesg */
		iwk_write_error_log(sc);
		iwk_write_event_log(sc);
#endif /* DEBUG */
		iwk_stop(sc);
		sc->sc_ostate = sc->sc_ic.ic_state;
		ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
		return (DDI_INTR_CLAIMED);
	}

	if (r & BIT_INT_RF_KILL) {
		IWK_DBG((IWK_DEBUG_RADIO, "RF kill\n"));
	}

	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
	    (rfh & FH_INT_RX_MASK)) {
		sc->sc_rx_softint_pending = 1;
		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
	}

	if (r & BIT_INT_ALIVE) {
		IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
	}

	/* re-enable interrupts */
2396 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK); 2397 mutex_exit(&sc->sc_glock); 2398 2399 return (DDI_INTR_CLAIMED); 2400 } 2401 2402 static uint8_t 2403 iwk_rate_to_plcp(int rate) 2404 { 2405 uint8_t ret; 2406 2407 switch (rate) { 2408 /* CCK rates */ 2409 case 2: 2410 ret = 0xa; 2411 break; 2412 case 4: 2413 ret = 0x14; 2414 break; 2415 case 11: 2416 ret = 0x37; 2417 break; 2418 case 22: 2419 ret = 0x6e; 2420 break; 2421 /* OFDM rates */ 2422 case 12: 2423 ret = 0xd; 2424 break; 2425 case 18: 2426 ret = 0xf; 2427 break; 2428 case 24: 2429 ret = 0x5; 2430 break; 2431 case 36: 2432 ret = 0x7; 2433 break; 2434 case 48: 2435 ret = 0x9; 2436 break; 2437 case 72: 2438 ret = 0xb; 2439 break; 2440 case 96: 2441 ret = 0x1; 2442 break; 2443 case 108: 2444 ret = 0x3; 2445 break; 2446 default: 2447 ret = 0; 2448 break; 2449 } 2450 return (ret); 2451 } 2452 2453 static mblk_t * 2454 iwk_m_tx(void *arg, mblk_t *mp) 2455 { 2456 iwk_sc_t *sc = (iwk_sc_t *)arg; 2457 ieee80211com_t *ic = &sc->sc_ic; 2458 mblk_t *next; 2459 2460 if (sc->sc_flags & IWK_F_SUSPEND) { 2461 freemsgchain(mp); 2462 return (NULL); 2463 } 2464 2465 if (ic->ic_state != IEEE80211_S_RUN) { 2466 freemsgchain(mp); 2467 return (NULL); 2468 } 2469 2470 while (mp != NULL) { 2471 next = mp->b_next; 2472 mp->b_next = NULL; 2473 if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) { 2474 mp->b_next = next; 2475 break; 2476 } 2477 mp = next; 2478 } 2479 return (mp); 2480 } 2481 2482 /* ARGSUSED */ 2483 static int 2484 iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type) 2485 { 2486 iwk_sc_t *sc = (iwk_sc_t *)ic; 2487 iwk_tx_ring_t *ring; 2488 iwk_tx_desc_t *desc; 2489 iwk_tx_data_t *data; 2490 iwk_cmd_t *cmd; 2491 iwk_tx_cmd_t *tx; 2492 ieee80211_node_t *in; 2493 struct ieee80211_frame *wh; 2494 struct ieee80211_key *k = NULL; 2495 mblk_t *m, *m0; 2496 int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS; 2497 uint16_t masks = 0; 2498 2499 ring = &sc->sc_txq[0]; 2500 data = &ring->data[ring->cur]; 2501 desc = data->desc; 2502 cmd = data->cmd; 2503 bzero(desc, sizeof (*desc)); 2504 bzero(cmd, sizeof (*cmd)); 2505 2506 mutex_enter(&sc->sc_tx_lock); 2507 if (sc->sc_flags & IWK_F_SUSPEND) { 2508 mutex_exit(&sc->sc_tx_lock); 2509 if ((type & IEEE80211_FC0_TYPE_MASK) != 2510 IEEE80211_FC0_TYPE_DATA) { 2511 freemsg(mp); 2512 } 2513 err = IWK_FAIL; 2514 goto exit; 2515 } 2516 2517 if (ring->queued > ring->count - 64) { 2518 IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n")); 2519 sc->sc_need_reschedule = 1; 2520 mutex_exit(&sc->sc_tx_lock); 2521 if ((type & IEEE80211_FC0_TYPE_MASK) != 2522 IEEE80211_FC0_TYPE_DATA) { 2523 freemsg(mp); 2524 } 2525 sc->sc_tx_nobuf++; 2526 err = IWK_FAIL; 2527 goto exit; 2528 } 2529 mutex_exit(&sc->sc_tx_lock); 2530 2531 hdrlen = sizeof (struct ieee80211_frame); 2532 2533 m = allocb(msgdsize(mp) + 32, BPRI_MED); 2534 if (m == NULL) { /* can not alloc buf, drop this package */ 2535 cmn_err(CE_WARN, 2536 "iwk_send(): failed to allocate msgbuf\n"); 2537 freemsg(mp); 2538 err = IWK_SUCCESS; 2539 goto exit; 2540 } 2541 for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) { 2542 mblen = MBLKL(m0); 2543 (void) memcpy(m->b_rptr + off, m0->b_rptr, mblen); 2544 off += mblen; 2545 } 2546 m->b_wptr += off; 2547 freemsg(mp); 2548 2549 wh = (struct ieee80211_frame *)m->b_rptr; 2550 2551 in = ieee80211_find_txnode(ic, wh->i_addr1); 2552 if (in == NULL) { 2553 cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n"); 2554 freemsg(m); 2555 sc->sc_tx_err++; 2556 err = IWK_SUCCESS; 2557 goto exit; 2558 } 2559 (void) 
ieee80211_encap(ic, m, in); 2560 2561 cmd->hdr.type = REPLY_TX; 2562 cmd->hdr.flags = 0; 2563 cmd->hdr.qid = ring->qid; 2564 cmd->hdr.idx = ring->cur; 2565 2566 tx = (iwk_tx_cmd_t *)cmd->data; 2567 tx->tx_flags = 0; 2568 2569 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2570 tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK)); 2571 } else { 2572 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK); 2573 } 2574 2575 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 2576 k = ieee80211_crypto_encap(ic, m); 2577 if (k == NULL) { 2578 freemsg(m); 2579 sc->sc_tx_err++; 2580 err = IWK_SUCCESS; 2581 goto exit; 2582 } 2583 2584 if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) { 2585 tx->sec_ctl = 2; /* for CCMP */ 2586 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK); 2587 (void) memcpy(&tx->key, k->wk_key, k->wk_keylen); 2588 } 2589 2590 /* packet header may have moved, reset our local pointer */ 2591 wh = (struct ieee80211_frame *)m->b_rptr; 2592 } 2593 2594 len = msgdsize(m); 2595 2596 #ifdef DEBUG 2597 if (iwk_dbg_flags & IWK_DEBUG_TX) 2598 ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0); 2599 #endif 2600 2601 /* pickup a rate */ 2602 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == 2603 IEEE80211_FC0_TYPE_MGT) { 2604 /* mgmt frames are sent at 1M */ 2605 rate = in->in_rates.ir_rates[0]; 2606 } else { 2607 /* 2608 * do it here for the software way rate control. 2609 * later for rate scaling in hardware. 2610 * maybe like the following, for management frame: 2611 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1; 2612 * for data frame: 2613 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK)); 2614 * rate = in->in_rates.ir_rates[in->in_txrate]; 2615 * tx->initial_rate_index = 1; 2616 * 2617 * now the txrate is determined in tx cmd flags, set to the 2618 * max value 54M for 11g and 11M for 11b. 
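		 *
		 * Note on units (added for clarity): net80211 stores
		 * ir_rates[] entries in 0.5 Mb/s units with the basic-rate
		 * flag in the high bit, which is why the value is masked
		 * with IEEE80211_RATE_VAL below.  For example an entry of
		 * 0x96 (0x80 | 22) masks down to 22, i.e. 11 Mb/s, and an
		 * entry of 108 corresponds to 54 Mb/s.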
2619 */ 2620 2621 if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) { 2622 rate = ic->ic_fixed_rate; 2623 } else { 2624 rate = in->in_rates.ir_rates[in->in_txrate]; 2625 } 2626 } 2627 rate &= IEEE80211_RATE_VAL; 2628 IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x", 2629 in->in_txrate, in->in_rates.ir_nrates, rate)); 2630 2631 tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK)); 2632 2633 len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4); 2634 if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen)) 2635 tx->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; 2636 2637 /* retrieve destination node's id */ 2638 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2639 tx->sta_id = IWK_BROADCAST_ID; 2640 } else { 2641 if (ic->ic_opmode != IEEE80211_M_IBSS) 2642 tx->sta_id = IWK_AP_ID; 2643 } 2644 2645 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == 2646 IEEE80211_FC0_TYPE_MGT) { 2647 /* tell h/w to set timestamp in probe responses */ 2648 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 2649 IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2650 tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK); 2651 2652 if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 2653 IEEE80211_FC0_SUBTYPE_ASSOC_REQ) || 2654 ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 2655 IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) 2656 tx->timeout.pm_frame_timeout = 3; 2657 else 2658 tx->timeout.pm_frame_timeout = 2; 2659 } else 2660 tx->timeout.pm_frame_timeout = 0; 2661 if (rate == 2 || rate == 4 || rate == 11 || rate == 22) 2662 masks |= RATE_MCS_CCK_MSK; 2663 2664 masks |= RATE_MCS_ANT_B_MSK; 2665 tx->rate.r.rate_n_flags = (iwk_rate_to_plcp(rate) | masks); 2666 2667 IWK_DBG((IWK_DEBUG_TX, "tx flag = %x", 2668 tx->tx_flags)); 2669 2670 tx->rts_retry_limit = 60; 2671 tx->data_retry_limit = 15; 2672 2673 tx->stop_time.life_time = LE_32(0xffffffff); 2674 2675 tx->len = LE_16(len); 2676 2677 tx->dram_lsb_ptr = 2678 data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch); 2679 tx->dram_msb_ptr = 0; 2680 tx->driver_txop = 0; 2681 tx->next_frame_len = 0; 2682 2683 (void) memcpy(tx + 1, m->b_rptr, hdrlen); 2684 m->b_rptr += hdrlen; 2685 (void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen); 2686 2687 IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d", 2688 ring->qid, ring->cur, len)); 2689 2690 /* 2691 * first segment includes the tx cmd plus the 802.11 header, 2692 * the second includes the remaining of the 802.11 frame. 2693 */ 2694 desc->val0 = LE_32(2 << 24); 2695 desc->pa[0].tb1_addr = LE_32(data->paddr_cmd); 2696 desc->pa[0].val1 = ((len0 << 4) & 0xfff0) | 2697 ((data->dma_data.cookie.dmac_address & 0xffff) << 16); 2698 desc->pa[0].val2 = 2699 ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) | 2700 ((len - hdrlen) << 20); 2701 IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x " 2702 "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x", 2703 data->paddr_cmd, data->dma_data.cookie.dmac_address, 2704 len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2)); 2705 2706 mutex_enter(&sc->sc_tx_lock); 2707 ring->queued++; 2708 mutex_exit(&sc->sc_tx_lock); 2709 2710 /* kick ring */ 2711 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 2712 tfd_offset[ring->cur].val = 8 + len; 2713 if (ring->cur < IWK_MAX_WIN_SIZE) { 2714 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 
2715 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len; 2716 } 2717 2718 IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV); 2719 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV); 2720 2721 ring->cur = (ring->cur + 1) % ring->count; 2722 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 2723 freemsg(m); 2724 /* release node reference */ 2725 ieee80211_free_node(in); 2726 2727 ic->ic_stats.is_tx_bytes += len; 2728 ic->ic_stats.is_tx_frags++; 2729 2730 if (sc->sc_tx_timer == 0) 2731 sc->sc_tx_timer = 10; 2732 exit: 2733 return (err); 2734 } 2735 2736 static void 2737 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp) 2738 { 2739 iwk_sc_t *sc = (iwk_sc_t *)arg; 2740 ieee80211com_t *ic = &sc->sc_ic; 2741 int err; 2742 2743 err = ieee80211_ioctl(ic, wq, mp); 2744 2745 if (err == ENETRESET) { 2746 /* 2747 * This is special for the hidden AP connection. 2748 * In any case, we should make sure only one 'scan' 2749 * in the driver for a 'connect' CLI command. So 2750 * when connecting to a hidden AP, the scan is just 2751 * sent out to the air when we know the desired 2752 * essid of the AP we want to connect. 2753 */ 2754 if (ic->ic_des_esslen) { 2755 if (sc->sc_flags & IWK_F_RUNNING) { 2756 iwk_m_stop(sc); 2757 (void) iwk_m_start(sc); 2758 (void) ieee80211_new_state(ic, 2759 IEEE80211_S_SCAN, -1); 2760 } 2761 } 2762 } 2763 } 2764 2765 /* 2766 * callback functions for set/get properties 2767 */ 2768 /* ARGSUSED */ 2769 static int 2770 iwk_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num, 2771 uint_t pr_flags, uint_t wldp_length, void *wldp_buf, uint_t *perm) 2772 { 2773 int err = 0; 2774 iwk_sc_t *sc = (iwk_sc_t *)arg; 2775 2776 err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num, 2777 pr_flags, wldp_length, wldp_buf, perm); 2778 2779 return (err); 2780 } 2781 static int 2782 iwk_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num, 2783 uint_t wldp_length, const void *wldp_buf) 2784 { 2785 int err; 2786 iwk_sc_t *sc = (iwk_sc_t *)arg; 2787 ieee80211com_t *ic = &sc->sc_ic; 2788 2789 err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length, 2790 wldp_buf); 2791 2792 if (err == ENETRESET) { 2793 if (ic->ic_des_esslen) { 2794 if (sc->sc_flags & IWK_F_RUNNING) { 2795 iwk_m_stop(sc); 2796 (void) iwk_m_start(sc); 2797 (void) ieee80211_new_state(ic, 2798 IEEE80211_S_SCAN, -1); 2799 } 2800 } 2801 err = 0; 2802 } 2803 2804 return (err); 2805 } 2806 2807 /*ARGSUSED*/ 2808 static int 2809 iwk_m_stat(void *arg, uint_t stat, uint64_t *val) 2810 { 2811 iwk_sc_t *sc = (iwk_sc_t *)arg; 2812 ieee80211com_t *ic = &sc->sc_ic; 2813 ieee80211_node_t *in = ic->ic_bss; 2814 struct ieee80211_rateset *rs = &in->in_rates; 2815 2816 mutex_enter(&sc->sc_glock); 2817 switch (stat) { 2818 case MAC_STAT_IFSPEED: 2819 *val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ? 
2820 (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) 2821 : ic->ic_fixed_rate) /2 * 1000000; 2822 break; 2823 case MAC_STAT_NOXMTBUF: 2824 *val = sc->sc_tx_nobuf; 2825 break; 2826 case MAC_STAT_NORCVBUF: 2827 *val = sc->sc_rx_nobuf; 2828 break; 2829 case MAC_STAT_IERRORS: 2830 *val = sc->sc_rx_err; 2831 break; 2832 case MAC_STAT_RBYTES: 2833 *val = ic->ic_stats.is_rx_bytes; 2834 break; 2835 case MAC_STAT_IPACKETS: 2836 *val = ic->ic_stats.is_rx_frags; 2837 break; 2838 case MAC_STAT_OBYTES: 2839 *val = ic->ic_stats.is_tx_bytes; 2840 break; 2841 case MAC_STAT_OPACKETS: 2842 *val = ic->ic_stats.is_tx_frags; 2843 break; 2844 case MAC_STAT_OERRORS: 2845 case WIFI_STAT_TX_FAILED: 2846 *val = sc->sc_tx_err; 2847 break; 2848 case WIFI_STAT_TX_RETRANS: 2849 *val = sc->sc_tx_retries; 2850 break; 2851 case WIFI_STAT_FCS_ERRORS: 2852 case WIFI_STAT_WEP_ERRORS: 2853 case WIFI_STAT_TX_FRAGS: 2854 case WIFI_STAT_MCAST_TX: 2855 case WIFI_STAT_RTS_SUCCESS: 2856 case WIFI_STAT_RTS_FAILURE: 2857 case WIFI_STAT_ACK_FAILURE: 2858 case WIFI_STAT_RX_FRAGS: 2859 case WIFI_STAT_MCAST_RX: 2860 case WIFI_STAT_RX_DUPS: 2861 mutex_exit(&sc->sc_glock); 2862 return (ieee80211_stat(ic, stat, val)); 2863 default: 2864 mutex_exit(&sc->sc_glock); 2865 return (ENOTSUP); 2866 } 2867 mutex_exit(&sc->sc_glock); 2868 2869 return (IWK_SUCCESS); 2870 2871 } 2872 2873 static int 2874 iwk_m_start(void *arg) 2875 { 2876 iwk_sc_t *sc = (iwk_sc_t *)arg; 2877 ieee80211com_t *ic = &sc->sc_ic; 2878 int err; 2879 2880 err = iwk_init(sc); 2881 2882 if (err != IWK_SUCCESS) { 2883 /* 2884 * The hw init err(eg. RF is OFF). Return Success to make 2885 * the 'plumb' succeed. The iwk_thread() tries to re-init 2886 * background. 2887 */ 2888 cmn_err(CE_WARN, "iwk_m_start(): failed to initialize " 2889 "hardware\n"); 2890 mutex_enter(&sc->sc_glock); 2891 sc->sc_flags |= IWK_F_HW_ERR_RECOVER; 2892 mutex_exit(&sc->sc_glock); 2893 return (IWK_SUCCESS); 2894 } 2895 2896 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 2897 2898 mutex_enter(&sc->sc_glock); 2899 sc->sc_flags |= IWK_F_RUNNING; 2900 mutex_exit(&sc->sc_glock); 2901 2902 return (IWK_SUCCESS); 2903 } 2904 2905 static void 2906 iwk_m_stop(void *arg) 2907 { 2908 iwk_sc_t *sc = (iwk_sc_t *)arg; 2909 ieee80211com_t *ic = &sc->sc_ic; 2910 2911 iwk_stop(sc); 2912 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 2913 mutex_enter(&sc->sc_mt_lock); 2914 sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER; 2915 sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL; 2916 mutex_exit(&sc->sc_mt_lock); 2917 mutex_enter(&sc->sc_glock); 2918 sc->sc_flags &= ~IWK_F_RUNNING; 2919 sc->sc_flags &= ~IWK_F_SCANNING; 2920 mutex_exit(&sc->sc_glock); 2921 } 2922 2923 /*ARGSUSED*/ 2924 static int 2925 iwk_m_unicst(void *arg, const uint8_t *macaddr) 2926 { 2927 iwk_sc_t *sc = (iwk_sc_t *)arg; 2928 ieee80211com_t *ic = &sc->sc_ic; 2929 int err; 2930 2931 if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) { 2932 IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr); 2933 mutex_enter(&sc->sc_glock); 2934 err = iwk_config(sc); 2935 mutex_exit(&sc->sc_glock); 2936 if (err != IWK_SUCCESS) { 2937 cmn_err(CE_WARN, 2938 "iwk_m_unicst(): " 2939 "failed to configure device\n"); 2940 goto fail; 2941 } 2942 } 2943 return (IWK_SUCCESS); 2944 fail: 2945 return (err); 2946 } 2947 2948 /*ARGSUSED*/ 2949 static int 2950 iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m) 2951 { 2952 return (IWK_SUCCESS); 2953 } 2954 2955 /*ARGSUSED*/ 2956 static int 2957 iwk_m_promisc(void *arg, boolean_t on) 2958 { 2959 return (IWK_SUCCESS); 2960 } 2961 2962 static void 2963 
iwk_thread(iwk_sc_t *sc) 2964 { 2965 ieee80211com_t *ic = &sc->sc_ic; 2966 clock_t clk; 2967 int times = 0, err, n = 0, timeout = 0; 2968 uint32_t tmp; 2969 2970 mutex_enter(&sc->sc_mt_lock); 2971 while (sc->sc_mf_thread_switch) { 2972 tmp = IWK_READ(sc, CSR_GP_CNTRL); 2973 if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) { 2974 sc->sc_flags &= ~IWK_F_RADIO_OFF; 2975 } else { 2976 sc->sc_flags |= IWK_F_RADIO_OFF; 2977 } 2978 /* 2979 * If in SUSPEND or the RF is OFF, do nothing 2980 */ 2981 if ((sc->sc_flags & IWK_F_SUSPEND) || 2982 (sc->sc_flags & IWK_F_RADIO_OFF)) { 2983 mutex_exit(&sc->sc_mt_lock); 2984 delay(drv_usectohz(100000)); 2985 mutex_enter(&sc->sc_mt_lock); 2986 continue; 2987 } 2988 2989 /* 2990 * recovery fatal error 2991 */ 2992 if (ic->ic_mach && 2993 (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) { 2994 2995 IWK_DBG((IWK_DEBUG_FW, 2996 "iwk_thread(): " 2997 "try to recover fatal hw error: %d\n", times++)); 2998 2999 iwk_stop(sc); 3000 3001 mutex_exit(&sc->sc_mt_lock); 3002 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 3003 delay(drv_usectohz(2000000 + n*500000)); 3004 mutex_enter(&sc->sc_mt_lock); 3005 3006 err = iwk_init(sc); 3007 if (err != IWK_SUCCESS) { 3008 n++; 3009 if (n < 20) 3010 continue; 3011 } 3012 n = 0; 3013 if (!err) 3014 sc->sc_flags |= IWK_F_RUNNING; 3015 sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER; 3016 mutex_exit(&sc->sc_mt_lock); 3017 delay(drv_usectohz(2000000)); 3018 if (sc->sc_ostate != IEEE80211_S_INIT) 3019 ieee80211_new_state(ic, IEEE80211_S_SCAN, 0); 3020 mutex_enter(&sc->sc_mt_lock); 3021 } 3022 3023 if (ic->ic_mach && 3024 (sc->sc_flags & IWK_F_SCANNING) && sc->sc_scan_pending) { 3025 3026 IWK_DBG((IWK_DEBUG_SCAN, 3027 "iwk_thread(): " 3028 "wait for probe response\n")); 3029 3030 sc->sc_scan_pending--; 3031 mutex_exit(&sc->sc_mt_lock); 3032 delay(drv_usectohz(200000)); 3033 ieee80211_next_scan(ic); 3034 mutex_enter(&sc->sc_mt_lock); 3035 } 3036 3037 /* 3038 * rate ctl 3039 */ 3040 if (ic->ic_mach && 3041 (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) { 3042 clk = ddi_get_lbolt(); 3043 if (clk > sc->sc_clk + drv_usectohz(500000)) { 3044 iwk_amrr_timeout(sc); 3045 } 3046 } 3047 3048 mutex_exit(&sc->sc_mt_lock); 3049 delay(drv_usectohz(100000)); 3050 mutex_enter(&sc->sc_mt_lock); 3051 3052 if (sc->sc_tx_timer) { 3053 timeout++; 3054 if (timeout == 10) { 3055 sc->sc_tx_timer--; 3056 if (sc->sc_tx_timer == 0) { 3057 sc->sc_flags |= IWK_F_HW_ERR_RECOVER; 3058 sc->sc_ostate = IEEE80211_S_RUN; 3059 IWK_DBG((IWK_DEBUG_FW, 3060 "iwk_thread(): try to recover from" 3061 " 'send fail\n")); 3062 } 3063 timeout = 0; 3064 } 3065 } 3066 3067 } 3068 sc->sc_mf_thread = NULL; 3069 cv_signal(&sc->sc_mt_cv); 3070 mutex_exit(&sc->sc_mt_lock); 3071 } 3072 3073 3074 /* 3075 * Send a command to the firmware. 
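 *
 * Usage sketch (illustrative only): callers build the command payload,
 * take sc_glock and pass async = 0 to wait for the firmware
 * acknowledgement, e.g.
 *
 *	iwk_bt_cmd_t bt = { 0 };
 *
 *	mutex_enter(&sc->sc_glock);
 *	err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt, sizeof (bt), 0);
 *	mutex_exit(&sc->sc_glock);
 *
 * With async = 1 the call returns as soon as the command has been placed
 * on the command ring, without waiting for IWK_F_CMD_DONE.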
3076 */ 3077 static int 3078 iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async) 3079 { 3080 iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM]; 3081 iwk_tx_desc_t *desc; 3082 iwk_cmd_t *cmd; 3083 clock_t clk; 3084 3085 ASSERT(size <= sizeof (cmd->data)); 3086 ASSERT(mutex_owned(&sc->sc_glock)); 3087 3088 IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code)); 3089 desc = ring->data[ring->cur].desc; 3090 cmd = ring->data[ring->cur].cmd; 3091 3092 cmd->hdr.type = (uint8_t)code; 3093 cmd->hdr.flags = 0; 3094 cmd->hdr.qid = ring->qid; 3095 cmd->hdr.idx = ring->cur; 3096 (void) memcpy(cmd->data, buf, size); 3097 (void) memset(desc, 0, sizeof (*desc)); 3098 3099 desc->val0 = LE_32(1 << 24); 3100 desc->pa[0].tb1_addr = 3101 (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff); 3102 desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0; 3103 3104 /* kick cmd ring XXX */ 3105 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 3106 tfd_offset[ring->cur].val = 8; 3107 if (ring->cur < IWK_MAX_WIN_SIZE) { 3108 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 3109 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8; 3110 } 3111 ring->cur = (ring->cur + 1) % ring->count; 3112 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3113 3114 if (async) 3115 return (IWK_SUCCESS); 3116 else { 3117 sc->sc_flags &= ~IWK_F_CMD_DONE; 3118 clk = ddi_get_lbolt() + drv_usectohz(2000000); 3119 while (!(sc->sc_flags & IWK_F_CMD_DONE)) { 3120 if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk) < 3121 0) 3122 break; 3123 } 3124 if (sc->sc_flags & IWK_F_CMD_DONE) 3125 return (IWK_SUCCESS); 3126 else 3127 return (IWK_FAIL); 3128 } 3129 } 3130 3131 static void 3132 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on) 3133 { 3134 iwk_led_cmd_t led; 3135 3136 led.interval = LE_32(100000); /* unit: 100ms */ 3137 led.id = id; 3138 led.off = off; 3139 led.on = on; 3140 3141 (void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1); 3142 } 3143 3144 static int 3145 iwk_hw_set_before_auth(iwk_sc_t *sc) 3146 { 3147 ieee80211com_t *ic = &sc->sc_ic; 3148 ieee80211_node_t *in = ic->ic_bss; 3149 iwk_add_sta_t node; 3150 iwk_link_quality_cmd_t link_quality; 3151 struct ieee80211_rateset rs; 3152 uint16_t masks = 0, rate; 3153 int i, err; 3154 3155 /* update adapter's configuration according the info of target AP */ 3156 IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid); 3157 sc->sc_config.chan = ieee80211_chan2ieee(ic, in->in_chan); 3158 if (ic->ic_curmode == IEEE80211_MODE_11B) { 3159 sc->sc_config.cck_basic_rates = 0x03; 3160 sc->sc_config.ofdm_basic_rates = 0; 3161 } else if ((in->in_chan != IEEE80211_CHAN_ANYC) && 3162 (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) { 3163 sc->sc_config.cck_basic_rates = 0; 3164 sc->sc_config.ofdm_basic_rates = 0x15; 3165 } else { /* assume 802.11b/g */ 3166 sc->sc_config.cck_basic_rates = 0x0f; 3167 sc->sc_config.ofdm_basic_rates = 0xff; 3168 } 3169 3170 sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK | 3171 RXON_FLG_SHORT_SLOT_MSK); 3172 3173 if (ic->ic_flags & IEEE80211_F_SHSLOT) 3174 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK); 3175 else 3176 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK); 3177 3178 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 3179 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK); 3180 else 3181 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK); 3182 3183 IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x " 3184 "filter_flags %x cck %x ofdm %x" 3185 " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n", 3186 sc->sc_config.chan, 
sc->sc_config.flags, 3187 sc->sc_config.filter_flags, 3188 sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates, 3189 sc->sc_config.bssid[0], sc->sc_config.bssid[1], 3190 sc->sc_config.bssid[2], sc->sc_config.bssid[3], 3191 sc->sc_config.bssid[4], sc->sc_config.bssid[5])); 3192 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 3193 sizeof (iwk_rxon_cmd_t), 1); 3194 if (err != IWK_SUCCESS) { 3195 cmn_err(CE_WARN, "iwk_hw_set_before_auth():" 3196 " failed to config chan%d\n", 3197 sc->sc_config.chan); 3198 return (err); 3199 } 3200 3201 /* obtain current temperature of chipset */ 3202 sc->sc_tempera = iwk_curr_tempera(sc); 3203 3204 /* make Tx power calibration to determine the gains of DSP and radio */ 3205 err = iwk_tx_power_calibration(sc); 3206 if (err) { 3207 cmn_err(CE_WARN, "iwk_hw_set_before_auth():" 3208 "failed to set tx power table\n"); 3209 return (err); 3210 } 3211 3212 /* add default AP node */ 3213 (void) memset(&node, 0, sizeof (node)); 3214 IEEE80211_ADDR_COPY(node.bssid, in->in_bssid); 3215 node.id = IWK_AP_ID; 3216 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1); 3217 if (err != IWK_SUCCESS) { 3218 cmn_err(CE_WARN, "iwk_hw_set_before_auth(): " 3219 "failed to add BSS node\n"); 3220 return (err); 3221 } 3222 3223 /* TX_LINK_QUALITY cmd ? */ 3224 (void) memset(&link_quality, 0, sizeof (link_quality)); 3225 rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)]; 3226 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 3227 if (i < rs.ir_nrates) 3228 rate = rs.ir_rates[rs.ir_nrates - i]; 3229 else 3230 rate = 2; 3231 if (rate == 2 || rate == 4 || rate == 11 || rate == 22) 3232 masks |= RATE_MCS_CCK_MSK; 3233 masks |= RATE_MCS_ANT_B_MSK; 3234 masks &= ~RATE_MCS_ANT_A_MSK; 3235 link_quality.rate_n_flags[i] = 3236 iwk_rate_to_plcp(rate) | masks; 3237 } 3238 3239 link_quality.general_params.single_stream_ant_msk = 2; 3240 link_quality.general_params.dual_stream_ant_msk = 3; 3241 link_quality.agg_params.agg_dis_start_th = 3; 3242 link_quality.agg_params.agg_time_limit = LE_16(4000); 3243 link_quality.sta_id = IWK_AP_ID; 3244 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality, 3245 sizeof (link_quality), 1); 3246 if (err != IWK_SUCCESS) { 3247 cmn_err(CE_WARN, "iwk_hw_set_before_auth(): " 3248 "failed to config link quality table\n"); 3249 return (err); 3250 } 3251 3252 return (IWK_SUCCESS); 3253 } 3254 3255 /* 3256 * Send a scan request(assembly scan cmd) to the firmware. 
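 *
 * Layout of the command buffer assembled below (descriptive sketch):
 *
 *	+----------------------+  <- cmd->data
 *	| iwk_scan_hdr_t       |  scan parameters, direct-scan SSID
 *	+----------------------+
 *	| probe request frame  |  802.11 header, SSID/RATES/XRATES/opt. IEs
 *	+----------------------+
 *	| iwk_scan_chan_t[n]   |  one entry per channel, n == hdr->nchan
 *	+----------------------+
 *
 * hdr->tx_cmd.len covers only the probe request, while hdr->len covers
 * the scan header, the probe request and the channel entries.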
3257 */ 3258 static int 3259 iwk_scan(iwk_sc_t *sc) 3260 { 3261 ieee80211com_t *ic = &sc->sc_ic; 3262 iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM]; 3263 iwk_tx_desc_t *desc; 3264 iwk_tx_data_t *data; 3265 iwk_cmd_t *cmd; 3266 iwk_scan_hdr_t *hdr; 3267 iwk_scan_chan_t *chan; 3268 struct ieee80211_frame *wh; 3269 ieee80211_node_t *in = ic->ic_bss; 3270 uint8_t essid[IEEE80211_NWID_LEN+1]; 3271 struct ieee80211_rateset *rs; 3272 enum ieee80211_phymode mode; 3273 uint8_t *frm; 3274 int i, pktlen, nrates; 3275 3276 data = &ring->data[ring->cur]; 3277 desc = data->desc; 3278 cmd = (iwk_cmd_t *)data->dma_data.mem_va; 3279 3280 cmd->hdr.type = REPLY_SCAN_CMD; 3281 cmd->hdr.flags = 0; 3282 cmd->hdr.qid = ring->qid; 3283 cmd->hdr.idx = ring->cur | 0x40; 3284 3285 hdr = (iwk_scan_hdr_t *)cmd->data; 3286 (void) memset(hdr, 0, sizeof (iwk_scan_hdr_t)); 3287 hdr->nchan = 1; 3288 hdr->quiet_time = LE_16(50); 3289 hdr->quiet_plcp_th = LE_16(1); 3290 3291 hdr->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 3292 hdr->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK | 3293 LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) | 3294 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) | 3295 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS)); 3296 3297 hdr->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 3298 hdr->tx_cmd.sta_id = IWK_BROADCAST_ID; 3299 hdr->tx_cmd.stop_time.life_time = 0xffffffff; 3300 hdr->tx_cmd.tx_flags |= (0x200); 3301 hdr->tx_cmd.rate.r.rate_n_flags = iwk_rate_to_plcp(2); 3302 hdr->tx_cmd.rate.r.rate_n_flags |= 3303 (RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK); 3304 hdr->direct_scan[0].len = ic->ic_des_esslen; 3305 hdr->direct_scan[0].id = IEEE80211_ELEMID_SSID; 3306 3307 if (ic->ic_des_esslen) { 3308 bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen); 3309 essid[ic->ic_des_esslen] = '\0'; 3310 IWK_DBG((IWK_DEBUG_SCAN, "directed scan %s\n", essid)); 3311 3312 bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid, 3313 ic->ic_des_esslen); 3314 } else { 3315 bzero(hdr->direct_scan[0].ssid, 3316 sizeof (hdr->direct_scan[0].ssid)); 3317 } 3318 /* 3319 * a probe request frame is required after the REPLY_SCAN_CMD 3320 */ 3321 wh = (struct ieee80211_frame *)(hdr + 1); 3322 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 3323 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 3324 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 3325 (void) memset(wh->i_addr1, 0xff, 6); 3326 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr); 3327 (void) memset(wh->i_addr3, 0xff, 6); 3328 *(uint16_t *)&wh->i_dur[0] = 0; 3329 *(uint16_t *)&wh->i_seq[0] = 0; 3330 3331 frm = (uint8_t *)(wh + 1); 3332 3333 /* essid IE */ 3334 if (in->in_esslen) { 3335 bcopy(in->in_essid, essid, in->in_esslen); 3336 essid[in->in_esslen] = '\0'; 3337 IWK_DBG((IWK_DEBUG_SCAN, "probe with ESSID %s\n", 3338 essid)); 3339 } 3340 *frm++ = IEEE80211_ELEMID_SSID; 3341 *frm++ = in->in_esslen; 3342 (void) memcpy(frm, in->in_essid, in->in_esslen); 3343 frm += in->in_esslen; 3344 3345 mode = ieee80211_chan2mode(ic, ic->ic_curchan); 3346 rs = &ic->ic_sup_rates[mode]; 3347 3348 /* supported rates IE */ 3349 *frm++ = IEEE80211_ELEMID_RATES; 3350 nrates = rs->ir_nrates; 3351 if (nrates > IEEE80211_RATE_SIZE) 3352 nrates = IEEE80211_RATE_SIZE; 3353 *frm++ = (uint8_t)nrates; 3354 (void) memcpy(frm, rs->ir_rates, nrates); 3355 frm += nrates; 3356 3357 /* supported xrates IE */ 3358 if (rs->ir_nrates > IEEE80211_RATE_SIZE) { 3359 nrates = rs->ir_nrates - IEEE80211_RATE_SIZE; 3360 *frm++ = IEEE80211_ELEMID_XRATES; 3361 *frm++ = (uint8_t)nrates; 3362 (void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates); 
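		/*
		 * Example (added note): IEEE80211_RATE_SIZE is 8, so an
		 * 11g rate set with 12 entries advertises the first 8 in
		 * the RATES element above and the remaining 4 here in the
		 * XRATES element.
		 */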
		frm += nrates;
	}

	/* optional IE (usually for wpa) */
	if (ic->ic_opt_ie != NULL) {
		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
		frm += ic->ic_opt_ie_len;
	}

	/* setup length of probe request */
	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
	hdr->len = hdr->nchan * sizeof (iwk_scan_chan_t) +
	    hdr->tx_cmd.len + sizeof (iwk_scan_hdr_t);

	/*
	 * The attributes of the scan channels are required after the probe
	 * request frame.
	 */
	chan = (iwk_scan_chan_t *)frm;
	for (i = 1; i <= hdr->nchan; i++, chan++) {
		if (ic->ic_des_esslen) {
			chan->type = 3;
		} else {
			chan->type = 1;
		}

		chan->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
		chan->tpc.tx_gain = 0x3f;
		chan->tpc.dsp_atten = 110;
		chan->active_dwell = LE_16(50);
		chan->passive_dwell = LE_16(120);

		frm += sizeof (iwk_scan_chan_t);
	}

	pktlen = _PTRDIFF(frm, cmd);

	(void) memset(desc, 0, sizeof (*desc));
	desc->val0 = LE_32(1 << 24);
	desc->pa[0].tb1_addr =
	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;

	/*
	 * Filling the byte count table may not be necessary for a command;
	 * fill it here anyway.
	 */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
	    tfd_offset[ring->cur].val = 8;
	if (ring->cur < IWK_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
	}

	/* kick cmd ring */
	ring->cur = (ring->cur + 1) % ring->count;
	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	return (IWK_SUCCESS);
}

static int
iwk_config(iwk_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_powertable_cmd_t powertable;
	iwk_bt_cmd_t bt;
	iwk_add_sta_t node;
	iwk_link_quality_cmd_t link_quality;
	int i, err;
	uint16_t masks = 0;

	/*
	 * Set power mode.  Disable power management at present; do it later.
	 */
	(void) memset(&powertable, 0, sizeof (powertable));
	powertable.flags = LE_16(0x8);
	err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable,
	    sizeof (powertable), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n");
		return (err);
	}

	/* configure bt coexistence */
	(void) memset(&bt, 0, sizeof (bt));
	bt.flags = 3;
	bt.lead_time = 0xaa;
	bt.max_kill = 1;
	err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt,
	    sizeof (bt), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN,
		    "iwk_config(): "
		    "failed to configure bt coexistence\n");
		return (err);
	}

	/* configure rxon */
	(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
	sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
	sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK |
	    RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_BAND_24G_MSK);
	sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_DIS_DECRYPT_MSK |
		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
		break;
	case IEEE80211_M_AHDEMO:
		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
		sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_DIS_DECRYPT_MSK |
		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
		break;
	case IEEE80211_M_HOSTAP:
		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
		break;
	}
	sc->sc_config.cck_basic_rates = 0x0f;
	sc->sc_config.ofdm_basic_rates = 0xff;

	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;

	/* set antenna */

	sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
	    sizeof (iwk_rxon_cmd_t), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): "
		    "failed to send configure command\n");
		return (err);
	}
	/* obtain current temperature of chipset */
	sc->sc_tempera = iwk_curr_tempera(sc);

	/* perform Tx power calibration to determine the DSP and radio gains */
	err = iwk_tx_power_calibration(sc);
	if (err) {
		cmn_err(CE_WARN, "iwk_config(): "
		    "failed to set tx power table\n");
		return (err);
	}

	/* add broadcast node so that we can send broadcast frames */
	(void) memset(&node, 0, sizeof (node));
	(void) memset(node.bssid, 0xff, 6);
	node.id = IWK_BROADCAST_ID;
	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): "
		    "failed to add broadcast node\n");
		return (err);
	}

	/*
	 * Configure the TX link quality (rate scaling / retry) table
	 * for the broadcast station.
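	 * Each entry of rate_n_flags[] is one step of the firmware's retry
	 * ladder.  For the broadcast station every step is simply 1 Mb/s
	 * CCK on antenna B (iwk_rate_to_plcp(2) | RATE_MCS_CCK_MSK |
	 * RATE_MCS_ANT_B_MSK), so no real rate scaling happens here; a
	 * per-AP table is installed later in iwk_hw_set_before_auth().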
*/ 3535 (void) memset(&link_quality, 0, sizeof (link_quality)); 3536 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 3537 masks |= RATE_MCS_CCK_MSK; 3538 masks |= RATE_MCS_ANT_B_MSK; 3539 masks &= ~RATE_MCS_ANT_A_MSK; 3540 link_quality.rate_n_flags[i] = iwk_rate_to_plcp(2) | masks; 3541 } 3542 3543 link_quality.general_params.single_stream_ant_msk = 2; 3544 link_quality.general_params.dual_stream_ant_msk = 3; 3545 link_quality.agg_params.agg_dis_start_th = 3; 3546 link_quality.agg_params.agg_time_limit = LE_16(4000); 3547 link_quality.sta_id = IWK_BROADCAST_ID; 3548 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality, 3549 sizeof (link_quality), 0); 3550 if (err != IWK_SUCCESS) { 3551 cmn_err(CE_WARN, "iwk_config(): " 3552 "failed to config link quality table\n"); 3553 return (err); 3554 } 3555 3556 return (IWK_SUCCESS); 3557 } 3558 3559 static void 3560 iwk_stop_master(iwk_sc_t *sc) 3561 { 3562 uint32_t tmp; 3563 int n; 3564 3565 tmp = IWK_READ(sc, CSR_RESET); 3566 IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER); 3567 3568 tmp = IWK_READ(sc, CSR_GP_CNTRL); 3569 if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) == 3570 CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) 3571 return; 3572 3573 for (n = 0; n < 2000; n++) { 3574 if (IWK_READ(sc, CSR_RESET) & 3575 CSR_RESET_REG_FLAG_MASTER_DISABLED) 3576 break; 3577 DELAY(1000); 3578 } 3579 if (n == 2000) 3580 IWK_DBG((IWK_DEBUG_HW, 3581 "timeout waiting for master stop\n")); 3582 } 3583 3584 static int 3585 iwk_power_up(iwk_sc_t *sc) 3586 { 3587 uint32_t tmp; 3588 3589 iwk_mac_access_enter(sc); 3590 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL); 3591 tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC; 3592 tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN; 3593 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp); 3594 iwk_mac_access_exit(sc); 3595 3596 DELAY(5000); 3597 return (IWK_SUCCESS); 3598 } 3599 3600 static int 3601 iwk_preinit(iwk_sc_t *sc) 3602 { 3603 uint32_t tmp; 3604 int n; 3605 uint8_t vlink; 3606 3607 /* clear any pending interrupts */ 3608 IWK_WRITE(sc, CSR_INT, 0xffffffff); 3609 3610 tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS); 3611 IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS, 3612 tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 3613 3614 tmp = IWK_READ(sc, CSR_GP_CNTRL); 3615 IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 3616 3617 /* wait for clock ready */ 3618 for (n = 0; n < 1000; n++) { 3619 if (IWK_READ(sc, CSR_GP_CNTRL) & 3620 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) 3621 break; 3622 DELAY(10); 3623 } 3624 if (n == 1000) { 3625 cmn_err(CE_WARN, 3626 "iwk_preinit(): timeout waiting for clock ready\n"); 3627 return (ETIMEDOUT); 3628 } 3629 iwk_mac_access_enter(sc); 3630 tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG); 3631 iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp | 3632 APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT); 3633 3634 DELAY(20); 3635 tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT); 3636 iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp | 3637 APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE); 3638 iwk_mac_access_exit(sc); 3639 3640 IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? 
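	 * (Added note, best guess: the interrupt coalescing timer appears to
	 * count in 32-usec units, so 512 / 32 == 16 would program roughly
	 * 512 usec of coalescing before the interrupt is raised.)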
 */

	(void) iwk_power_up(sc);

	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
		tmp = ddi_get32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
		ddi_put32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
		    tmp & ~(1 << 11));
	}


	vlink = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
	    vlink & ~2);

	tmp = IWK_READ(sc, CSR_SW_VER);
	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
	    CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R;
	IWK_WRITE(sc, CSR_SW_VER, tmp);

	/* make sure the power supply is on in each part of the hardware */
	iwk_mac_access_enter(sc);
	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	DELAY(5);
	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	iwk_mac_access_exit(sc);
	return (IWK_SUCCESS);
}

/*
 * set up the semaphore flag to own the EEPROM
 */
static int iwk_eep_sem_down(iwk_sc_t *sc)
{
	int count1, count2;
	uint32_t tmp;

	for (count1 = 0; count1 < 1000; count1++) {
		tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
		IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);

		for (count2 = 0; count2 < 2; count2++) {
			if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) &
			    CSR_HW_IF_CONFIG_REG_EEP_SEM)
				return (IWK_SUCCESS);
			DELAY(10000);
		}
	}
	return (IWK_FAIL);
}

/*
 * reset the semaphore flag to release the EEPROM
 */
static void iwk_eep_sem_up(iwk_sc_t *sc)
{
	uint32_t tmp;

	tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
	IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
}

/*
 * This function loads all information in the EEPROM into the iwk_eep
 * structure embedded in the iwk_sc_t structure.
 */
static int iwk_eep_load(iwk_sc_t *sc)
{
	int i, rr;
	uint32_t rv, tmp, eep_gp;
	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;

	/* read eeprom gp register in CSR */
	eep_gp = IWK_READ(sc, CSR_EEPROM_GP);
	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
	    CSR_EEPROM_GP_BAD_SIGNATURE) {
		cmn_err(CE_WARN, "EEPROM not found\n");
		return (IWK_FAIL);
	}

	rr = iwk_eep_sem_down(sc);
	if (rr != 0) {
		cmn_err(CE_WARN, "failed to own EEPROM\n");
		return (IWK_FAIL);
	}

	for (addr = 0; addr < eep_sz; addr += 2) {
		IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1);
		tmp = IWK_READ(sc, CSR_EEPROM_REG);
		IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));

		for (i = 0; i < 10; i++) {
			rv = IWK_READ(sc, CSR_EEPROM_REG);
			if (rv & 1)
				break;
			DELAY(10);
		}

		if (!(rv & 1)) {
			cmn_err(CE_WARN, "timeout reading the EEPROM\n");
			iwk_eep_sem_up(sc);
			return (IWK_FAIL);
		}

		eep_p[addr/2] = rv >> 16;
	}

	iwk_eep_sem_up(sc);
	return (IWK_SUCCESS);
}

/*
 * initialize the mac address in the ieee80211com_t struct
 */
static void iwk_get_mac_from_eep(iwk_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	struct iwk_eep *ep = &sc->sc_eep_map;

	IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address);
3771 3772 IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n", 3773 ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2], 3774 ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5])); 3775 } 3776 3777 static int 3778 iwk_init(iwk_sc_t *sc) 3779 { 3780 int qid, n, err; 3781 clock_t clk; 3782 uint32_t tmp; 3783 3784 mutex_enter(&sc->sc_glock); 3785 sc->sc_flags &= ~IWK_F_FW_INIT; 3786 3787 (void) iwk_preinit(sc); 3788 3789 tmp = IWK_READ(sc, CSR_GP_CNTRL); 3790 if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) { 3791 cmn_err(CE_WARN, "iwk_init(): Radio transmitter is off\n"); 3792 goto fail1; 3793 } 3794 3795 /* init Rx ring */ 3796 iwk_mac_access_enter(sc); 3797 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 3798 3799 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); 3800 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG, 3801 sc->sc_rxq.dma_desc.cookie.dmac_address >> 8); 3802 3803 IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG, 3804 ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address + 3805 offsetof(struct iwk_shared, val0)) >> 4)); 3806 3807 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 3808 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 3809 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 3810 IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K | 3811 (RX_QUEUE_SIZE_LOG << 3812 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT)); 3813 iwk_mac_access_exit(sc); 3814 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 3815 (RX_QUEUE_SIZE - 1) & ~0x7); 3816 3817 /* init Tx rings */ 3818 iwk_mac_access_enter(sc); 3819 iwk_reg_write(sc, SCD_TXFACT, 0); 3820 3821 /* keep warm page */ 3822 iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG, 3823 sc->sc_dma_kw.cookie.dmac_address >> 4); 3824 3825 for (qid = 0; qid < IWK_NUM_QUEUES; qid++) { 3826 IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid), 3827 sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8); 3828 IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid), 3829 IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 3830 IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL); 3831 } 3832 iwk_mac_access_exit(sc); 3833 3834 /* clear "radio off" and "disable command" bits */ 3835 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 3836 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, 3837 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 3838 3839 /* clear any pending interrupts */ 3840 IWK_WRITE(sc, CSR_INT, 0xffffffff); 3841 3842 /* enable interrupts */ 3843 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK); 3844 3845 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 3846 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 3847 3848 /* 3849 * backup ucode data part for future use. 
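	 * (Added note: iwk_ucode_alive() points BSM_DRAM_DATA_PTR_REG at
	 * this backup copy, so the bootstrap state machine can always
	 * reload the runtime data segment from an unmodified image,
	 * presumably for the future power-save support mentioned in
	 * iwk_load_firmware().)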
3850 */ 3851 (void) memcpy(sc->sc_dma_fw_data_bak.mem_va, 3852 sc->sc_dma_fw_data.mem_va, 3853 sc->sc_dma_fw_data.alength); 3854 3855 for (n = 0; n < 2; n++) { 3856 /* load firmware init segment into NIC */ 3857 err = iwk_load_firmware(sc); 3858 if (err != IWK_SUCCESS) { 3859 cmn_err(CE_WARN, "iwk_init(): " 3860 "failed to setup boot firmware\n"); 3861 continue; 3862 } 3863 3864 /* now press "execute" start running */ 3865 IWK_WRITE(sc, CSR_RESET, 0); 3866 break; 3867 } 3868 if (n == 2) { 3869 cmn_err(CE_WARN, "iwk_init(): failed to load firmware\n"); 3870 goto fail1; 3871 } 3872 /* ..and wait at most one second for adapter to initialize */ 3873 clk = ddi_get_lbolt() + drv_usectohz(2000000); 3874 while (!(sc->sc_flags & IWK_F_FW_INIT)) { 3875 if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0) 3876 break; 3877 } 3878 if (!(sc->sc_flags & IWK_F_FW_INIT)) { 3879 cmn_err(CE_WARN, 3880 "iwk_init(): timeout waiting for firmware init\n"); 3881 goto fail1; 3882 } 3883 3884 /* 3885 * at this point, the firmware is loaded OK, then config the hardware 3886 * with the ucode API, including rxon, txpower, etc. 3887 */ 3888 err = iwk_config(sc); 3889 if (err) { 3890 cmn_err(CE_WARN, "iwk_init(): failed to configure device\n"); 3891 goto fail1; 3892 } 3893 3894 /* at this point, hardware may receive beacons :) */ 3895 mutex_exit(&sc->sc_glock); 3896 return (IWK_SUCCESS); 3897 3898 fail1: 3899 err = IWK_FAIL; 3900 mutex_exit(&sc->sc_glock); 3901 return (err); 3902 } 3903 3904 static void 3905 iwk_stop(iwk_sc_t *sc) 3906 { 3907 uint32_t tmp; 3908 int i; 3909 3910 if (!(sc->sc_flags & IWK_F_QUIESCED)) 3911 mutex_enter(&sc->sc_glock); 3912 3913 IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 3914 /* disable interrupts */ 3915 IWK_WRITE(sc, CSR_INT_MASK, 0); 3916 IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK); 3917 IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff); 3918 3919 /* reset all Tx rings */ 3920 for (i = 0; i < IWK_NUM_QUEUES; i++) 3921 iwk_reset_tx_ring(sc, &sc->sc_txq[i]); 3922 3923 /* reset Rx ring */ 3924 iwk_reset_rx_ring(sc); 3925 3926 iwk_mac_access_enter(sc); 3927 iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT); 3928 iwk_mac_access_exit(sc); 3929 3930 DELAY(5); 3931 3932 iwk_stop_master(sc); 3933 3934 sc->sc_tx_timer = 0; 3935 tmp = IWK_READ(sc, CSR_RESET); 3936 IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET); 3937 3938 if (!(sc->sc_flags & IWK_F_QUIESCED)) 3939 mutex_exit(&sc->sc_glock); 3940 } 3941 3942 /* 3943 * Naive implementation of the Adaptive Multi Rate Retry algorithm: 3944 * "IEEE 802.11 Rate Adaptation: A Practical Approach" 3945 * Mathieu Lacage, Hossein Manshaei, Thierry Turletti 3946 * INRIA Sophia - Projet Planete 3947 * http://www-sop.inria.fr/rapports/sophia/RR-5208.html 3948 */ 3949 #define is_success(amrr) \ 3950 ((amrr)->retrycnt < (amrr)->txcnt / 10) 3951 #define is_failure(amrr) \ 3952 ((amrr)->retrycnt > (amrr)->txcnt / 3) 3953 #define is_enough(amrr) \ 3954 ((amrr)->txcnt > 100) 3955 #define is_min_rate(in) \ 3956 ((in)->in_txrate == 0) 3957 #define is_max_rate(in) \ 3958 ((in)->in_txrate == (in)->in_rates.ir_nrates - 1) 3959 #define increase_rate(in) \ 3960 ((in)->in_txrate++) 3961 #define decrease_rate(in) \ 3962 ((in)->in_txrate--) 3963 #define reset_cnt(amrr) \ 3964 { (amrr)->txcnt = (amrr)->retrycnt = 0; } 3965 3966 #define IWK_AMRR_MIN_SUCCESS_THRESHOLD 1 3967 #define IWK_AMRR_MAX_SUCCESS_THRESHOLD 15 3968 3969 static void 3970 iwk_amrr_init(iwk_amrr_t *amrr) 3971 { 3972 amrr->success = 0; 3973 amrr->recovery = 0; 3974 
amrr->txcnt = amrr->retrycnt = 0; 3975 amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD; 3976 } 3977 3978 static void 3979 iwk_amrr_timeout(iwk_sc_t *sc) 3980 { 3981 ieee80211com_t *ic = &sc->sc_ic; 3982 3983 IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n")); 3984 if (ic->ic_opmode == IEEE80211_M_STA) 3985 iwk_amrr_ratectl(NULL, ic->ic_bss); 3986 else 3987 ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL); 3988 sc->sc_clk = ddi_get_lbolt(); 3989 } 3990 3991 /* ARGSUSED */ 3992 static void 3993 iwk_amrr_ratectl(void *arg, ieee80211_node_t *in) 3994 { 3995 iwk_amrr_t *amrr = (iwk_amrr_t *)in; 3996 int need_change = 0; 3997 3998 if (is_success(amrr) && is_enough(amrr)) { 3999 amrr->success++; 4000 if (amrr->success >= amrr->success_threshold && 4001 !is_max_rate(in)) { 4002 amrr->recovery = 1; 4003 amrr->success = 0; 4004 increase_rate(in); 4005 IWK_DBG((IWK_DEBUG_RATECTL, 4006 "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n", 4007 in->in_txrate, amrr->txcnt, amrr->retrycnt)); 4008 need_change = 1; 4009 } else { 4010 amrr->recovery = 0; 4011 } 4012 } else if (is_failure(amrr)) { 4013 amrr->success = 0; 4014 if (!is_min_rate(in)) { 4015 if (amrr->recovery) { 4016 amrr->success_threshold++; 4017 if (amrr->success_threshold > 4018 IWK_AMRR_MAX_SUCCESS_THRESHOLD) 4019 amrr->success_threshold = 4020 IWK_AMRR_MAX_SUCCESS_THRESHOLD; 4021 } else { 4022 amrr->success_threshold = 4023 IWK_AMRR_MIN_SUCCESS_THRESHOLD; 4024 } 4025 decrease_rate(in); 4026 IWK_DBG((IWK_DEBUG_RATECTL, 4027 "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n", 4028 in->in_txrate, amrr->txcnt, amrr->retrycnt)); 4029 need_change = 1; 4030 } 4031 amrr->recovery = 0; /* paper is incorrect */ 4032 } 4033 4034 if (is_enough(amrr) || need_change) 4035 reset_cnt(amrr); 4036 } 4037 4038 /* 4039 * calculate 4965 chipset's kelvin temperature according to 4040 * the data of init alive and satistics notification. 
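 * As a rough illustration only (the constant values here are assumed,
 * not taken from this driver): with r1 = 10, r2 = 50, r3 = 110,
 * r4 = 170, TEMPERATURE_CALIB_A_VAL = 259 and
 * TEMPERATURE_CALIB_KELVIN_OFFSET = 8, the code below would compute
 *	tempera = 259 * (170 - 50) / (110 - 10) = 310
 *	tempera = (310 * 97) / 100 + 8 = 308 Kelvin, roughly 35 degrees C.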
4041 * The details are described in iwk_calibration.h file 4042 */ 4043 static int32_t iwk_curr_tempera(iwk_sc_t *sc) 4044 { 4045 int32_t tempera; 4046 int32_t r1, r2, r3; 4047 uint32_t r4_u; 4048 int32_t r4_s; 4049 4050 if (iwk_is_fat_channel(sc)) { 4051 r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[1]); 4052 r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[1]); 4053 r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[1]); 4054 r4_u = sc->sc_card_alive_init.therm_r4[1]; 4055 } else { 4056 r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[0]); 4057 r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[0]); 4058 r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[0]); 4059 r4_u = sc->sc_card_alive_init.therm_r4[0]; 4060 } 4061 4062 if (sc->sc_flags & IWK_F_STATISTICS) { 4063 r4_s = (int32_t)(sc->sc_statistics.general.temperature << 4064 (31-23)) >> (31-23); 4065 } else { 4066 r4_s = (int32_t)(r4_u << (31-23)) >> (31-23); 4067 } 4068 4069 IWK_DBG((IWK_DEBUG_CALIBRATION, "temperature R[1-4]: %d %d %d %d\n", 4070 r1, r2, r3, r4_s)); 4071 4072 if (r3 == r1) { 4073 cmn_err(CE_WARN, "iwk_curr_tempera(): " 4074 "failed to calculate temperature " 4075 "because r3 == r1\n"); 4076 return (DDI_FAILURE); 4077 } 4078 4079 tempera = TEMPERATURE_CALIB_A_VAL * (r4_s - r2); 4080 tempera /= (r3 - r1); 4081 tempera = (tempera*97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET; 4082 4083 IWK_DBG((IWK_DEBUG_CALIBRATION, "calculated temperature: %dK, %dC\n", 4084 tempera, KELVIN_TO_CELSIUS(tempera))); 4085 4086 return (tempera); 4087 } 4088 4089 /* Determine whether 4965 is using 2.4 GHz band */ 4090 static inline int iwk_is_24G_band(iwk_sc_t *sc) 4091 { 4092 return (sc->sc_config.flags & RXON_FLG_BAND_24G_MSK); 4093 } 4094 4095 /* Determine whether 4965 is using fat channel */ 4096 static inline int iwk_is_fat_channel(iwk_sc_t *sc) 4097 { 4098 return ((sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) || 4099 (sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK)); 4100 } 4101 4102 /* 4103 * In MIMO mode, determine which group 4965's current channel belongs to.
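 * For example, if a group's first/last channel constants were 34 and 43
 * (illustrative values only), every channel in 34..43 would map to that
 * group; the group index returned here is later used to index
 * txpower_tempera_comp_table[] and sc_card_alive_init.tx_atten[][].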
4104 * For more infomation about "channel group", 4105 * please refer to iwk_calibration.h file 4106 */ 4107 static int iwk_txpower_grp(uint16_t channel) 4108 { 4109 if (channel >= CALIB_IWK_TX_ATTEN_GR5_FCH && 4110 channel <= CALIB_IWK_TX_ATTEN_GR5_LCH) { 4111 return (CALIB_CH_GROUP_5); 4112 } 4113 4114 if (channel >= CALIB_IWK_TX_ATTEN_GR1_FCH && 4115 channel <= CALIB_IWK_TX_ATTEN_GR1_LCH) { 4116 return (CALIB_CH_GROUP_1); 4117 } 4118 4119 if (channel >= CALIB_IWK_TX_ATTEN_GR2_FCH && 4120 channel <= CALIB_IWK_TX_ATTEN_GR2_LCH) { 4121 return (CALIB_CH_GROUP_2); 4122 } 4123 4124 if (channel >= CALIB_IWK_TX_ATTEN_GR3_FCH && 4125 channel <= CALIB_IWK_TX_ATTEN_GR3_LCH) { 4126 return (CALIB_CH_GROUP_3); 4127 } 4128 4129 if (channel >= CALIB_IWK_TX_ATTEN_GR4_FCH && 4130 channel <= CALIB_IWK_TX_ATTEN_GR4_LCH) { 4131 return (CALIB_CH_GROUP_4); 4132 } 4133 4134 cmn_err(CE_WARN, "iwk_txpower_grp(): " 4135 "can't find txpower group for channel %d.\n", channel); 4136 4137 return (DDI_FAILURE); 4138 } 4139 4140 /* 2.4 GHz */ 4141 static uint16_t iwk_eep_band_1[14] = { 4142 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 4143 }; 4144 4145 /* 5.2 GHz bands */ 4146 static uint16_t iwk_eep_band_2[13] = { 4147 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 4148 }; 4149 4150 static uint16_t iwk_eep_band_3[12] = { 4151 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 4152 }; 4153 4154 static uint16_t iwk_eep_band_4[11] = { 4155 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 4156 }; 4157 4158 static uint16_t iwk_eep_band_5[6] = { 4159 145, 149, 153, 157, 161, 165 4160 }; 4161 4162 static uint16_t iwk_eep_band_6[7] = { 4163 1, 2, 3, 4, 5, 6, 7 4164 }; 4165 4166 static uint16_t iwk_eep_band_7[11] = { 4167 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 4168 }; 4169 4170 /* Get regulatory data from eeprom for a given channel */ 4171 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc, 4172 uint16_t channel, 4173 int is_24G, int is_fat, int is_hi_chan) 4174 { 4175 int32_t i; 4176 uint16_t chan; 4177 4178 if (is_fat) { /* 11n mode */ 4179 4180 if (is_hi_chan) { 4181 chan = channel - 4; 4182 } else { 4183 chan = channel; 4184 } 4185 4186 for (i = 0; i < 7; i++) { 4187 if (iwk_eep_band_6[i] == chan) { 4188 return (&sc->sc_eep_map.band_24_channels[i]); 4189 } 4190 } 4191 for (i = 0; i < 11; i++) { 4192 if (iwk_eep_band_7[i] == chan) { 4193 return (&sc->sc_eep_map.band_52_channels[i]); 4194 } 4195 } 4196 } else if (is_24G) { /* 2.4 GHz band */ 4197 for (i = 0; i < 14; i++) { 4198 if (iwk_eep_band_1[i] == channel) { 4199 return (&sc->sc_eep_map.band_1_channels[i]); 4200 } 4201 } 4202 } else { /* 5 GHz band */ 4203 for (i = 0; i < 13; i++) { 4204 if (iwk_eep_band_2[i] == channel) { 4205 return (&sc->sc_eep_map.band_2_channels[i]); 4206 } 4207 } 4208 for (i = 0; i < 12; i++) { 4209 if (iwk_eep_band_3[i] == channel) { 4210 return (&sc->sc_eep_map.band_3_channels[i]); 4211 } 4212 } 4213 for (i = 0; i < 11; i++) { 4214 if (iwk_eep_band_4[i] == channel) { 4215 return (&sc->sc_eep_map.band_4_channels[i]); 4216 } 4217 } 4218 for (i = 0; i < 6; i++) { 4219 if (iwk_eep_band_5[i] == channel) { 4220 return (&sc->sc_eep_map.band_5_channels[i]); 4221 } 4222 } 4223 } 4224 4225 return (NULL); 4226 } 4227 4228 /* 4229 * Determine which subband a given channel belongs 4230 * to in 2.4 GHz or 5 GHz band 4231 */ 4232 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel) 4233 { 4234 int32_t b_n = -1; 4235 4236 for (b_n = 0; b_n < EEP_TX_POWER_BANDS; b_n++) { 4237 if (0 == 
sc->sc_eep_map.calib_info.band_info_tbl[b_n].ch_from) { 4238 continue; 4239 } 4240 4241 if ((channel >= 4242 (uint16_t)sc->sc_eep_map.calib_info. 4243 band_info_tbl[b_n].ch_from) && 4244 (channel <= 4245 (uint16_t)sc->sc_eep_map.calib_info. 4246 band_info_tbl[b_n].ch_to)) { 4247 break; 4248 } 4249 } 4250 4251 return (b_n); 4252 } 4253 4254 /* Make a special division for interpolation operation */ 4255 static int iwk_division(int32_t num, int32_t denom, int32_t *res) 4256 { 4257 int32_t sign = 1; 4258 4259 if (num < 0) { 4260 sign = -sign; 4261 num = -num; 4262 } 4263 4264 if (denom < 0) { 4265 sign = -sign; 4266 denom = -denom; 4267 } 4268 4269 *res = ((num*2 + denom) / (denom*2)) * sign; 4270 4271 return (IWK_SUCCESS); 4272 } 4273 4274 /* Make interpolation operation */ 4275 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1, 4276 int32_t x2, int32_t y2) 4277 { 4278 int32_t val; 4279 4280 if (x2 == x1) { 4281 return (y1); 4282 } else { 4283 (void) iwk_division((x2-x)*(y1-y2), (x2-x1), &val); 4284 return (val + y2); 4285 } 4286 } 4287 4288 /* Get interpolation measurement data of a given channel for all chains. */ 4289 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel, 4290 struct iwk_eep_calib_channel_info *chan_info) 4291 { 4292 int32_t ban_n; 4293 uint32_t ch1_n, ch2_n; 4294 int32_t c, m; 4295 struct iwk_eep_calib_measure *m1_p, *m2_p, *m_p; 4296 4297 /* determine subband number */ 4298 ban_n = iwk_band_number(sc, channel); 4299 if (ban_n >= EEP_TX_POWER_BANDS) { 4300 return (DDI_FAILURE); 4301 } 4302 4303 ch1_n = 4304 (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch1.ch_num; 4305 ch2_n = 4306 (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch2.ch_num; 4307 4308 chan_info->ch_num = (uint8_t)channel; /* given channel number */ 4309 4310 /* 4311 * go through all chains on chipset 4312 */ 4313 for (c = 0; c < EEP_TX_POWER_TX_CHAINS; c++) { 4314 /* 4315 * go through all factory measurements 4316 */ 4317 for (m = 0; m < EEP_TX_POWER_MEASUREMENTS; m++) { 4318 m1_p = 4319 &(sc->sc_eep_map.calib_info. 4320 band_info_tbl[ban_n].ch1.measure[c][m]); 4321 m2_p = 4322 &(sc->sc_eep_map.calib_info.band_info_tbl[ban_n]. 4323 ch2.measure[c][m]); 4324 m_p = &(chan_info->measure[c][m]); 4325 4326 /* 4327 * make interpolation to get actual 4328 * Tx power for given channel 4329 */ 4330 m_p->actual_pow = iwk_interpolate_value(channel, 4331 ch1_n, m1_p->actual_pow, 4332 ch2_n, m2_p->actual_pow); 4333 4334 /* make interpolation to get index into gain table */ 4335 m_p->gain_idx = iwk_interpolate_value(channel, 4336 ch1_n, m1_p->gain_idx, 4337 ch2_n, m2_p->gain_idx); 4338 4339 /* make interpolation to get chipset temperature */ 4340 m_p->temperature = iwk_interpolate_value(channel, 4341 ch1_n, m1_p->temperature, 4342 ch2_n, m2_p->temperature); 4343 4344 /* 4345 * make interpolation to get power 4346 * amp detector level 4347 */ 4348 m_p->pa_det = iwk_interpolate_value(channel, ch1_n, 4349 m1_p->pa_det, 4350 ch2_n, m2_p->pa_det); 4351 } 4352 } 4353 4354 return (IWK_SUCCESS); 4355 } 4356 4357 /* 4358 * Calculate voltage compensation for Tx power. 
For more infomation, 4359 * please refer to iwk_calibration.h file 4360 */ 4361 static int32_t iwk_voltage_compensation(int32_t eep_voltage, 4362 int32_t curr_voltage) 4363 { 4364 int32_t vol_comp = 0; 4365 4366 if ((TX_POWER_IWK_ILLEGAL_VOLTAGE == eep_voltage) || 4367 (TX_POWER_IWK_ILLEGAL_VOLTAGE == curr_voltage)) { 4368 return (vol_comp); 4369 } 4370 4371 (void) iwk_division(curr_voltage-eep_voltage, 4372 TX_POWER_IWK_VOLTAGE_CODES_PER_03V, &vol_comp); 4373 4374 if (curr_voltage > eep_voltage) { 4375 vol_comp *= 2; 4376 } 4377 if ((vol_comp < -2) || (vol_comp > 2)) { 4378 vol_comp = 0; 4379 } 4380 4381 return (vol_comp); 4382 } 4383 4384 /* 4385 * Thermal compensation values for txpower for various frequency ranges ... 4386 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust 4387 */ 4388 static struct iwk_txpower_tempera_comp { 4389 int32_t degrees_per_05db_a; 4390 int32_t degrees_per_05db_a_denom; 4391 } txpower_tempera_comp_table[CALIB_CH_GROUP_MAX] = { 4392 {9, 2}, /* group 0 5.2, ch 34-43 */ 4393 {4, 1}, /* group 1 5.2, ch 44-70 */ 4394 {4, 1}, /* group 2 5.2, ch 71-124 */ 4395 {4, 1}, /* group 3 5.2, ch 125-200 */ 4396 {3, 1} /* group 4 2.4, ch all */ 4397 }; 4398 4399 /* 4400 * bit-rate-dependent table to prevent Tx distortion, in half-dB units, 4401 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. 4402 */ 4403 static int32_t back_off_table[] = { 4404 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */ 4405 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */ 4406 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */ 4407 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */ 4408 10 /* CCK */ 4409 }; 4410 4411 /* determine minimum Tx power index in gain table */ 4412 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G) 4413 { 4414 if ((!is_24G) && ((rate_pow_idx & 7) <= 4)) { 4415 return (MIN_TX_GAIN_INDEX_52GHZ_EXT); 4416 } 4417 4418 return (MIN_TX_GAIN_INDEX); 4419 } 4420 4421 /* 4422 * Determine DSP and radio gain according to temperature and other factors. 
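 * As a sketch of the core computation below (all numbers made up for
 * illustration): with interpo_gain_idx = 100, target_power = 28
 * (14 dBm in half-dBm units), interpo_actual_pow = 30, tempera_comp = 2,
 * voltage_compensation = -1 and atten_value = 0, the gain-table index is
 *	100 - (28 - 30) - 2 - (-1) + 0 = 101,
 * which is then clamped to the 0..107 table range before the DSP and
 * radio gains are looked up in gains_table[][].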
4423 * This function does the bulk of the Tx power calibration 4424 */ 4425 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc, 4426 struct iwk_tx_power_db *tp_db) 4427 { 4428 int is_24G, is_fat, is_high_chan = 0, is_mimo; 4429 int c, r; 4430 int32_t target_power; 4431 int32_t tx_grp = CALIB_CH_GROUP_MAX; 4432 uint16_t channel; 4433 uint8_t saturation_power; 4434 int32_t regu_power; 4435 int32_t curr_regu_power; 4436 struct iwk_eep_channel *eep_chan_p; 4437 struct iwk_eep_calib_channel_info eep_chan_calib; 4438 int32_t eep_voltage, init_voltage; 4439 int32_t voltage_compensation; 4440 int32_t temperature; 4441 int32_t degrees_per_05db_num; 4442 int32_t degrees_per_05db_denom; 4443 struct iwk_eep_calib_measure *measure_p; 4444 int32_t interpo_temp; 4445 int32_t power_limit; 4446 int32_t atten_value; 4447 int32_t tempera_comp[2]; 4448 int32_t interpo_gain_idx[2]; 4449 int32_t interpo_actual_pow[2]; 4450 union iwk_tx_power_dual_stream txpower_gains; 4451 int32_t txpower_gains_idx; 4452 4453 channel = sc->sc_config.chan; 4454 4455 /* 2.4 GHz or 5 GHz band */ 4456 is_24G = iwk_is_24G_band(sc); 4457 4458 /* fat channel or not */ 4459 is_fat = iwk_is_fat_channel(sc); 4460 4461 /* 4462 * use the low or high half channel number to 4463 * identify a fat channel 4464 */ 4465 if (is_fat && (sc->sc_config.flags & 4466 RXON_FLG_CONTROL_CHANNEL_LOC_HIGH_MSK)) { 4467 is_high_chan = 1; 4468 } 4469 4470 if ((channel > 0) && (channel < 200)) { 4471 /* get regulatory channel data from eeprom */ 4472 eep_chan_p = iwk_get_eep_channel(sc, channel, is_24G, 4473 is_fat, is_high_chan); 4474 if (NULL == eep_chan_p) { 4475 cmn_err(CE_WARN, 4476 "iwk_txpower_table_cmd_init(): " 4477 "can't get channel information\n"); 4478 return (DDI_FAILURE); 4479 } 4480 } else { 4481 cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): " 4482 "channel (%d) isn't in the proper range\n", 4483 channel); 4484 return (DDI_FAILURE); 4485 } 4486 4487 /* initial value of Tx power */ 4488 sc->sc_user_txpower = (int32_t)eep_chan_p->max_power_avg; 4489 if (sc->sc_user_txpower < IWK_TX_POWER_TARGET_POWER_MIN) { 4490 cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): " 4491 "user TX power is too weak\n"); 4492 return (DDI_FAILURE); 4493 } else if (sc->sc_user_txpower > IWK_TX_POWER_TARGET_POWER_MAX) { 4494 cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): " 4495 "user TX power is too strong\n"); 4496 return (DDI_FAILURE); 4497 } 4498 4499 target_power = 2 * sc->sc_user_txpower; 4500 4501 /* determine which group current channel belongs to */ 4502 tx_grp = iwk_txpower_grp(channel); 4503 if (tx_grp < 0) { 4504 return (tx_grp); 4505 } 4506 4507 4508 if (is_fat) { 4509 if (is_high_chan) { 4510 channel -= 2; 4511 } else { 4512 channel += 2; 4513 } 4514 } 4515 4516 /* determine saturation power */ 4517 if (is_24G) { 4518 saturation_power = 4519 sc->sc_eep_map.calib_info.saturation_power24; 4520 } else { 4521 saturation_power = 4522 sc->sc_eep_map.calib_info.saturation_power52; 4523 } 4524 4525 if (saturation_power < IWK_TX_POWER_SATURATION_MIN || 4526 saturation_power > IWK_TX_POWER_SATURATION_MAX) { 4527 if (is_24G) { 4528 saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_24; 4529 } else { 4530 saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_52; 4531 } 4532 } 4533 4534 /* determine regulatory power */ 4535 regu_power = (int32_t)eep_chan_p->max_power_avg * 2; 4536 if ((regu_power < IWK_TX_POWER_REGULATORY_MIN) || 4537 (regu_power > IWK_TX_POWER_REGULATORY_MAX)) { 4538 if (is_24G) { 4539 regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_24; 4540 } else { 4541
regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_52; 4542 } 4543 } 4544 4545 /* 4546 * get measurement data for current channel 4547 * such as temperature, index into the gain table, actual Tx power 4548 */ 4549 (void) iwk_channel_interpolate(sc, channel, &eep_chan_calib); 4550 4551 eep_voltage = (int32_t)sc->sc_eep_map.calib_info.voltage; 4552 init_voltage = (int32_t)sc->sc_card_alive_init.voltage; 4553 4554 /* calculate voltage compensation to Tx power */ 4555 voltage_compensation = 4556 iwk_voltage_compensation(eep_voltage, init_voltage); 4557 /* clamp chipset temperature to the supported range */ 4558 if (sc->sc_tempera >= IWK_TX_POWER_TEMPERATURE_MIN) { 4559 temperature = sc->sc_tempera; 4560 } else { 4561 temperature = IWK_TX_POWER_TEMPERATURE_MIN; 4562 } 4563 4564 if (temperature > IWK_TX_POWER_TEMPERATURE_MAX) { 4565 temperature = IWK_TX_POWER_TEMPERATURE_MAX; 4566 } 4567 4568 temperature = KELVIN_TO_CELSIUS(temperature); 4569 4570 degrees_per_05db_num = 4571 txpower_tempera_comp_table[tx_grp].degrees_per_05db_a; 4572 degrees_per_05db_denom = 4573 txpower_tempera_comp_table[tx_grp].degrees_per_05db_a_denom; 4574 4575 for (c = 0; c < 2; c++) { /* go through all chains */ 4576 measure_p = &eep_chan_calib.measure[c][1]; 4577 interpo_temp = measure_p->temperature; 4578 4579 /* determine temperature compensation to Tx power */ 4580 (void) iwk_division( 4581 (temperature-interpo_temp)*degrees_per_05db_denom, 4582 degrees_per_05db_num, &tempera_comp[c]); 4583 4584 interpo_gain_idx[c] = measure_p->gain_idx; 4585 interpo_actual_pow[c] = measure_p->actual_pow; 4586 } 4587 4588 /* 4589 * go through all rate entries in Tx power table 4590 */ 4591 for (r = 0; r < POWER_TABLE_NUM_ENTRIES; r++) { 4592 if (r & 0x8) { 4593 /* need to lower regulatory power for MIMO mode */ 4594 curr_regu_power = regu_power - 4595 IWK_TX_POWER_MIMO_REGULATORY_COMPENSATION; 4596 is_mimo = 1; 4597 } else { 4598 curr_regu_power = regu_power; 4599 is_mimo = 0; 4600 } 4601 4602 power_limit = saturation_power - back_off_table[r]; 4603 if (power_limit > curr_regu_power) { 4604 /* final Tx power limit */ 4605 power_limit = curr_regu_power; 4606 } 4607 4608 if (target_power > power_limit) { 4609 target_power = power_limit; /* final target Tx power */ 4610 } 4611 4612 for (c = 0; c < 2; c++) { /* go through all Tx chains */ 4613 if (is_mimo) { 4614 atten_value = 4615 sc->sc_card_alive_init.tx_atten[tx_grp][c]; 4616 } else { 4617 atten_value = 0; 4618 } 4619 4620 /* 4621 * calculate index in gain table 4622 * this step is very important 4623 */ 4624 txpower_gains_idx = interpo_gain_idx[c] - 4625 (target_power - interpo_actual_pow[c]) - 4626 tempera_comp[c] - voltage_compensation + 4627 atten_value; 4628 4629 if (txpower_gains_idx < 4630 iwk_min_power_index(r, is_24G)) { 4631 txpower_gains_idx = 4632 iwk_min_power_index(r, is_24G); 4633 } 4634 4635 if (!is_24G) { 4636 /* 4637 * support negative index for 5 GHz 4638 * band 4639 */ 4640 txpower_gains_idx += 9; 4641 } 4642 4643 if (POWER_TABLE_CCK_ENTRY == r) { 4644 /* for CCK mode, make necessary attenuation */ 4645 txpower_gains_idx += 4646 IWK_TX_POWER_CCK_COMPENSATION_C_STEP; 4647 } 4648 4649 if (txpower_gains_idx > 107) { 4650 txpower_gains_idx = 107; 4651 } else if (txpower_gains_idx < 0) { 4652 txpower_gains_idx = 0; 4653 } 4654 4655 /* search DSP and radio gains in gain table */ 4656 txpower_gains.s.radio_tx_gain[c] = 4657 gains_table[is_24G][txpower_gains_idx].radio; 4658 txpower_gains.s.dsp_predis_atten[c] = 4659 gains_table[is_24G][txpower_gains_idx].dsp; 4660 4661
IWK_DBG((IWK_DEBUG_CALIBRATION, 4662 "rate_index: %d, " 4663 "gain_index %d, c: %d,is_mimo: %d\n", 4664 r, txpower_gains_idx, c, is_mimo)); 4665 } 4666 4667 /* initialize Tx power table */ 4668 if (r < POWER_TABLE_NUM_HT_OFDM_ENTRIES) { 4669 tp_db->ht_ofdm_power[r].dw = txpower_gains.dw; 4670 } else { 4671 tp_db->legacy_cck_power.dw = txpower_gains.dw; 4672 } 4673 } 4674 4675 return (IWK_SUCCESS); 4676 } 4677 4678 /* 4679 * make Tx power calibration to adjust Tx power. 4680 * This is completed by sending out Tx power table command. 4681 */ 4682 static int iwk_tx_power_calibration(iwk_sc_t *sc) 4683 { 4684 iwk_tx_power_table_cmd_t cmd; 4685 int rv; 4686 4687 if (sc->sc_flags & IWK_F_SCANNING) { 4688 return (IWK_SUCCESS); 4689 } 4690 4691 /* necessary initialization to Tx power table command */ 4692 cmd.band = (uint8_t)iwk_is_24G_band(sc); 4693 cmd.channel = sc->sc_config.chan; 4694 cmd.channel_normal_width = 0; 4695 4696 /* initialize Tx power table */ 4697 rv = iwk_txpower_table_cmd_init(sc, &cmd.tx_power); 4698 if (rv) { 4699 cmn_err(CE_NOTE, "rv= %d\n", rv); 4700 return (rv); 4701 } 4702 4703 /* send out Tx power table command */ 4704 rv = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &cmd, sizeof (cmd), 1); 4705 if (rv) { 4706 return (rv); 4707 } 4708 4709 /* record current temperature */ 4710 sc->sc_last_tempera = sc->sc_tempera; 4711 4712 return (IWK_SUCCESS); 4713 } 4714 4715 /* This function is the handler of statistics notification from uCode */ 4716 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc) 4717 { 4718 int is_diff; 4719 struct iwk_notif_statistics *statistics_p = 4720 (struct iwk_notif_statistics *)(desc + 1); 4721 4722 mutex_enter(&sc->sc_glock); 4723 4724 is_diff = (sc->sc_statistics.general.temperature != 4725 statistics_p->general.temperature) || 4726 ((sc->sc_statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) != 4727 (statistics_p->flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)); 4728 4729 /* update statistics data */ 4730 (void) memcpy(&sc->sc_statistics, statistics_p, 4731 sizeof (struct iwk_notif_statistics)); 4732 4733 sc->sc_flags |= IWK_F_STATISTICS; 4734 4735 if (!(sc->sc_flags & IWK_F_SCANNING)) { 4736 /* make Receiver gain balance calibration */ 4737 (void) iwk_rxgain_diff(sc); 4738 4739 /* make Receiver sensitivity calibration */ 4740 (void) iwk_rx_sens(sc); 4741 } 4742 4743 4744 if (!is_diff) { 4745 mutex_exit(&sc->sc_glock); 4746 return; 4747 } 4748 4749 /* calibration current temperature of 4965 chipset */ 4750 sc->sc_tempera = iwk_curr_tempera(sc); 4751 4752 /* distinct temperature change will trigger Tx power calibration */ 4753 if (((sc->sc_tempera - sc->sc_last_tempera) >= 3) || 4754 ((sc->sc_last_tempera - sc->sc_tempera) >= 3)) { 4755 /* make Tx power calibration */ 4756 (void) iwk_tx_power_calibration(sc); 4757 } 4758 4759 mutex_exit(&sc->sc_glock); 4760 } 4761 4762 /* Determine this station is in associated state or not */ 4763 static int iwk_is_associated(iwk_sc_t *sc) 4764 { 4765 return (sc->sc_config.filter_flags & RXON_FILTER_ASSOC_MSK); 4766 } 4767 4768 /* Make necessary preparation for Receiver gain balance calibration */ 4769 static int iwk_rxgain_diff_init(iwk_sc_t *sc) 4770 { 4771 int i, rv; 4772 struct iwk_calibration_cmd cmd; 4773 struct iwk_rx_gain_diff *gain_diff_p; 4774 4775 gain_diff_p = &sc->sc_rxgain_diff; 4776 4777 (void) memset(gain_diff_p, 0, sizeof (struct iwk_rx_gain_diff)); 4778 (void) memset(&cmd, 0, sizeof (struct iwk_calibration_cmd)); 4779 4780 for (i = 0; i < RX_CHAINS_NUM; i++) { 4781 
gain_diff_p->gain_diff_chain[i] = CHAIN_GAIN_DIFF_INIT_VAL; 4782 } 4783 4784 if (iwk_is_associated(sc)) { 4785 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD; 4786 cmd.diff_gain_a = 0; 4787 cmd.diff_gain_b = 0; 4788 cmd.diff_gain_c = 0; 4789 4790 /* assume the gains of all Rx chains are balanced */ 4791 rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &cmd, 4792 sizeof (cmd), 1); 4793 if (rv) { 4794 return (rv); 4795 } 4796 4797 gain_diff_p->state = IWK_GAIN_DIFF_ACCUMULATE; 4798 } 4799 4800 return (IWK_SUCCESS); 4801 } 4802 4803 /* 4804 * perform Receiver gain balance calibration to balance Rx gain between 4805 * Rx chains and determine which chain is disconnected 4806 */ 4807 static int iwk_rxgain_diff(iwk_sc_t *sc) 4808 { 4809 int i, is_24G, rv; 4810 int max_beacon_chain_n; 4811 int min_noise_chain_n; 4812 uint16_t channel_n; 4813 int32_t beacon_diff; 4814 int32_t noise_diff; 4815 uint32_t noise_chain_a, noise_chain_b, noise_chain_c; 4816 uint32_t beacon_chain_a, beacon_chain_b, beacon_chain_c; 4817 struct iwk_calibration_cmd cmd; 4818 uint32_t beacon_aver[RX_CHAINS_NUM] = {0xFFFFFFFF}; 4819 uint32_t noise_aver[RX_CHAINS_NUM] = {0xFFFFFFFF}; 4820 struct statistics_rx_non_phy *rx_general_p = 4821 &sc->sc_statistics.rx.general; 4822 struct iwk_rx_gain_diff *gain_diff_p = &sc->sc_rxgain_diff; 4823 4824 if (INTERFERENCE_DATA_AVAILABLE != 4825 rx_general_p->interference_data_flag) { 4826 return (IWK_SUCCESS); 4827 } 4828 4829 if (IWK_GAIN_DIFF_ACCUMULATE != gain_diff_p->state) { 4830 return (IWK_SUCCESS); 4831 } 4832 4833 is_24G = iwk_is_24G_band(sc); 4834 channel_n = sc->sc_config.chan; /* channel number */ 4835 4836 if ((channel_n != (sc->sc_statistics.flag >> 16)) || 4837 ((STATISTICS_REPLY_FLG_BAND_24G_MSK == 4838 (sc->sc_statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && 4839 !is_24G)) { 4840 return (IWK_SUCCESS); 4841 } 4842 4843 /* Rx chain's noise strength from statistics notification */ 4844 noise_chain_a = rx_general_p->beacon_silence_rssi_a & 0xFF; 4845 noise_chain_b = rx_general_p->beacon_silence_rssi_b & 0xFF; 4846 noise_chain_c = rx_general_p->beacon_silence_rssi_c & 0xFF; 4847 4848 /* Rx chain's beacon strength from statistics notification */ 4849 beacon_chain_a = rx_general_p->beacon_rssi_a & 0xFF; 4850 beacon_chain_b = rx_general_p->beacon_rssi_b & 0xFF; 4851 beacon_chain_c = rx_general_p->beacon_rssi_c & 0xFF; 4852 4853 gain_diff_p->beacon_count++; 4854 4855 /* accumulate chain's noise strength */ 4856 gain_diff_p->noise_stren_a += noise_chain_a; 4857 gain_diff_p->noise_stren_b += noise_chain_b; 4858 gain_diff_p->noise_stren_c += noise_chain_c; 4859 4860 /* accumulate chain's beacon strength */ 4861 gain_diff_p->beacon_stren_a += beacon_chain_a; 4862 gain_diff_p->beacon_stren_b += beacon_chain_b; 4863 gain_diff_p->beacon_stren_c += beacon_chain_c; 4864 4865 if (BEACON_NUM_20 == gain_diff_p->beacon_count) { 4866 /* calculate average beacon strength */ 4867 beacon_aver[0] = (gain_diff_p->beacon_stren_a) / BEACON_NUM_20; 4868 beacon_aver[1] = (gain_diff_p->beacon_stren_b) / BEACON_NUM_20; 4869 beacon_aver[2] = (gain_diff_p->beacon_stren_c) / BEACON_NUM_20; 4870 4871 /* calculate average noise strength */ 4872 noise_aver[0] = (gain_diff_p->noise_stren_a) / BEACON_NUM_20; 4873 noise_aver[1] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20; 4874 noise_aver[2] = (gain_diff_p->noise_stren_c) / BEACON_NUM_20; 4875 4876 /* determine maximum beacon strength among 3 chains */ 4877 if ((beacon_aver[0] >= beacon_aver[1]) && 4878 (beacon_aver[0] >= beacon_aver[2])) { 4879 max_beacon_chain_n = 0; 4880
gain_diff_p->connected_chains = 1 << 0; 4881 } else if (beacon_aver[1] >= beacon_aver[2]) { 4882 max_beacon_chain_n = 1; 4883 gain_diff_p->connected_chains = 1 << 1; 4884 } else { 4885 max_beacon_chain_n = 2; 4886 gain_diff_p->connected_chains = 1 << 2; 4887 } 4888 4889 /* determine which chain is disconnected */ 4890 for (i = 0; i < RX_CHAINS_NUM; i++) { 4891 if (i != max_beacon_chain_n) { 4892 beacon_diff = beacon_aver[max_beacon_chain_n] - 4893 beacon_aver[i]; 4894 if (beacon_diff > MAX_ALLOWED_DIFF) { 4895 gain_diff_p->disconnect_chain[i] = 1; 4896 } else { 4897 gain_diff_p->connected_chains |= 4898 (1 << i); 4899 } 4900 } 4901 } 4902 4903 /* 4904 * if chain A and B are both disconnected, 4905 * assume the stronger in beacon strength is connected 4906 */ 4907 if (gain_diff_p->disconnect_chain[0] && 4908 gain_diff_p->disconnect_chain[1]) { 4909 if (beacon_aver[0] >= beacon_aver[1]) { 4910 gain_diff_p->disconnect_chain[0] = 0; 4911 gain_diff_p->connected_chains |= (1 << 0); 4912 } else { 4913 gain_diff_p->disconnect_chain[1] = 0; 4914 gain_diff_p->connected_chains |= (1 << 1); 4915 } 4916 } 4917 4918 /* determine minimum noise strength among 3 chains */ 4919 if (!gain_diff_p->disconnect_chain[0]) { 4920 min_noise_chain_n = 0; 4921 4922 for (i = 0; i < RX_CHAINS_NUM; i++) { 4923 if (!gain_diff_p->disconnect_chain[i] && 4924 (noise_aver[i] <= 4925 noise_aver[min_noise_chain_n])) { 4926 min_noise_chain_n = i; 4927 } 4928 4929 } 4930 } else { 4931 min_noise_chain_n = 1; 4932 4933 for (i = 0; i < RX_CHAINS_NUM; i++) { 4934 if (!gain_diff_p->disconnect_chain[i] && 4935 (noise_aver[i] <= 4936 noise_aver[min_noise_chain_n])) { 4937 min_noise_chain_n = i; 4938 } 4939 } 4940 } 4941 4942 gain_diff_p->gain_diff_chain[min_noise_chain_n] = 0; 4943 4944 /* determine gain difference between chains */ 4945 for (i = 0; i < RX_CHAINS_NUM; i++) { 4946 if (!gain_diff_p->disconnect_chain[i] && 4947 (CHAIN_GAIN_DIFF_INIT_VAL == 4948 gain_diff_p->gain_diff_chain[i])) { 4949 4950 noise_diff = noise_aver[i] - 4951 noise_aver[min_noise_chain_n]; 4952 gain_diff_p->gain_diff_chain[i] = 4953 (uint8_t)((noise_diff * 10) / 15); 4954 4955 if (gain_diff_p->gain_diff_chain[i] > 3) { 4956 gain_diff_p->gain_diff_chain[i] = 3; 4957 } 4958 4959 gain_diff_p->gain_diff_chain[i] |= (1 << 2); 4960 } else { 4961 gain_diff_p->gain_diff_chain[i] = 0; 4962 } 4963 } 4964 4965 if (!gain_diff_p->gain_diff_send) { 4966 gain_diff_p->gain_diff_send = 1; 4967 4968 (void) memset(&cmd, 0, sizeof (cmd)); 4969 4970 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD; 4971 cmd.diff_gain_a = gain_diff_p->gain_diff_chain[0]; 4972 cmd.diff_gain_b = gain_diff_p->gain_diff_chain[1]; 4973 cmd.diff_gain_c = gain_diff_p->gain_diff_chain[2]; 4974 4975 /* 4976 * send out PHY calibration command to 4977 * adjust every chain's Rx gain 4978 */ 4979 rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, 4980 &cmd, sizeof (cmd), 1); 4981 if (rv) { 4982 return (rv); 4983 } 4984 4985 gain_diff_p->state = IWK_GAIN_DIFF_CALIBRATED; 4986 } 4987 4988 gain_diff_p->beacon_stren_a = 0; 4989 gain_diff_p->beacon_stren_b = 0; 4990 gain_diff_p->beacon_stren_c = 0; 4991 4992 gain_diff_p->noise_stren_a = 0; 4993 gain_diff_p->noise_stren_b = 0; 4994 gain_diff_p->noise_stren_c = 0; 4995 } 4996 4997 return (IWK_SUCCESS); 4998 } 4999 5000 /* Make necessary preparation for Receiver sensitivity calibration */ 5001 static int iwk_rx_sens_init(iwk_sc_t *sc) 5002 { 5003 int i, rv; 5004 struct iwk_rx_sensitivity_cmd cmd; 5005 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens; 5006 5007 (void) 
memset(&cmd, 0, sizeof (struct iwk_rx_sensitivity_cmd)); 5008 (void) memset(rx_sens_p, 0, sizeof (struct iwk_rx_sensitivity)); 5009 5010 rx_sens_p->auto_corr_ofdm_x4 = 90; 5011 rx_sens_p->auto_corr_mrc_ofdm_x4 = 170; 5012 rx_sens_p->auto_corr_ofdm_x1 = 105; 5013 rx_sens_p->auto_corr_mrc_ofdm_x1 = 220; 5014 5015 rx_sens_p->auto_corr_cck_x4 = 125; 5016 rx_sens_p->auto_corr_mrc_cck_x4 = 200; 5017 rx_sens_p->min_energy_det_cck = 100; 5018 5019 rx_sens_p->flags &= (~IWK_SENSITIVITY_CALIB_ALLOW_MSK); 5020 rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK); 5021 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK); 5022 5023 rx_sens_p->last_bad_plcp_cnt_ofdm = 0; 5024 rx_sens_p->last_false_alarm_cnt_ofdm = 0; 5025 rx_sens_p->last_bad_plcp_cnt_cck = 0; 5026 rx_sens_p->last_false_alarm_cnt_cck = 0; 5027 5028 rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM; 5029 rx_sens_p->cck_prev_state = IWK_TOO_MANY_FALSE_ALARM; 5030 rx_sens_p->cck_no_false_alarm_num = 0; 5031 rx_sens_p->cck_beacon_idx = 0; 5032 5033 for (i = 0; i < 10; i++) { 5034 rx_sens_p->cck_beacon_min[i] = 0; 5035 } 5036 5037 rx_sens_p->cck_noise_idx = 0; 5038 rx_sens_p->cck_noise_ref = 0; 5039 5040 for (i = 0; i < 20; i++) { 5041 rx_sens_p->cck_noise_max[i] = 0; 5042 } 5043 5044 rx_sens_p->cck_noise_diff = 0; 5045 rx_sens_p->cck_no_false_alarm_num = 0; 5046 5047 cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE; 5048 5049 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] = 5050 rx_sens_p->auto_corr_ofdm_x4; 5051 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] = 5052 rx_sens_p->auto_corr_mrc_ofdm_x4; 5053 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] = 5054 rx_sens_p->auto_corr_ofdm_x1; 5055 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] = 5056 rx_sens_p->auto_corr_mrc_ofdm_x1; 5057 5058 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] = 5059 rx_sens_p->auto_corr_cck_x4; 5060 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] = 5061 rx_sens_p->auto_corr_mrc_cck_x4; 5062 cmd.table[MIN_ENERGY_CCK_DET_IDX] = rx_sens_p->min_energy_det_cck; 5063 5064 cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100; 5065 cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190; 5066 cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390; 5067 cmd.table[PTAM_ENERGY_TH_IDX] = 62; 5068 5069 /* at first, set up Rx to maximum sensitivity */ 5070 rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1); 5071 if (rv) { 5072 cmn_err(CE_WARN, "iwk_rx_sens_init(): " 5073 "in the process of initialization, " 5074 "failed to send rx sensitivity command\n"); 5075 return (rv); 5076 } 5077 5078 rx_sens_p->flags |= IWK_SENSITIVITY_CALIB_ALLOW_MSK; 5079 5080 return (IWK_SUCCESS); 5081 } 5082 5083 /* 5084 * make Receiver sensitivity calibration to adjust every chain's Rx sensitivity. 
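 * In outline: the statistics notification supplies false-alarm and bad
 * PLCP counts plus the actual receive time (channel_load); the OFDM and
 * CCK helpers below turn those into updated auto-correlation and energy
 * thresholds, and a SENSITIVITY_CMD is sent only when one of the
 * IWK_SENSITIVITY_*_UPDATE_MSK flags gets set. For example, when the
 * scaled false-alarm count of an interval exceeds 50 * actual_rx_time
 * the thresholds are raised (receiver made less sensitive); when it
 * falls below 5 * actual_rx_time they are lowered again.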
5085 * for more infomation, please refer to iwk_calibration.h file 5086 */ 5087 static int iwk_rx_sens(iwk_sc_t *sc) 5088 { 5089 int rv; 5090 uint32_t actual_rx_time; 5091 struct statistics_rx_non_phy *rx_general_p = 5092 &sc->sc_statistics.rx.general; 5093 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens; 5094 struct iwk_rx_sensitivity_cmd cmd; 5095 5096 if (!(rx_sens_p->flags & IWK_SENSITIVITY_CALIB_ALLOW_MSK)) { 5097 cmn_err(CE_WARN, "iwk_rx_sens(): " 5098 "sensitivity initialization has not finished.\n"); 5099 return (DDI_FAILURE); 5100 } 5101 5102 if (INTERFERENCE_DATA_AVAILABLE != 5103 rx_general_p->interference_data_flag) { 5104 cmn_err(CE_WARN, "iwk_rx_sens(): " 5105 "can't make rx sensitivity calibration," 5106 "because of invalid statistics\n"); 5107 return (DDI_FAILURE); 5108 } 5109 5110 actual_rx_time = rx_general_p->channel_load; 5111 if (!actual_rx_time) { 5112 IWK_DBG((IWK_DEBUG_CALIBRATION, "iwk_rx_sens(): " 5113 "can't make rx sensitivity calibration," 5114 "because has not enough rx time\n")); 5115 return (DDI_FAILURE); 5116 } 5117 5118 /* make Rx sensitivity calibration for OFDM mode */ 5119 rv = iwk_ofdm_sens(sc, actual_rx_time); 5120 if (rv) { 5121 return (rv); 5122 } 5123 5124 /* make Rx sensitivity calibration for CCK mode */ 5125 rv = iwk_cck_sens(sc, actual_rx_time); 5126 if (rv) { 5127 return (rv); 5128 } 5129 5130 /* 5131 * if the sum of false alarm had not changed, nothing will be done 5132 */ 5133 if ((!(rx_sens_p->flags & IWK_SENSITIVITY_OFDM_UPDATE_MSK)) && 5134 (!(rx_sens_p->flags & IWK_SENSITIVITY_CCK_UPDATE_MSK))) { 5135 return (IWK_SUCCESS); 5136 } 5137 5138 cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE; 5139 5140 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] = 5141 rx_sens_p->auto_corr_ofdm_x4; 5142 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] = 5143 rx_sens_p->auto_corr_mrc_ofdm_x4; 5144 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] = 5145 rx_sens_p->auto_corr_ofdm_x1; 5146 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] = 5147 rx_sens_p->auto_corr_mrc_ofdm_x1; 5148 5149 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] = 5150 rx_sens_p->auto_corr_cck_x4; 5151 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] = 5152 rx_sens_p->auto_corr_mrc_cck_x4; 5153 cmd.table[MIN_ENERGY_CCK_DET_IDX] = 5154 rx_sens_p->min_energy_det_cck; 5155 5156 cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100; 5157 cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190; 5158 cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390; 5159 cmd.table[PTAM_ENERGY_TH_IDX] = 62; 5160 5161 /* 5162 * send sensitivity command to complete actual sensitivity calibration 5163 */ 5164 rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1); 5165 if (rv) { 5166 cmn_err(CE_WARN, "iwk_rx_sens(): " 5167 "fail to send rx sensitivity command\n"); 5168 return (rv); 5169 } 5170 5171 return (IWK_SUCCESS); 5172 5173 } 5174 5175 /* 5176 * make Rx sensitivity calibration for CCK mode. 
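 * The false-alarm and bad-PLCP counters in the statistics notification
 * are free-running, so the code below keeps the last seen value and
 * works with deltas, allowing for 32-bit wraparound. For illustration:
 * if last_false_alarm_cnt_cck was 0xFFFFFFF0 and the new reading is
 * 0x10, the delta is taken as 0x10 + (0xFFFFFFFF - 0xFFFFFFF0) = 0x1F
 * instead of treating the smaller reading as going backwards.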
5177 * This is preparing parameters for Sensitivity command 5178 */ 5179 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time) 5180 { 5181 int i; 5182 uint8_t noise_a, noise_b, noise_c; 5183 uint8_t max_noise_abc, max_noise_20; 5184 uint32_t beacon_a, beacon_b, beacon_c; 5185 uint32_t min_beacon_abc, max_beacon_10; 5186 uint32_t cck_fa, cck_bp; 5187 uint32_t cck_sum_fa_bp; 5188 uint32_t temp; 5189 struct statistics_rx_non_phy *rx_general_p = 5190 &sc->sc_statistics.rx.general; 5191 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens; 5192 5193 cck_fa = sc->sc_statistics.rx.cck.false_alarm_cnt; 5194 cck_bp = sc->sc_statistics.rx.cck.plcp_err; 5195 5196 /* accumulate false alarm */ 5197 if (rx_sens_p->last_false_alarm_cnt_cck > cck_fa) { 5198 temp = rx_sens_p->last_false_alarm_cnt_cck; 5199 rx_sens_p->last_false_alarm_cnt_cck = cck_fa; 5200 cck_fa += (0xFFFFFFFF - temp); 5201 } else { 5202 cck_fa -= rx_sens_p->last_false_alarm_cnt_cck; 5203 rx_sens_p->last_false_alarm_cnt_cck += cck_fa; 5204 } 5205 5206 /* accumulate bad plcp */ 5207 if (rx_sens_p->last_bad_plcp_cnt_cck > cck_bp) { 5208 temp = rx_sens_p->last_bad_plcp_cnt_cck; 5209 rx_sens_p->last_bad_plcp_cnt_cck = cck_bp; 5210 cck_bp += (0xFFFFFFFF - temp); 5211 } else { 5212 cck_bp -= rx_sens_p->last_bad_plcp_cnt_cck; 5213 rx_sens_p->last_bad_plcp_cnt_cck += cck_bp; 5214 } 5215 5216 /* 5217 * calculate relative value 5218 */ 5219 cck_sum_fa_bp = (cck_fa + cck_bp) * 200 * 1024; 5220 rx_sens_p->cck_noise_diff = 0; 5221 5222 noise_a = 5223 (uint8_t)((rx_general_p->beacon_silence_rssi_a & 0xFF00) >> 8); 5224 noise_b = 5225 (uint8_t)((rx_general_p->beacon_silence_rssi_b & 0xFF00) >> 8); 5226 noise_c = 5227 (uint8_t)((rx_general_p->beacon_silence_rssi_c & 0xFF00) >> 8); 5228 5229 beacon_a = rx_general_p->beacon_energy_a; 5230 beacon_b = rx_general_p->beacon_energy_b; 5231 beacon_c = rx_general_p->beacon_energy_c; 5232 5233 /* determine maximum noise among 3 chains */ 5234 if ((noise_a >= noise_b) && (noise_a >= noise_c)) { 5235 max_noise_abc = noise_a; 5236 } else if (noise_b >= noise_c) { 5237 max_noise_abc = noise_b; 5238 } else { 5239 max_noise_abc = noise_c; 5240 } 5241 5242 /* record maximum noise among 3 chains */ 5243 rx_sens_p->cck_noise_max[rx_sens_p->cck_noise_idx] = max_noise_abc; 5244 rx_sens_p->cck_noise_idx++; 5245 if (rx_sens_p->cck_noise_idx >= 20) { 5246 rx_sens_p->cck_noise_idx = 0; 5247 } 5248 5249 /* determine maximum noise among 20 max noise */ 5250 max_noise_20 = rx_sens_p->cck_noise_max[0]; 5251 for (i = 0; i < 20; i++) { 5252 if (rx_sens_p->cck_noise_max[i] >= max_noise_20) { 5253 max_noise_20 = rx_sens_p->cck_noise_max[i]; 5254 } 5255 } 5256 5257 /* determine minimum beacon among 3 chains */ 5258 if ((beacon_a <= beacon_b) && (beacon_a <= beacon_c)) { 5259 min_beacon_abc = beacon_a; 5260 } else if (beacon_b <= beacon_c) { 5261 min_beacon_abc = beacon_b; 5262 } else { 5263 min_beacon_abc = beacon_c; 5264 } 5265 5266 /* record miminum beacon among 3 chains */ 5267 rx_sens_p->cck_beacon_min[rx_sens_p->cck_beacon_idx] = min_beacon_abc; 5268 rx_sens_p->cck_beacon_idx++; 5269 if (rx_sens_p->cck_beacon_idx >= 10) { 5270 rx_sens_p->cck_beacon_idx = 0; 5271 } 5272 5273 /* determine maximum beacon among 10 miminum beacon among 3 chains */ 5274 max_beacon_10 = rx_sens_p->cck_beacon_min[0]; 5275 for (i = 0; i < 10; i++) { 5276 if (rx_sens_p->cck_beacon_min[i] >= max_beacon_10) { 5277 max_beacon_10 = rx_sens_p->cck_beacon_min[i]; 5278 } 5279 } 5280 5281 /* add a little margin */ 5282 max_beacon_10 += 6; 5283 5284 /* 
record the count of having no false alarms */ 5285 if (cck_sum_fa_bp < (5 * actual_rx_time)) { 5286 rx_sens_p->cck_no_false_alarm_num++; 5287 } else { 5288 rx_sens_p->cck_no_false_alarm_num = 0; 5289 } 5290 5291 /* 5292 * adjust parameters in sensitivity command 5293 * according to different status. 5294 * for more infomation, please refer to iwk_calibration.h file 5295 */ 5296 if (cck_sum_fa_bp > (50 * actual_rx_time)) { 5297 rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM; 5298 5299 if (rx_sens_p->auto_corr_cck_x4 > 160) { 5300 rx_sens_p->cck_noise_ref = max_noise_20; 5301 5302 if (rx_sens_p->min_energy_det_cck > 2) { 5303 rx_sens_p->min_energy_det_cck -= 2; 5304 } 5305 } 5306 5307 if (rx_sens_p->auto_corr_cck_x4 < 160) { 5308 rx_sens_p->auto_corr_cck_x4 = 160 + 1; 5309 } else { 5310 if ((rx_sens_p->auto_corr_cck_x4 + 3) < 200) { 5311 rx_sens_p->auto_corr_cck_x4 += 3; 5312 } else { 5313 rx_sens_p->auto_corr_cck_x4 = 200; 5314 } 5315 } 5316 5317 if ((rx_sens_p->auto_corr_mrc_cck_x4 + 3) < 400) { 5318 rx_sens_p->auto_corr_mrc_cck_x4 += 3; 5319 } else { 5320 rx_sens_p->auto_corr_mrc_cck_x4 = 400; 5321 } 5322 5323 rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK; 5324 5325 } else if (cck_sum_fa_bp < (5 * actual_rx_time)) { 5326 rx_sens_p->cck_curr_state = IWK_TOO_FEW_FALSE_ALARM; 5327 5328 rx_sens_p->cck_noise_diff = (int32_t)rx_sens_p->cck_noise_ref - 5329 (int32_t)max_noise_20; 5330 5331 if ((rx_sens_p->cck_prev_state != IWK_TOO_MANY_FALSE_ALARM) && 5332 ((rx_sens_p->cck_noise_diff > 2) || 5333 (rx_sens_p->cck_no_false_alarm_num > 100))) { 5334 if ((rx_sens_p->min_energy_det_cck + 2) < 97) { 5335 rx_sens_p->min_energy_det_cck += 2; 5336 } else { 5337 rx_sens_p->min_energy_det_cck = 97; 5338 } 5339 5340 if ((rx_sens_p->auto_corr_cck_x4 - 3) > 125) { 5341 rx_sens_p->auto_corr_cck_x4 -= 3; 5342 } else { 5343 rx_sens_p->auto_corr_cck_x4 = 125; 5344 } 5345 5346 if ((rx_sens_p->auto_corr_mrc_cck_x4 -3) > 200) { 5347 rx_sens_p->auto_corr_mrc_cck_x4 -= 3; 5348 } else { 5349 rx_sens_p->auto_corr_mrc_cck_x4 = 200; 5350 } 5351 5352 rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK; 5353 } else { 5354 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK); 5355 } 5356 } else { 5357 rx_sens_p->cck_curr_state = IWK_GOOD_RANGE_FALSE_ALARM; 5358 5359 rx_sens_p->cck_noise_ref = max_noise_20; 5360 5361 if (IWK_TOO_MANY_FALSE_ALARM == rx_sens_p->cck_prev_state) { 5362 rx_sens_p->min_energy_det_cck -= 8; 5363 } 5364 5365 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK); 5366 } 5367 5368 if (rx_sens_p->min_energy_det_cck < max_beacon_10) { 5369 rx_sens_p->min_energy_det_cck = (uint16_t)max_beacon_10; 5370 } 5371 5372 rx_sens_p->cck_prev_state = rx_sens_p->cck_curr_state; 5373 5374 return (IWK_SUCCESS); 5375 } 5376 5377 /* 5378 * make Rx sensitivity calibration for OFDM mode. 
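 * Each threshold is nudged by one count per statistics interval and
 * kept inside a fixed window. For illustration, auto_corr_ofdm_x4
 * starts at 90 (see iwk_rx_sens_init()) and is stepped up toward 120
 * while false alarms are excessive, or back down toward 85 when they
 * are rare; the other OFDM thresholds move within 170..210, 105..140
 * and 220..270 in the same way.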
5379 * This is preparing parameters for Sensitivity command 5380 */ 5381 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time) 5382 { 5383 uint32_t temp; 5384 uint16_t temp1; 5385 uint32_t ofdm_fa, ofdm_bp; 5386 uint32_t ofdm_sum_fa_bp; 5387 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens; 5388 5389 ofdm_fa = sc->sc_statistics.rx.ofdm.false_alarm_cnt; 5390 ofdm_bp = sc->sc_statistics.rx.ofdm.plcp_err; 5391 5392 /* accumulate false alarm */ 5393 if (rx_sens_p->last_false_alarm_cnt_ofdm > ofdm_fa) { 5394 temp = rx_sens_p->last_false_alarm_cnt_ofdm; 5395 rx_sens_p->last_false_alarm_cnt_ofdm = ofdm_fa; 5396 ofdm_fa += (0xFFFFFFFF - temp); 5397 } else { 5398 ofdm_fa -= rx_sens_p->last_false_alarm_cnt_ofdm; 5399 rx_sens_p->last_false_alarm_cnt_ofdm += ofdm_fa; 5400 } 5401 5402 /* accumulate bad plcp */ 5403 if (rx_sens_p->last_bad_plcp_cnt_ofdm > ofdm_bp) { 5404 temp = rx_sens_p->last_bad_plcp_cnt_ofdm; 5405 rx_sens_p->last_bad_plcp_cnt_ofdm = ofdm_bp; 5406 ofdm_bp += (0xFFFFFFFF - temp); 5407 } else { 5408 ofdm_bp -= rx_sens_p->last_bad_plcp_cnt_ofdm; 5409 rx_sens_p->last_bad_plcp_cnt_ofdm += ofdm_bp; 5410 } 5411 5412 ofdm_sum_fa_bp = (ofdm_fa + ofdm_bp) * 200 * 1024; /* relative value */ 5413 5414 /* 5415 * adjust parameter in sensitivity command according to different status 5416 */ 5417 if (ofdm_sum_fa_bp > (50 * actual_rx_time)) { 5418 temp1 = rx_sens_p->auto_corr_ofdm_x4 + 1; 5419 rx_sens_p->auto_corr_ofdm_x4 = (temp1 <= 120) ? temp1 : 120; 5420 5421 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 + 1; 5422 rx_sens_p->auto_corr_mrc_ofdm_x4 = 5423 (temp1 <= 210) ? temp1 : 210; 5424 5425 temp1 = rx_sens_p->auto_corr_ofdm_x1 + 1; 5426 rx_sens_p->auto_corr_ofdm_x1 = (temp1 <= 140) ? temp1 : 140; 5427 5428 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 + 1; 5429 rx_sens_p->auto_corr_mrc_ofdm_x1 = 5430 (temp1 <= 270) ? temp1 : 270; 5431 5432 rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK; 5433 5434 } else if (ofdm_sum_fa_bp < (5 * actual_rx_time)) { 5435 temp1 = rx_sens_p->auto_corr_ofdm_x4 - 1; 5436 rx_sens_p->auto_corr_ofdm_x4 = (temp1 >= 85) ? temp1 : 85; 5437 5438 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 - 1; 5439 rx_sens_p->auto_corr_mrc_ofdm_x4 = 5440 (temp1 >= 170) ? temp1 : 170; 5441 5442 temp1 = rx_sens_p->auto_corr_ofdm_x1 - 1; 5443 rx_sens_p->auto_corr_ofdm_x1 = (temp1 >= 105) ? temp1 : 105; 5444 5445 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 - 1; 5446 rx_sens_p->auto_corr_mrc_ofdm_x1 = 5447 (temp1 >= 220) ? temp1 : 220; 5448 5449 rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK; 5450 5451 } else { 5452 rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK); 5453 } 5454 5455 return (IWK_SUCCESS); 5456 } 5457 5458 /* 5459 * 1) log_event_table_ptr indicates base of the event log. This traces 5460 * a 256-entry history of uCode execution within a circular buffer. 5461 * Its header format is: 5462 * 5463 * uint32_t log_size; log capacity (in number of entries) 5464 * uint32_t type; (1) timestamp with each entry, (0) no timestamp 5465 * uint32_t wraps; # times uCode has wrapped to top of circular buffer 5466 * uint32_t write_index; next circular buffer entry that uCode would fill 5467 * 5468 * The header is followed by the circular buffer of log entries. Entries 5469 * with timestamps have the following format: 5470 * 5471 * uint32_t event_id; range 0 - 1500 5472 * uint32_t timestamp; low 32 bits of TSF (of network, if associated) 5473 * uint32_t data; event_id-specific data value 5474 * 5475 * Entries without timestamps contain only event_id and data. 
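 * As a concrete illustration of the layout: with type == 1 the log is
 * four 32-bit header words (log_size, type, wraps, write_index)
 * followed immediately by log_size entries of three 32-bit words each
 * (event_id, timestamp, data); iwk_write_event_log() below walks this
 * structure with iwk_mem_read().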
5476 */ 5477 5478 /* 5479 * iwk_write_event_log - Write event log to dmesg 5480 */ 5481 static void iwk_write_event_log(iwk_sc_t *sc) 5482 { 5483 uint32_t log_event_table_ptr; /* Start address of event table */ 5484 uint32_t startptr; /* Start address of log data */ 5485 uint32_t logptr; /* address of log data entry */ 5486 uint32_t i, n, num_events; 5487 uint32_t event_id, data1, data2; /* log data */ 5488 5489 uint32_t log_size; /* log capacity (in number of entries) */ 5490 uint32_t type; /* (1) timestamp with each entry, (0) no timestamp */ 5491 uint32_t wraps; /* # times uCode has wrapped to */ 5492 /* the top of circular buffer */ 5493 uint32_t idx; /* index of entry to be filled in next */ 5494 5495 log_event_table_ptr = sc->sc_card_alive_run.log_event_table_ptr; 5496 if (!(log_event_table_ptr)) { 5497 IWK_DBG((IWK_DEBUG_EEPROM, "NULL event table pointer\n")); 5498 return; 5499 } 5500 5501 iwk_mac_access_enter(sc); 5502 5503 /* Read log header */ 5504 log_size = iwk_mem_read(sc, log_event_table_ptr); 5505 log_event_table_ptr += sizeof (uint32_t); /* addr of "type" */ 5506 type = iwk_mem_read(sc, log_event_table_ptr); 5507 log_event_table_ptr += sizeof (uint32_t); /* addr of "wraps" */ 5508 wraps = iwk_mem_read(sc, log_event_table_ptr); 5509 log_event_table_ptr += sizeof (uint32_t); /* addr of "idx" */ 5510 idx = iwk_mem_read(sc, log_event_table_ptr); 5511 startptr = log_event_table_ptr + 5512 sizeof (uint32_t); /* addr of start of log data */ 5513 if (!log_size && !wraps) { 5514 IWK_DBG((IWK_DEBUG_EEPROM, "Empty log\n")); 5515 iwk_mac_access_exit(sc); 5516 return; 5517 } 5518 5519 if (!wraps) { 5520 num_events = idx; 5521 logptr = startptr; 5522 } else { 5523 num_events = log_size - idx; 5524 n = type ? 3 : 2; /* 32-bit words per log entry */ 5525 logptr = startptr + (idx * n * sizeof (uint32_t)); 5526 } 5527 5528 for (i = 0; i < num_events; i++) { 5529 event_id = iwk_mem_read(sc, logptr); 5530 logptr += sizeof (uint32_t); 5531 data1 = iwk_mem_read(sc, logptr); 5532 logptr += sizeof (uint32_t); 5533 if (type == 0) { /* no timestamp */ 5534 IWK_DBG((IWK_DEBUG_EEPROM, "Event ID=%d, Data=0x%x\n", 5535 event_id, data1)); 5536 } else { /* timestamp */ 5537 data2 = iwk_mem_read(sc, logptr); 5538 printf("Time=%d, Event ID=%d, Data=0x%x\n", 5539 data1, event_id, data2); 5540 IWK_DBG((IWK_DEBUG_EEPROM, 5541 "Time=%d, Event ID=%d, Data=0x%x\n", 5542 data1, event_id, data2)); 5543 logptr += sizeof (uint32_t); 5544 } 5545 } 5546 5547 /* 5548 * Print the wrapped around entries, if any 5549 */ 5550 if (wraps) { 5551 logptr = startptr; 5552 for (i = 0; i < idx; i++) { 5553 event_id = iwk_mem_read(sc, logptr); 5554 logptr += sizeof (uint32_t); 5555 data1 = iwk_mem_read(sc, logptr); 5556 logptr += sizeof (uint32_t); 5557 if (type == 0) { /* no timestamp */ 5558 IWK_DBG((IWK_DEBUG_EEPROM, 5559 "Event ID=%d, Data=0x%x\n", event_id, data1)); 5560 } else { /* timestamp */ 5561 data2 = iwk_mem_read(sc, logptr); 5562 IWK_DBG((IWK_DEBUG_EEPROM, 5563 "Time = %d, Event ID=%d, Data=0x%x\n", 5564 data1, event_id, data2)); 5565 logptr += sizeof (uint32_t); 5566 } 5567 } 5568 } 5569 5570 iwk_mac_access_exit(sc); 5571 } 5572 5573 /* 5574 * error_event_table_ptr indicates base of the error log. This contains 5575 * information about any uCode error that occurs.
For 4965, the format is: 5576 * 5577 * uint32_t valid; (nonzero) valid, (0) log is empty 5578 * uint32_t error_id; type of error 5579 * uint32_t pc; program counter 5580 * uint32_t blink1; branch link 5581 * uint32_t blink2; branch link 5582 * uint32_t ilink1; interrupt link 5583 * uint32_t ilink2; interrupt link 5584 * uint32_t data1; error-specific data 5585 * uint32_t data2; error-specific data 5586 * uint32_t line; source code line of error 5587 * uint32_t bcon_time; beacon timer 5588 * uint32_t tsf_low; network timestamp function timer 5589 * uint32_t tsf_hi; network timestamp function timer 5590 */ 5591 /* 5592 * iwk_write_error_log - Write error log to dmesg 5593 */ 5594 static void iwk_write_error_log(iwk_sc_t *sc) 5595 { 5596 uint32_t err_ptr; /* Start address of error log */ 5597 uint32_t valid; /* is error log valid */ 5598 5599 err_ptr = sc->sc_card_alive_run.error_event_table_ptr; 5600 if (!(err_ptr)) { 5601 IWK_DBG((IWK_DEBUG_EEPROM, "NULL error table pointer\n")); 5602 return; 5603 } 5604 5605 iwk_mac_access_enter(sc); 5606 5607 valid = iwk_mem_read(sc, err_ptr); 5608 if (!(valid)) { 5609 IWK_DBG((IWK_DEBUG_EEPROM, "Error data not valid\n")); 5610 iwk_mac_access_exit(sc); 5611 return; 5612 } 5613 err_ptr += sizeof (uint32_t); 5614 IWK_DBG((IWK_DEBUG_EEPROM, "err=%d ", iwk_mem_read(sc, err_ptr))); 5615 err_ptr += sizeof (uint32_t); 5616 IWK_DBG((IWK_DEBUG_EEPROM, "pc=0x%X ", iwk_mem_read(sc, err_ptr))); 5617 err_ptr += sizeof (uint32_t); 5618 IWK_DBG((IWK_DEBUG_EEPROM, 5619 "branch link1=0x%X ", iwk_mem_read(sc, err_ptr))); 5620 err_ptr += sizeof (uint32_t); 5621 IWK_DBG((IWK_DEBUG_EEPROM, 5622 "branch link2=0x%X ", iwk_mem_read(sc, err_ptr))); 5623 err_ptr += sizeof (uint32_t); 5624 IWK_DBG((IWK_DEBUG_EEPROM, 5625 "interrupt link1=0x%X ", iwk_mem_read(sc, err_ptr))); 5626 err_ptr += sizeof (uint32_t); 5627 IWK_DBG((IWK_DEBUG_EEPROM, 5628 "interrupt link2=0x%X ", iwk_mem_read(sc, err_ptr))); 5629 err_ptr += sizeof (uint32_t); 5630 IWK_DBG((IWK_DEBUG_EEPROM, "data1=0x%X ", iwk_mem_read(sc, err_ptr))); 5631 err_ptr += sizeof (uint32_t); 5632 IWK_DBG((IWK_DEBUG_EEPROM, "data2=0x%X ", iwk_mem_read(sc, err_ptr))); 5633 err_ptr += sizeof (uint32_t); 5634 IWK_DBG((IWK_DEBUG_EEPROM, "line=%d ", iwk_mem_read(sc, err_ptr))); 5635 err_ptr += sizeof (uint32_t); 5636 IWK_DBG((IWK_DEBUG_EEPROM, "bcon_time=%d ", iwk_mem_read(sc, err_ptr))); 5637 err_ptr += sizeof (uint32_t); 5638 IWK_DBG((IWK_DEBUG_EEPROM, "tsf_low=%d ", iwk_mem_read(sc, err_ptr))); 5639 err_ptr += sizeof (uint32_t); 5640 IWK_DBG((IWK_DEBUG_EEPROM, "tsf_hi=%d\n", iwk_mem_read(sc, err_ptr))); 5641 5642 iwk_mac_access_exit(sc); 5643 } 5644