1 /* 2 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 3 * Use is subject to license terms. 4 */ 5 6 /* 7 * Copyright (c) 2007, Intel Corporation 8 * All rights reserved. 9 */ 10 11 /* 12 * Copyright (c) 2006 13 * Copyright (c) 2007 14 * Damien Bergamini <damien.bergamini@free.fr> 15 * 16 * Permission to use, copy, modify, and distribute this software for any 17 * purpose with or without fee is hereby granted, provided that the above 18 * copyright notice and this permission notice appear in all copies. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 21 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 22 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 23 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 24 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 25 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 26 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 27 */ 28 29 /* 30 * Driver for Intel PRO/Wireless 4965AGN(kedron) 802.11 network adapters. 31 */ 32 33 #include <sys/types.h> 34 #include <sys/byteorder.h> 35 #include <sys/conf.h> 36 #include <sys/cmn_err.h> 37 #include <sys/stat.h> 38 #include <sys/ddi.h> 39 #include <sys/sunddi.h> 40 #include <sys/strsubr.h> 41 #include <sys/ethernet.h> 42 #include <inet/common.h> 43 #include <inet/nd.h> 44 #include <inet/mi.h> 45 #include <sys/note.h> 46 #include <sys/stream.h> 47 #include <sys/strsun.h> 48 #include <sys/modctl.h> 49 #include <sys/devops.h> 50 #include <sys/dlpi.h> 51 #include <sys/mac.h> 52 #include <sys/mac_wifi.h> 53 #include <sys/net80211.h> 54 #include <sys/net80211_proto.h> 55 #include <sys/varargs.h> 56 #include <sys/policy.h> 57 #include <sys/pci.h> 58 59 #include "iwk_calibration.h" 60 #include "iwk_hw.h" 61 #include "iwk_eeprom.h" 62 #include "iwk2_var.h" 63 #include <inet/wifi_ioctl.h> 64 65 #ifdef DEBUG 66 #define IWK_DEBUG_80211 (1 << 0) 67 #define IWK_DEBUG_CMD (1 << 1) 68 #define IWK_DEBUG_DMA (1 << 2) 69 #define IWK_DEBUG_EEPROM (1 << 3) 70 #define IWK_DEBUG_FW (1 << 4) 71 #define IWK_DEBUG_HW (1 << 5) 72 #define IWK_DEBUG_INTR (1 << 6) 73 #define IWK_DEBUG_MRR (1 << 7) 74 #define IWK_DEBUG_PIO (1 << 8) 75 #define IWK_DEBUG_RX (1 << 9) 76 #define IWK_DEBUG_SCAN (1 << 10) 77 #define IWK_DEBUG_TX (1 << 11) 78 #define IWK_DEBUG_RATECTL (1 << 12) 79 #define IWK_DEBUG_RADIO (1 << 13) 80 #define IWK_DEBUG_RESUME (1 << 14) 81 #define IWK_DEBUG_CALIBRATION (1 << 15) 82 uint32_t iwk_dbg_flags = 0; 83 #define IWK_DBG(x) \ 84 iwk_dbg x 85 #else 86 #define IWK_DBG(x) 87 #endif 88 89 static void *iwk_soft_state_p = NULL; 90 static uint8_t iwk_fw_bin [] = { 91 #include "fw-iw/iw4965.ucode.hex" 92 }; 93 94 /* DMA attributes for a shared page */ 95 static ddi_dma_attr_t sh_dma_attr = { 96 DMA_ATTR_V0, /* version of this structure */ 97 0, /* lowest usable address */ 98 0xffffffffU, /* highest usable address */ 99 0xffffffffU, /* maximum DMAable byte count */ 100 0x1000, /* alignment in bytes */ 101 0x1000, /* burst sizes (any?) 
*/ 102 1, /* minimum transfer */ 103 0xffffffffU, /* maximum transfer */ 104 0xffffffffU, /* maximum segment length */ 105 1, /* maximum number of segments */ 106 1, /* granularity */ 107 0, /* flags (reserved) */ 108 }; 109 110 /* DMA attributes for a keep warm DRAM descriptor */ 111 static ddi_dma_attr_t kw_dma_attr = { 112 DMA_ATTR_V0, /* version of this structure */ 113 0, /* lowest usable address */ 114 0xffffffffU, /* highest usable address */ 115 0xffffffffU, /* maximum DMAable byte count */ 116 0x1000, /* alignment in bytes */ 117 0x1000, /* burst sizes (any?) */ 118 1, /* minimum transfer */ 119 0xffffffffU, /* maximum transfer */ 120 0xffffffffU, /* maximum segment length */ 121 1, /* maximum number of segments */ 122 1, /* granularity */ 123 0, /* flags (reserved) */ 124 }; 125 126 /* DMA attributes for a ring descriptor */ 127 static ddi_dma_attr_t ring_desc_dma_attr = { 128 DMA_ATTR_V0, /* version of this structure */ 129 0, /* lowest usable address */ 130 0xffffffffU, /* highest usable address */ 131 0xffffffffU, /* maximum DMAable byte count */ 132 0x100, /* alignment in bytes */ 133 0x100, /* burst sizes (any?) */ 134 1, /* minimum transfer */ 135 0xffffffffU, /* maximum transfer */ 136 0xffffffffU, /* maximum segment length */ 137 1, /* maximum number of segments */ 138 1, /* granularity */ 139 0, /* flags (reserved) */ 140 }; 141 142 /* DMA attributes for a cmd */ 143 static ddi_dma_attr_t cmd_dma_attr = { 144 DMA_ATTR_V0, /* version of this structure */ 145 0, /* lowest usable address */ 146 0xffffffffU, /* highest usable address */ 147 0xffffffffU, /* maximum DMAable byte count */ 148 4, /* alignment in bytes */ 149 0x100, /* burst sizes (any?) */ 150 1, /* minimum transfer */ 151 0xffffffffU, /* maximum transfer */ 152 0xffffffffU, /* maximum segment length */ 153 1, /* maximum number of segments */ 154 1, /* granularity */ 155 0, /* flags (reserved) */ 156 }; 157 158 /* DMA attributes for a rx buffer */ 159 static ddi_dma_attr_t rx_buffer_dma_attr = { 160 DMA_ATTR_V0, /* version of this structure */ 161 0, /* lowest usable address */ 162 0xffffffffU, /* highest usable address */ 163 0xffffffffU, /* maximum DMAable byte count */ 164 0x100, /* alignment in bytes */ 165 0x100, /* burst sizes (any?) */ 166 1, /* minimum transfer */ 167 0xffffffffU, /* maximum transfer */ 168 0xffffffffU, /* maximum segment length */ 169 1, /* maximum number of segments */ 170 1, /* granularity */ 171 0, /* flags (reserved) */ 172 }; 173 174 /* 175 * DMA attributes for a tx buffer. 176 * the maximum number of segments is 4 for the hardware. 177 * now all the wifi drivers put the whole frame in a single 178 * descriptor, so we define the maximum number of segments 1, 179 * just the same as the rx_buffer. we consider leverage the HW 180 * ability in the future, that is why we don't define rx and tx 181 * buffer_dma_attr as the same. 182 */ 183 static ddi_dma_attr_t tx_buffer_dma_attr = { 184 DMA_ATTR_V0, /* version of this structure */ 185 0, /* lowest usable address */ 186 0xffffffffU, /* highest usable address */ 187 0xffffffffU, /* maximum DMAable byte count */ 188 4, /* alignment in bytes */ 189 0x100, /* burst sizes (any?) 
*/ 190 1, /* minimum transfer */ 191 0xffffffffU, /* maximum transfer */ 192 0xffffffffU, /* maximum segment length */ 193 1, /* maximum number of segments */ 194 1, /* granularity */ 195 0, /* flags (reserved) */ 196 }; 197 198 /* DMA attributes for text and data part in the firmware */ 199 static ddi_dma_attr_t fw_dma_attr = { 200 DMA_ATTR_V0, /* version of this structure */ 201 0, /* lowest usable address */ 202 0xffffffffU, /* highest usable address */ 203 0x7fffffff, /* maximum DMAable byte count */ 204 0x10, /* alignment in bytes */ 205 0x100, /* burst sizes (any?) */ 206 1, /* minimum transfer */ 207 0xffffffffU, /* maximum transfer */ 208 0xffffffffU, /* maximum segment length */ 209 1, /* maximum number of segments */ 210 1, /* granularity */ 211 0, /* flags (reserved) */ 212 }; 213 214 215 /* regs access attributes */ 216 static ddi_device_acc_attr_t iwk_reg_accattr = { 217 DDI_DEVICE_ATTR_V0, 218 DDI_STRUCTURE_LE_ACC, 219 DDI_STRICTORDER_ACC, 220 DDI_DEFAULT_ACC 221 }; 222 223 /* DMA access attributes */ 224 static ddi_device_acc_attr_t iwk_dma_accattr = { 225 DDI_DEVICE_ATTR_V0, 226 DDI_NEVERSWAP_ACC, 227 DDI_STRICTORDER_ACC, 228 DDI_DEFAULT_ACC 229 }; 230 231 static int iwk_ring_init(iwk_sc_t *); 232 static void iwk_ring_free(iwk_sc_t *); 233 static int iwk_alloc_shared(iwk_sc_t *); 234 static void iwk_free_shared(iwk_sc_t *); 235 static int iwk_alloc_kw(iwk_sc_t *); 236 static void iwk_free_kw(iwk_sc_t *); 237 static int iwk_alloc_fw_dma(iwk_sc_t *); 238 static void iwk_free_fw_dma(iwk_sc_t *); 239 static int iwk_alloc_rx_ring(iwk_sc_t *); 240 static void iwk_reset_rx_ring(iwk_sc_t *); 241 static void iwk_free_rx_ring(iwk_sc_t *); 242 static int iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *, 243 int, int); 244 static void iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *); 245 static void iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *); 246 247 static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *); 248 static void iwk_node_free(ieee80211_node_t *); 249 static int iwk_newstate(ieee80211com_t *, enum ieee80211_state, int); 250 static int iwk_key_set(ieee80211com_t *, const struct ieee80211_key *, 251 const uint8_t mac[IEEE80211_ADDR_LEN]); 252 static void iwk_mac_access_enter(iwk_sc_t *); 253 static void iwk_mac_access_exit(iwk_sc_t *); 254 static uint32_t iwk_reg_read(iwk_sc_t *, uint32_t); 255 static void iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t); 256 static void iwk_reg_write_region_4(iwk_sc_t *, uint32_t, 257 uint32_t *, int); 258 static int iwk_load_firmware(iwk_sc_t *); 259 static void iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *, 260 iwk_rx_data_t *); 261 static void iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *, 262 iwk_rx_data_t *); 263 static void iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *); 264 static uint_t iwk_intr(caddr_t, caddr_t); 265 static int iwk_eep_load(iwk_sc_t *sc); 266 static void iwk_get_mac_from_eep(iwk_sc_t *sc); 267 static int iwk_eep_sem_down(iwk_sc_t *sc); 268 static void iwk_eep_sem_up(iwk_sc_t *sc); 269 static uint_t iwk_rx_softintr(caddr_t, caddr_t); 270 static uint8_t iwk_rate_to_plcp(int); 271 static int iwk_cmd(iwk_sc_t *, int, const void *, int, int); 272 static void iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t); 273 static int iwk_hw_set_before_auth(iwk_sc_t *); 274 static int iwk_scan(iwk_sc_t *); 275 static int iwk_config(iwk_sc_t *); 276 static void iwk_stop_master(iwk_sc_t *); 277 static int iwk_power_up(iwk_sc_t *); 278 static int iwk_preinit(iwk_sc_t *); 279 static int iwk_init(iwk_sc_t *); 280 static void iwk_stop(iwk_sc_t *); 281 static 
void iwk_amrr_init(iwk_amrr_t *); 282 static void iwk_amrr_timeout(iwk_sc_t *); 283 static void iwk_amrr_ratectl(void *, ieee80211_node_t *); 284 static int32_t iwk_curr_tempera(iwk_sc_t *sc); 285 static int iwk_tx_power_calibration(iwk_sc_t *sc); 286 static inline int iwk_is_24G_band(iwk_sc_t *sc); 287 static inline int iwk_is_fat_channel(iwk_sc_t *sc); 288 static int iwk_txpower_grp(uint16_t channel); 289 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc, 290 uint16_t channel, 291 int is_24G, int is_fat, int is_hi_chan); 292 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel); 293 static int iwk_division(int32_t num, int32_t denom, int32_t *res); 294 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1, 295 int32_t x2, int32_t y2); 296 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel, 297 struct iwk_eep_calib_channel_info *chan_info); 298 static int32_t iwk_voltage_compensation(int32_t eep_voltage, 299 int32_t curr_voltage); 300 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G); 301 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc, 302 struct iwk_tx_power_db *tp_db); 303 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc); 304 static int iwk_is_associated(iwk_sc_t *sc); 305 static int iwk_rxgain_diff_init(iwk_sc_t *sc); 306 static int iwk_rxgain_diff(iwk_sc_t *sc); 307 static int iwk_rx_sens_init(iwk_sc_t *sc); 308 static int iwk_rx_sens(iwk_sc_t *sc); 309 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time); 310 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time); 311 312 static void iwk_write_event_log(iwk_sc_t *); 313 static void iwk_write_error_log(iwk_sc_t *); 314 315 static int iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 316 static int iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 317 static int iwk_quiesce(dev_info_t *dip); 318 319 /* 320 * GLD specific operations 321 */ 322 static int iwk_m_stat(void *arg, uint_t stat, uint64_t *val); 323 static int iwk_m_start(void *arg); 324 static void iwk_m_stop(void *arg); 325 static int iwk_m_unicst(void *arg, const uint8_t *macaddr); 326 static int iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m); 327 static int iwk_m_promisc(void *arg, boolean_t on); 328 static mblk_t *iwk_m_tx(void *arg, mblk_t *mp); 329 static void iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp); 330 static int iwk_m_setprop(void *arg, const char *pr_name, 331 mac_prop_id_t wldp_pr_name, uint_t wldp_length, const void *wldp_buf); 332 static int iwk_m_getprop(void *arg, const char *pr_name, 333 mac_prop_id_t wldp_pr_name, uint_t pr_flags, uint_t wldp_length, 334 void *wldp_buf); 335 static void iwk_destroy_locks(iwk_sc_t *sc); 336 static int iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type); 337 static void iwk_thread(iwk_sc_t *sc); 338 339 /* 340 * Supported rates for 802.11b/g modes (in 500Kbps unit). 341 * 11a and 11n support will be added later. 
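 *
 * The values are in 500Kbps units: 2 = 1Mbps, 4 = 2Mbps, 11 = 5.5Mbps
 * and 22 = 11Mbps for the 11b set, while the additional OFDM entries
 * in the 11g set (12, 18, 24, 36, 48, 72, 96, 108) decode to
 * 6/9/12/18/24/36/48/54Mbps.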
342 */ 343 static const struct ieee80211_rateset iwk_rateset_11b = 344 { 4, { 2, 4, 11, 22 } }; 345 346 static const struct ieee80211_rateset iwk_rateset_11g = 347 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } }; 348 349 /* 350 * For mfthread only 351 */ 352 extern pri_t minclsyspri; 353 354 #define DRV_NAME_4965 "iwk" 355 356 /* 357 * Module Loading Data & Entry Points 358 */ 359 DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach, 360 iwk_detach, nodev, NULL, D_MP, NULL, iwk_quiesce); 361 362 static struct modldrv iwk_modldrv = { 363 &mod_driverops, 364 "Intel(R) 4965AGN driver(N)", 365 &iwk_devops 366 }; 367 368 static struct modlinkage iwk_modlinkage = { 369 MODREV_1, 370 &iwk_modldrv, 371 NULL 372 }; 373 374 int 375 _init(void) 376 { 377 int status; 378 379 status = ddi_soft_state_init(&iwk_soft_state_p, 380 sizeof (iwk_sc_t), 1); 381 if (status != DDI_SUCCESS) 382 return (status); 383 384 mac_init_ops(&iwk_devops, DRV_NAME_4965); 385 status = mod_install(&iwk_modlinkage); 386 if (status != DDI_SUCCESS) { 387 mac_fini_ops(&iwk_devops); 388 ddi_soft_state_fini(&iwk_soft_state_p); 389 } 390 391 return (status); 392 } 393 394 int 395 _fini(void) 396 { 397 int status; 398 399 status = mod_remove(&iwk_modlinkage); 400 if (status == DDI_SUCCESS) { 401 mac_fini_ops(&iwk_devops); 402 ddi_soft_state_fini(&iwk_soft_state_p); 403 } 404 405 return (status); 406 } 407 408 int 409 _info(struct modinfo *mip) 410 { 411 return (mod_info(&iwk_modlinkage, mip)); 412 } 413 414 /* 415 * Mac Call Back entries 416 */ 417 mac_callbacks_t iwk_m_callbacks = { 418 MC_IOCTL | MC_SETPROP | MC_GETPROP, 419 iwk_m_stat, 420 iwk_m_start, 421 iwk_m_stop, 422 iwk_m_promisc, 423 iwk_m_multicst, 424 iwk_m_unicst, 425 iwk_m_tx, 426 NULL, 427 iwk_m_ioctl, 428 NULL, 429 NULL, 430 NULL, 431 iwk_m_setprop, 432 iwk_m_getprop 433 }; 434 435 #ifdef DEBUG 436 void 437 iwk_dbg(uint32_t flags, const char *fmt, ...) 
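/*
 * Debug sink for the IWK_DBG() macro defined above.  Callers use double
 * parentheses, e.g. IWK_DBG((IWK_DEBUG_RX, "rx intr\n")), so the whole
 * argument list expands into a single iwk_dbg() call.  Output is gated
 * by the iwk_dbg_flags bitmask, which defaults to 0; set the desired
 * IWK_DEBUG_* bits (from a kernel debugger, for instance) to enable a
 * message category.
 */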
438 { 439 va_list ap; 440 441 if (flags & iwk_dbg_flags) { 442 va_start(ap, fmt); 443 vcmn_err(CE_NOTE, fmt, ap); 444 va_end(ap); 445 } 446 } 447 #endif 448 449 /* 450 * device operations 451 */ 452 int 453 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 454 { 455 iwk_sc_t *sc; 456 ieee80211com_t *ic; 457 int instance, err, i; 458 char strbuf[32]; 459 wifi_data_t wd = { 0 }; 460 mac_register_t *macp; 461 462 int intr_type; 463 int intr_count; 464 int intr_actual; 465 466 switch (cmd) { 467 case DDI_ATTACH: 468 break; 469 case DDI_RESUME: 470 sc = ddi_get_soft_state(iwk_soft_state_p, 471 ddi_get_instance(dip)); 472 ASSERT(sc != NULL); 473 mutex_enter(&sc->sc_glock); 474 sc->sc_flags &= ~IWK_F_SUSPEND; 475 mutex_exit(&sc->sc_glock); 476 if (sc->sc_flags & IWK_F_RUNNING) { 477 (void) iwk_init(sc); 478 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1); 479 } 480 IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n")); 481 return (DDI_SUCCESS); 482 default: 483 err = DDI_FAILURE; 484 goto attach_fail1; 485 } 486 487 instance = ddi_get_instance(dip); 488 err = ddi_soft_state_zalloc(iwk_soft_state_p, instance); 489 if (err != DDI_SUCCESS) { 490 cmn_err(CE_WARN, 491 "iwk_attach(): failed to allocate soft state\n"); 492 goto attach_fail1; 493 } 494 sc = ddi_get_soft_state(iwk_soft_state_p, instance); 495 sc->sc_dip = dip; 496 497 err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0, 498 &iwk_reg_accattr, &sc->sc_cfg_handle); 499 if (err != DDI_SUCCESS) { 500 cmn_err(CE_WARN, 501 "iwk_attach(): failed to map config spaces regs\n"); 502 goto attach_fail2; 503 } 504 sc->sc_rev = ddi_get8(sc->sc_cfg_handle, 505 (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID)); 506 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0); 507 sc->sc_clsz = ddi_get16(sc->sc_cfg_handle, 508 (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ)); 509 if (!sc->sc_clsz) 510 sc->sc_clsz = 16; 511 sc->sc_clsz = (sc->sc_clsz << 2); 512 sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) + 513 IEEE80211_MTU + IEEE80211_CRC_LEN + 514 (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + 515 IEEE80211_WEP_CRCLEN), sc->sc_clsz); 516 /* 517 * Map operating registers 518 */ 519 err = ddi_regs_map_setup(dip, 1, &sc->sc_base, 520 0, 0, &iwk_reg_accattr, &sc->sc_handle); 521 if (err != DDI_SUCCESS) { 522 cmn_err(CE_WARN, 523 "iwk_attach(): failed to map device regs\n"); 524 goto attach_fail2a; 525 } 526 527 err = ddi_intr_get_supported_types(dip, &intr_type); 528 if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) { 529 cmn_err(CE_WARN, "iwk_attach(): " 530 "Fixed type interrupt is not supported\n"); 531 goto attach_fail_intr_a; 532 } 533 534 err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count); 535 if ((err != DDI_SUCCESS) || (intr_count != 1)) { 536 cmn_err(CE_WARN, "iwk_attach(): " 537 "No fixed interrupts\n"); 538 goto attach_fail_intr_a; 539 } 540 541 sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP); 542 543 err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0, 544 intr_count, &intr_actual, 0); 545 if ((err != DDI_SUCCESS) || (intr_actual != 1)) { 546 cmn_err(CE_WARN, "iwk_attach(): " 547 "ddi_intr_alloc() failed 0x%x\n", err); 548 goto attach_fail_intr_b; 549 } 550 551 err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri); 552 if (err != DDI_SUCCESS) { 553 cmn_err(CE_WARN, "iwk_attach(): " 554 "ddi_intr_get_pri() failed 0x%x\n", err); 555 goto attach_fail_intr_c; 556 } 557 558 mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER, 559 
DDI_INTR_PRI(sc->sc_intr_pri)); 560 mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER, 561 DDI_INTR_PRI(sc->sc_intr_pri)); 562 mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER, 563 DDI_INTR_PRI(sc->sc_intr_pri)); 564 565 cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL); 566 cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL); 567 cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL); 568 /* 569 * initialize the mfthread 570 */ 571 cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL); 572 sc->sc_mf_thread = NULL; 573 sc->sc_mf_thread_switch = 0; 574 575 /* 576 * Allocate shared page. 577 */ 578 err = iwk_alloc_shared(sc); 579 if (err != DDI_SUCCESS) { 580 cmn_err(CE_WARN, "iwk_attach(): " 581 "failed to allocate shared page\n"); 582 goto attach_fail3; 583 } 584 585 /* 586 * Allocate keep warm page. 587 */ 588 err = iwk_alloc_kw(sc); 589 if (err != DDI_SUCCESS) { 590 cmn_err(CE_WARN, "iwk_attach(): " 591 "failed to allocate keep warm page\n"); 592 goto attach_fail3a; 593 } 594 595 /* 596 * Do some necessary hardware initializations. 597 */ 598 err = iwk_preinit(sc); 599 if (err != DDI_SUCCESS) { 600 cmn_err(CE_WARN, "iwk_attach(): " 601 "failed to init hardware\n"); 602 goto attach_fail4; 603 } 604 605 /* initialize EEPROM */ 606 err = iwk_eep_load(sc); /* get hardware configurations from eeprom */ 607 if (err != 0) { 608 cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n"); 609 goto attach_fail4; 610 } 611 612 if (sc->sc_eep_map.calib_version < EEP_TX_POWER_VERSION_NEW) { 613 cmn_err(CE_WARN, "older EEPROM detected\n"); 614 goto attach_fail4; 615 } 616 617 iwk_get_mac_from_eep(sc); 618 619 err = iwk_ring_init(sc); 620 if (err != DDI_SUCCESS) { 621 cmn_err(CE_WARN, "iwk_attach(): " 622 "failed to allocate and initialize ring\n"); 623 goto attach_fail4; 624 } 625 626 sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin; 627 628 err = iwk_alloc_fw_dma(sc); 629 if (err != DDI_SUCCESS) { 630 cmn_err(CE_WARN, "iwk_attach(): " 631 "failed to allocate firmware dma\n"); 632 goto attach_fail5; 633 } 634 635 /* 636 * Initialize the wifi part, which will be used by 637 * generic layer 638 */ 639 ic = &sc->sc_ic; 640 ic->ic_phytype = IEEE80211_T_OFDM; 641 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 642 ic->ic_state = IEEE80211_S_INIT; 643 ic->ic_maxrssi = 100; /* experimental number */ 644 ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT | 645 IEEE80211_C_PMGT | IEEE80211_C_SHSLOT; 646 /* 647 * use software WEP and TKIP, hardware CCMP; 648 */ 649 ic->ic_caps |= IEEE80211_C_AES_CCM; 650 /* 651 * Support WPA/WPA2 652 */ 653 ic->ic_caps |= IEEE80211_C_WPA; 654 655 /* set supported .11b and .11g rates */ 656 ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b; 657 ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g; 658 659 /* set supported .11b and .11g channels (1 through 11) */ 660 for (i = 1; i <= 11; i++) { 661 ic->ic_sup_channels[i].ich_freq = 662 ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ); 663 ic->ic_sup_channels[i].ich_flags = 664 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM | 665 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ | 666 IEEE80211_CHAN_PASSIVE; 667 } 668 669 ic->ic_xmit = iwk_send; 670 /* 671 * init Wifi layer 672 */ 673 ieee80211_attach(ic); 674 675 /* 676 * different instance has different WPA door 677 */ 678 (void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR, 679 ddi_driver_name(dip), 680 ddi_get_instance(dip)); 681 682 /* 683 * Override 80211 default routines 684 */ 685 sc->sc_newstate = ic->ic_newstate; 686 ic->ic_newstate = iwk_newstate; 687 sc->sc_recv_mgmt = 
ic->ic_recv_mgmt; 688 ic->ic_node_alloc = iwk_node_alloc; 689 ic->ic_node_free = iwk_node_free; 690 ic->ic_crypto.cs_key_set = iwk_key_set; 691 ieee80211_media_init(ic); 692 /* 693 * initialize default tx key 694 */ 695 ic->ic_def_txkey = 0; 696 err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX, 697 iwk_rx_softintr, (caddr_t)sc); 698 if (err != DDI_SUCCESS) { 699 cmn_err(CE_WARN, "iwk_attach(): " 700 "add soft interrupt failed\n"); 701 goto attach_fail7; 702 } 703 704 /* 705 * Add the interrupt handler 706 */ 707 err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwk_intr, 708 (caddr_t)sc, NULL); 709 if (err != DDI_SUCCESS) { 710 cmn_err(CE_WARN, "iwk_attach(): " 711 "ddi_intr_add_handle() failed\n"); 712 goto attach_fail8; 713 } 714 715 err = ddi_intr_enable(sc->sc_intr_htable[0]); 716 if (err != DDI_SUCCESS) { 717 cmn_err(CE_WARN, "iwk_attach(): " 718 "ddi_intr_enable() failed\n"); 719 goto attach_fail_intr_d; 720 } 721 722 /* 723 * Initialize pointer to device specific functions 724 */ 725 wd.wd_secalloc = WIFI_SEC_NONE; 726 wd.wd_opmode = ic->ic_opmode; 727 IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr); 728 729 macp = mac_alloc(MAC_VERSION); 730 if (err != DDI_SUCCESS) { 731 cmn_err(CE_WARN, 732 "iwk_attach(): failed to do mac_alloc()\n"); 733 goto attach_fail9; 734 } 735 736 macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI; 737 macp->m_driver = sc; 738 macp->m_dip = dip; 739 macp->m_src_addr = ic->ic_macaddr; 740 macp->m_callbacks = &iwk_m_callbacks; 741 macp->m_min_sdu = 0; 742 macp->m_max_sdu = IEEE80211_MTU; 743 macp->m_pdata = &wd; 744 macp->m_pdata_size = sizeof (wd); 745 746 /* 747 * Register the macp to mac 748 */ 749 err = mac_register(macp, &ic->ic_mach); 750 mac_free(macp); 751 if (err != DDI_SUCCESS) { 752 cmn_err(CE_WARN, 753 "iwk_attach(): failed to do mac_register()\n"); 754 goto attach_fail9; 755 } 756 757 /* 758 * Create minor node of type DDI_NT_NET_WIFI 759 */ 760 (void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance); 761 err = ddi_create_minor_node(dip, strbuf, S_IFCHR, 762 instance + 1, DDI_NT_NET_WIFI, 0); 763 if (err != DDI_SUCCESS) 764 cmn_err(CE_WARN, 765 "iwk_attach(): failed to do ddi_create_minor_node()\n"); 766 767 /* 768 * Notify link is down now 769 */ 770 mac_link_update(ic->ic_mach, LINK_STATE_DOWN); 771 772 /* 773 * create the mf thread to handle the link status, 774 * recovery fatal error, etc. 
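 *
 * Rough lifecycle: sc_mf_thread_switch acts as the run flag for the
 * thread's loop.  iwk_detach() clears it under sc_mt_lock and then
 * waits on sc_mt_cv until sc_mf_thread goes back to NULL, which the
 * thread itself is expected to do on exit, so teardown cannot race
 * with the monitoring work.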
775 */
776 sc->sc_mf_thread_switch = 1;
777 if (sc->sc_mf_thread == NULL)
778 sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
779 iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri);
780
781 sc->sc_flags |= IWK_F_ATTACHED;
782
783 return (DDI_SUCCESS);
784 attach_fail9:
785 (void) ddi_intr_disable(sc->sc_intr_htable[0]);
786 attach_fail_intr_d:
787 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
788
789 attach_fail8:
790 (void) ddi_intr_remove_softint(sc->sc_soft_hdl);
791 sc->sc_soft_hdl = NULL;
792 attach_fail7:
793 ieee80211_detach(ic);
794 attach_fail6:
795 iwk_free_fw_dma(sc);
796 attach_fail5:
797 iwk_ring_free(sc);
798 attach_fail4:
799 iwk_free_kw(sc);
800 attach_fail3a:
801 iwk_free_shared(sc);
802 attach_fail3:
803 iwk_destroy_locks(sc);
804 attach_fail_intr_c:
805 (void) ddi_intr_free(sc->sc_intr_htable[0]);
806 attach_fail_intr_b:
807 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
808 attach_fail_intr_a:
809 ddi_regs_map_free(&sc->sc_handle);
810 attach_fail2a:
811 ddi_regs_map_free(&sc->sc_cfg_handle);
812 attach_fail2:
813 ddi_soft_state_free(iwk_soft_state_p, instance);
814 attach_fail1:
815 return (err);
816 }
817
818 int
819 iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
820 {
821 iwk_sc_t *sc;
822 int err;
823
824 sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
825 ASSERT(sc != NULL);
826
827 switch (cmd) {
828 case DDI_DETACH:
829 break;
830 case DDI_SUSPEND:
831 if (sc->sc_flags & IWK_F_RUNNING) {
832 iwk_stop(sc);
833 }
834 mutex_enter(&sc->sc_glock);
835 sc->sc_flags |= IWK_F_SUSPEND;
836 mutex_exit(&sc->sc_glock);
837 IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n"));
838 return (DDI_SUCCESS);
839 default:
840 return (DDI_FAILURE);
841 }
842
843 if (!(sc->sc_flags & IWK_F_ATTACHED))
844 return (DDI_FAILURE);
845
846 err = mac_disable(sc->sc_ic.ic_mach);
847 if (err != DDI_SUCCESS)
848 return (err);
849
850 /*
851 * Destroy the mf_thread
852 */
853 mutex_enter(&sc->sc_mt_lock);
854 sc->sc_mf_thread_switch = 0;
855 while (sc->sc_mf_thread != NULL) {
856 if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0)
857 break;
858 }
859 mutex_exit(&sc->sc_mt_lock);
860
861 iwk_stop(sc);
862 DELAY(500000);
863
864 /*
865 * Unregister from the MAC layer subsystem
866 */
867 (void) mac_unregister(sc->sc_ic.ic_mach);
868
869 mutex_enter(&sc->sc_glock);
870 iwk_free_fw_dma(sc);
871 iwk_ring_free(sc);
872 iwk_free_kw(sc);
873 iwk_free_shared(sc);
874 mutex_exit(&sc->sc_glock);
875
876 (void) ddi_intr_disable(sc->sc_intr_htable[0]);
877 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
878 (void) ddi_intr_free(sc->sc_intr_htable[0]);
879 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
880
881 (void) ddi_intr_remove_softint(sc->sc_soft_hdl);
882 sc->sc_soft_hdl = NULL;
883
884 /*
885 * detach ieee80211
886 */
887 ieee80211_detach(&sc->sc_ic);
888
889 iwk_destroy_locks(sc);
890
891 ddi_regs_map_free(&sc->sc_handle);
892 ddi_regs_map_free(&sc->sc_cfg_handle);
893 ddi_remove_minor_node(dip, NULL);
894 ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip));
895
896 return (DDI_SUCCESS);
897 }
898
899 /*
900 * quiesce(9E) entry point.
901 *
902 * This function is called when the system is single-threaded at high
903 * PIL with preemption disabled. Therefore, this function must not
904 * block.
905 *
906 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
907 * DDI_FAILURE indicates an error condition and should almost never happen.
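 *
 * In practice that means no mutexes, cv_wait()s or DDI interrupt calls
 * on this path; iwk_quiesce() below therefore only resets the debug
 * flags, marks the softc IWK_F_QUIESCED (which iwk_stop() presumably
 * checks so it can skip its usual locking) and calls iwk_stop() to
 * silence the hardware.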
908 */
909 int
910 iwk_quiesce(dev_info_t *dip)
911 {
912 iwk_sc_t *sc;
913
914 sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
915 ASSERT(sc != NULL);
916
917 /* no message prints and no lock acquisition */
918 #ifdef DEBUG
919 iwk_dbg_flags = 0;
920 #endif
921 sc->sc_flags |= IWK_F_QUIESCED;
922
923 iwk_stop(sc);
924
925 return (DDI_SUCCESS);
926 }
927
928 static void
929 iwk_destroy_locks(iwk_sc_t *sc)
930 {
931 cv_destroy(&sc->sc_mt_cv);
932 mutex_destroy(&sc->sc_mt_lock);
933 cv_destroy(&sc->sc_tx_cv);
934 cv_destroy(&sc->sc_cmd_cv);
935 cv_destroy(&sc->sc_fw_cv);
936 mutex_destroy(&sc->sc_tx_lock);
937 mutex_destroy(&sc->sc_glock);
938 }
939
940 /*
941 * Allocate an area of memory and a DMA handle for accessing it
942 */
943 static int
944 iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize,
945 ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
946 uint_t dma_flags, iwk_dma_t *dma_p)
947 {
948 caddr_t vaddr;
949 int err;
950
951 /*
952 * Allocate handle
953 */
954 err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
955 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
956 if (err != DDI_SUCCESS) {
957 dma_p->dma_hdl = NULL;
958 return (DDI_FAILURE);
959 }
960
961 /*
962 * Allocate memory
963 */
964 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
965 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
966 DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
967 if (err != DDI_SUCCESS) {
968 ddi_dma_free_handle(&dma_p->dma_hdl);
969 dma_p->dma_hdl = NULL;
970 dma_p->acc_hdl = NULL;
971 return (DDI_FAILURE);
972 }
973
974 /*
975 * Bind the two together
976 */
977 dma_p->mem_va = vaddr;
978 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
979 vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
980 &dma_p->cookie, &dma_p->ncookies);
981 if (err != DDI_DMA_MAPPED) {
982 ddi_dma_mem_free(&dma_p->acc_hdl);
983 ddi_dma_free_handle(&dma_p->dma_hdl);
984 dma_p->acc_hdl = NULL;
985 dma_p->dma_hdl = NULL;
986 return (DDI_FAILURE);
987 }
988
989 dma_p->nslots = ~0U;
990 dma_p->size = ~0U;
991 dma_p->token = ~0U;
992 dma_p->offset = 0;
993 return (DDI_SUCCESS);
994 }
995
996 /*
997 * Free one allocated area of DMAable memory
998 */
999 static void
1000 iwk_free_dma_mem(iwk_dma_t *dma_p)
1001 {
1002 if (dma_p->dma_hdl != NULL) {
1003 if (dma_p->ncookies) {
1004 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1005 dma_p->ncookies = 0;
1006 }
1007 ddi_dma_free_handle(&dma_p->dma_hdl);
1008 dma_p->dma_hdl = NULL;
1009 }
1010
1011 if (dma_p->acc_hdl != NULL) {
1012 ddi_dma_mem_free(&dma_p->acc_hdl);
1013 dma_p->acc_hdl = NULL;
1014 }
1015 }
1016
1017 /*
1018 * Allocate DMA-able memory for the firmware image sections.
1019 */
1020 static int
1021 iwk_alloc_fw_dma(iwk_sc_t *sc)
1022 {
1023 int err = DDI_SUCCESS;
1024 iwk_dma_t *dma_p;
1025 char *t;
1026
1027 /*
1028 * firmware image layout:
1029 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
1030 */
1031 t = (char *)(sc->sc_hdr + 1);
1032 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1033 &fw_dma_attr, &iwk_dma_accattr,
1034 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1035 &sc->sc_dma_fw_text);
1036 dma_p = &sc->sc_dma_fw_text;
1037 IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n",
1038 dma_p->ncookies, dma_p->cookie.dmac_address,
1039 dma_p->cookie.dmac_size));
1040 if (err != DDI_SUCCESS) {
1041 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1042 " text dma memory");
1043 goto fail;
1044 }
1045 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
1046
1047 t += LE_32(sc->sc_hdr->textsz);
1048 err = iwk_alloc_dma_mem(sc,
LE_32(sc->sc_hdr->datasz), 1049 &fw_dma_attr, &iwk_dma_accattr,
1050 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1051 &sc->sc_dma_fw_data);
1052 dma_p = &sc->sc_dma_fw_data;
1053 IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
1054 dma_p->ncookies, dma_p->cookie.dmac_address,
1055 dma_p->cookie.dmac_size));
1056 if (err != DDI_SUCCESS) {
1057 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1058 " data dma memory");
1059 goto fail;
1060 }
1061 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1062
1063 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1064 &fw_dma_attr, &iwk_dma_accattr,
1065 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1066 &sc->sc_dma_fw_data_bak);
1067 dma_p = &sc->sc_dma_fw_data_bak;
1068 IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
1069 "size:%lx]\n",
1070 dma_p->ncookies, dma_p->cookie.dmac_address,
1071 dma_p->cookie.dmac_size));
1072 if (err != DDI_SUCCESS) {
1073 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1074 " data backup dma memory");
1075 goto fail;
1076 }
1077 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1078
1079 t += LE_32(sc->sc_hdr->datasz);
1080 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1081 &fw_dma_attr, &iwk_dma_accattr,
1082 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1083 &sc->sc_dma_fw_init_text);
1084 dma_p = &sc->sc_dma_fw_init_text;
1085 IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
1086 "size:%lx]\n",
1087 dma_p->ncookies, dma_p->cookie.dmac_address,
1088 dma_p->cookie.dmac_size));
1089 if (err != DDI_SUCCESS) {
1090 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1091 " init text dma memory");
1092 goto fail;
1093 }
1094 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
1095
1096 t += LE_32(sc->sc_hdr->init_textsz);
1097 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1098 &fw_dma_attr, &iwk_dma_accattr,
1099 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1100 &sc->sc_dma_fw_init_data);
1101 dma_p = &sc->sc_dma_fw_init_data;
1102 IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
1103 "size:%lx]\n",
1104 dma_p->ncookies, dma_p->cookie.dmac_address,
1105 dma_p->cookie.dmac_size));
1106 if (err != DDI_SUCCESS) {
1107 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1108 " init data dma memory");
1109 goto fail;
1110 }
1111 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
1112
1113 sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1114 fail:
1115 return (err);
1116 }
1117
1118 static void
1119 iwk_free_fw_dma(iwk_sc_t *sc)
1120 {
1121 iwk_free_dma_mem(&sc->sc_dma_fw_text);
1122 iwk_free_dma_mem(&sc->sc_dma_fw_data);
1123 iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
1124 iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
1125 iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
1126 }
1127
1128 /*
1129 * Allocate a shared page between host and NIC.
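 * The 4K alignment noted below is not checked here; it is guaranteed
 * by sh_dma_attr (dma_attr_align = 0x1000) passed to
 * iwk_alloc_dma_mem().  The page is mapped DDI_DMA_RDWR and
 * DDI_DMA_CONSISTENT because the device is expected to update it
 * asynchronously at run time.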
1130 */ 1131 static int 1132 iwk_alloc_shared(iwk_sc_t *sc) 1133 { 1134 iwk_dma_t *dma_p; 1135 int err = DDI_SUCCESS; 1136 1137 /* must be aligned on a 4K-page boundary */ 1138 err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t), 1139 &sh_dma_attr, &iwk_dma_accattr, 1140 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1141 &sc->sc_dma_sh); 1142 if (err != DDI_SUCCESS) 1143 goto fail; 1144 sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va; 1145 1146 dma_p = &sc->sc_dma_sh; 1147 IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n", 1148 dma_p->ncookies, dma_p->cookie.dmac_address, 1149 dma_p->cookie.dmac_size)); 1150 1151 return (err); 1152 fail: 1153 iwk_free_shared(sc); 1154 return (err); 1155 } 1156 1157 static void 1158 iwk_free_shared(iwk_sc_t *sc) 1159 { 1160 iwk_free_dma_mem(&sc->sc_dma_sh); 1161 } 1162 1163 /* 1164 * Allocate a keep warm page. 1165 */ 1166 static int 1167 iwk_alloc_kw(iwk_sc_t *sc) 1168 { 1169 iwk_dma_t *dma_p; 1170 int err = DDI_SUCCESS; 1171 1172 /* must be aligned on a 4K-page boundary */ 1173 err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE, 1174 &kw_dma_attr, &iwk_dma_accattr, 1175 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1176 &sc->sc_dma_kw); 1177 if (err != DDI_SUCCESS) 1178 goto fail; 1179 1180 dma_p = &sc->sc_dma_kw; 1181 IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n", 1182 dma_p->ncookies, dma_p->cookie.dmac_address, 1183 dma_p->cookie.dmac_size)); 1184 1185 return (err); 1186 fail: 1187 iwk_free_kw(sc); 1188 return (err); 1189 } 1190 1191 static void 1192 iwk_free_kw(iwk_sc_t *sc) 1193 { 1194 iwk_free_dma_mem(&sc->sc_dma_kw); 1195 } 1196 1197 static int 1198 iwk_alloc_rx_ring(iwk_sc_t *sc) 1199 { 1200 iwk_rx_ring_t *ring; 1201 iwk_rx_data_t *data; 1202 iwk_dma_t *dma_p; 1203 int i, err = DDI_SUCCESS; 1204 1205 ring = &sc->sc_rxq; 1206 ring->cur = 0; 1207 1208 err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t), 1209 &ring_desc_dma_attr, &iwk_dma_accattr, 1210 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1211 &ring->dma_desc); 1212 if (err != DDI_SUCCESS) { 1213 cmn_err(CE_WARN, "dma alloc rx ring desc failed\n"); 1214 goto fail; 1215 } 1216 ring->desc = (uint32_t *)ring->dma_desc.mem_va; 1217 dma_p = &ring->dma_desc; 1218 IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n", 1219 dma_p->ncookies, dma_p->cookie.dmac_address, 1220 dma_p->cookie.dmac_size)); 1221 1222 /* 1223 * Allocate Rx buffers. 1224 */ 1225 for (i = 0; i < RX_QUEUE_SIZE; i++) { 1226 data = &ring->data[i]; 1227 err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz, 1228 &rx_buffer_dma_attr, &iwk_dma_accattr, 1229 DDI_DMA_READ | DDI_DMA_STREAMING, 1230 &data->dma_data); 1231 if (err != DDI_SUCCESS) { 1232 cmn_err(CE_WARN, "dma alloc rx ring buf[%d] " 1233 "failed\n", i); 1234 goto fail; 1235 } 1236 /* 1237 * the physical address bit [8-36] are used, 1238 * instead of bit [0-31] in 3945. 
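 *
 * Illustrative example: a buffer at physical address 0x1fe45600 is
 * stored in the descriptor as 0x1fe45600 >> 8 = 0x001fe456.  The shift
 * is lossless only because rx_buffer_dma_attr guarantees 256-byte
 * (0x100) alignment, and it lets a 32-bit descriptor word carry the
 * wider [8-36] address range.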
1239 */ 1240 ring->desc[i] = LE_32((uint32_t) 1241 (data->dma_data.cookie.dmac_address >> 8)); 1242 } 1243 dma_p = &ring->data[0].dma_data; 1244 IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx " 1245 "size:%lx]\n", 1246 dma_p->ncookies, dma_p->cookie.dmac_address, 1247 dma_p->cookie.dmac_size)); 1248 1249 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV); 1250 1251 return (err); 1252 1253 fail: 1254 iwk_free_rx_ring(sc); 1255 return (err); 1256 } 1257 1258 static void 1259 iwk_reset_rx_ring(iwk_sc_t *sc) 1260 { 1261 int n; 1262 1263 iwk_mac_access_enter(sc); 1264 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 1265 for (n = 0; n < 2000; n++) { 1266 if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) 1267 break; 1268 DELAY(1000); 1269 } 1270 1271 if (n == 2000) 1272 IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n")); 1273 1274 iwk_mac_access_exit(sc); 1275 1276 sc->sc_rxq.cur = 0; 1277 } 1278 1279 static void 1280 iwk_free_rx_ring(iwk_sc_t *sc) 1281 { 1282 int i; 1283 1284 for (i = 0; i < RX_QUEUE_SIZE; i++) { 1285 if (sc->sc_rxq.data[i].dma_data.dma_hdl) 1286 IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data, 1287 DDI_DMA_SYNC_FORCPU); 1288 iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data); 1289 } 1290 1291 if (sc->sc_rxq.dma_desc.dma_hdl) 1292 IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV); 1293 iwk_free_dma_mem(&sc->sc_rxq.dma_desc); 1294 } 1295 1296 static int 1297 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring, 1298 int slots, int qid) 1299 { 1300 iwk_tx_data_t *data; 1301 iwk_tx_desc_t *desc_h; 1302 uint32_t paddr_desc_h; 1303 iwk_cmd_t *cmd_h; 1304 uint32_t paddr_cmd_h; 1305 iwk_dma_t *dma_p; 1306 int i, err = DDI_SUCCESS; 1307 1308 ring->qid = qid; 1309 ring->count = TFD_QUEUE_SIZE_MAX; 1310 ring->window = slots; 1311 ring->queued = 0; 1312 ring->cur = 0; 1313 1314 err = iwk_alloc_dma_mem(sc, 1315 TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t), 1316 &ring_desc_dma_attr, &iwk_dma_accattr, 1317 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1318 &ring->dma_desc); 1319 if (err != DDI_SUCCESS) { 1320 cmn_err(CE_WARN, "dma alloc tx ring desc[%d] " 1321 "failed\n", qid); 1322 goto fail; 1323 } 1324 dma_p = &ring->dma_desc; 1325 IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n", 1326 dma_p->ncookies, dma_p->cookie.dmac_address, 1327 dma_p->cookie.dmac_size)); 1328 1329 desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va; 1330 paddr_desc_h = ring->dma_desc.cookie.dmac_address; 1331 1332 err = iwk_alloc_dma_mem(sc, 1333 TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t), 1334 &cmd_dma_attr, &iwk_dma_accattr, 1335 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1336 &ring->dma_cmd); 1337 if (err != DDI_SUCCESS) { 1338 cmn_err(CE_WARN, "dma alloc tx ring cmd[%d] " 1339 "failed\n", qid); 1340 goto fail; 1341 } 1342 dma_p = &ring->dma_cmd; 1343 IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n", 1344 dma_p->ncookies, dma_p->cookie.dmac_address, 1345 dma_p->cookie.dmac_size)); 1346 1347 cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va; 1348 paddr_cmd_h = ring->dma_cmd.cookie.dmac_address; 1349 1350 /* 1351 * Allocate Tx buffers. 
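 *
 * Each of the TFD_QUEUE_SIZE_MAX slots below is paired 1:1 with a TFD
 * descriptor and a command buffer from the two arrays mapped above;
 * paddr_desc/paddr_cmd are computed by adding the slot's byte offset
 * within the virtual array (_PTRDIFF) to the base DMA address, which
 * is valid because each array was bound as a single physically
 * contiguous cookie (maximum number of segments is 1).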
1352 */ 1353 ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX, 1354 KM_NOSLEEP); 1355 if (ring->data == NULL) { 1356 cmn_err(CE_WARN, "could not allocate tx data slots\n"); 1357 goto fail; 1358 } 1359 1360 for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) { 1361 data = &ring->data[i]; 1362 err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz, 1363 &tx_buffer_dma_attr, &iwk_dma_accattr, 1364 DDI_DMA_WRITE | DDI_DMA_STREAMING, 1365 &data->dma_data); 1366 if (err != DDI_SUCCESS) { 1367 cmn_err(CE_WARN, "dma alloc tx ring " 1368 "buf[%d] failed\n", i); 1369 goto fail; 1370 } 1371 1372 data->desc = desc_h + i; 1373 data->paddr_desc = paddr_desc_h + 1374 _PTRDIFF(data->desc, desc_h); 1375 data->cmd = cmd_h + i; /* (i % slots); */ 1376 /* ((i % slots) * sizeof (iwk_cmd_t)); */ 1377 data->paddr_cmd = paddr_cmd_h + 1378 _PTRDIFF(data->cmd, cmd_h); 1379 } 1380 dma_p = &ring->data[0].dma_data; 1381 IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx " 1382 "size:%lx]\n", 1383 dma_p->ncookies, dma_p->cookie.dmac_address, 1384 dma_p->cookie.dmac_size)); 1385 1386 return (err); 1387 1388 fail: 1389 if (ring->data) 1390 kmem_free(ring->data, 1391 sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX); 1392 iwk_free_tx_ring(sc, ring); 1393 return (err); 1394 } 1395 1396 static void 1397 iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring) 1398 { 1399 iwk_tx_data_t *data; 1400 int i, n; 1401 1402 iwk_mac_access_enter(sc); 1403 1404 IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0); 1405 for (n = 0; n < 200; n++) { 1406 if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) & 1407 IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) 1408 break; 1409 DELAY(10); 1410 } 1411 if (n == 200) { 1412 IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n", 1413 ring->qid)); 1414 } 1415 iwk_mac_access_exit(sc); 1416 1417 for (i = 0; i < ring->count; i++) { 1418 data = &ring->data[i]; 1419 IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV); 1420 } 1421 1422 ring->queued = 0; 1423 ring->cur = 0; 1424 } 1425 1426 /*ARGSUSED*/ 1427 static void 1428 iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring) 1429 { 1430 int i; 1431 1432 if (ring->dma_desc.dma_hdl != NULL) 1433 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV); 1434 iwk_free_dma_mem(&ring->dma_desc); 1435 1436 if (ring->dma_cmd.dma_hdl != NULL) 1437 IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV); 1438 iwk_free_dma_mem(&ring->dma_cmd); 1439 1440 if (ring->data != NULL) { 1441 for (i = 0; i < ring->count; i++) { 1442 if (ring->data[i].dma_data.dma_hdl) 1443 IWK_DMA_SYNC(ring->data[i].dma_data, 1444 DDI_DMA_SYNC_FORDEV); 1445 iwk_free_dma_mem(&ring->data[i].dma_data); 1446 } 1447 kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t)); 1448 } 1449 } 1450 1451 static int 1452 iwk_ring_init(iwk_sc_t *sc) 1453 { 1454 int i, err = DDI_SUCCESS; 1455 1456 for (i = 0; i < IWK_NUM_QUEUES; i++) { 1457 if (i == IWK_CMD_QUEUE_NUM) 1458 continue; 1459 err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS, 1460 i); 1461 if (err != DDI_SUCCESS) 1462 goto fail; 1463 } 1464 err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM], 1465 TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM); 1466 if (err != DDI_SUCCESS) 1467 goto fail; 1468 err = iwk_alloc_rx_ring(sc); 1469 if (err != DDI_SUCCESS) 1470 goto fail; 1471 return (err); 1472 1473 fail: 1474 return (err); 1475 } 1476 1477 static void 1478 iwk_ring_free(iwk_sc_t *sc) 1479 { 1480 int i = IWK_NUM_QUEUES; 1481 1482 iwk_free_rx_ring(sc); 1483 while (--i >= 0) { 1484 iwk_free_tx_ring(sc, &sc->sc_txq[i]); 1485 } 1486 } 1487 1488 /* ARGSUSED 
*/ 1489 static ieee80211_node_t * 1490 iwk_node_alloc(ieee80211com_t *ic) 1491 { 1492 iwk_amrr_t *amrr; 1493 1494 amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP); 1495 if (amrr != NULL) 1496 iwk_amrr_init(amrr); 1497 return (&amrr->in); 1498 } 1499 1500 static void 1501 iwk_node_free(ieee80211_node_t *in) 1502 { 1503 ieee80211com_t *ic = in->in_ic; 1504 1505 ic->ic_node_cleanup(in); 1506 if (in->in_wpa_ie != NULL) 1507 ieee80211_free(in->in_wpa_ie); 1508 kmem_free(in, sizeof (iwk_amrr_t)); 1509 } 1510 1511 /*ARGSUSED*/ 1512 static int 1513 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg) 1514 { 1515 iwk_sc_t *sc = (iwk_sc_t *)ic; 1516 ieee80211_node_t *in = ic->ic_bss; 1517 enum ieee80211_state ostate = ic->ic_state; 1518 int i, err = IWK_SUCCESS; 1519 1520 mutex_enter(&sc->sc_glock); 1521 switch (nstate) { 1522 case IEEE80211_S_SCAN: 1523 switch (ostate) { 1524 case IEEE80211_S_INIT: 1525 { 1526 iwk_add_sta_t node; 1527 1528 sc->sc_flags |= IWK_F_SCANNING; 1529 iwk_set_led(sc, 2, 10, 2); 1530 1531 /* 1532 * clear association to receive beacons from 1533 * all BSS'es 1534 */ 1535 sc->sc_config.assoc_id = 0; 1536 sc->sc_config.filter_flags &= 1537 ~LE_32(RXON_FILTER_ASSOC_MSK); 1538 1539 IWK_DBG((IWK_DEBUG_80211, "config chan %d " 1540 "flags %x filter_flags %x\n", sc->sc_config.chan, 1541 sc->sc_config.flags, sc->sc_config.filter_flags)); 1542 1543 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 1544 sizeof (iwk_rxon_cmd_t), 1); 1545 if (err != IWK_SUCCESS) { 1546 cmn_err(CE_WARN, 1547 "could not clear association\n"); 1548 sc->sc_flags &= ~IWK_F_SCANNING; 1549 mutex_exit(&sc->sc_glock); 1550 return (err); 1551 } 1552 1553 /* add broadcast node to send probe request */ 1554 (void) memset(&node, 0, sizeof (node)); 1555 (void) memset(&node.bssid, 0xff, IEEE80211_ADDR_LEN); 1556 node.id = IWK_BROADCAST_ID; 1557 err = iwk_cmd(sc, REPLY_ADD_STA, &node, 1558 sizeof (node), 1); 1559 if (err != IWK_SUCCESS) { 1560 cmn_err(CE_WARN, "could not add " 1561 "broadcast node\n"); 1562 sc->sc_flags &= ~IWK_F_SCANNING; 1563 mutex_exit(&sc->sc_glock); 1564 return (err); 1565 } 1566 break; 1567 } 1568 case IEEE80211_S_SCAN: 1569 mutex_exit(&sc->sc_glock); 1570 /* step to next channel before actual FW scan */ 1571 err = sc->sc_newstate(ic, nstate, arg); 1572 mutex_enter(&sc->sc_glock); 1573 if ((err != 0) || ((err = iwk_scan(sc)) != 0)) { 1574 cmn_err(CE_WARN, 1575 "could not initiate scan\n"); 1576 sc->sc_flags &= ~IWK_F_SCANNING; 1577 ieee80211_cancel_scan(ic); 1578 } 1579 mutex_exit(&sc->sc_glock); 1580 return (err); 1581 default: 1582 break; 1583 1584 } 1585 sc->sc_clk = 0; 1586 break; 1587 1588 case IEEE80211_S_AUTH: 1589 if (ostate == IEEE80211_S_SCAN) { 1590 sc->sc_flags &= ~IWK_F_SCANNING; 1591 } 1592 1593 /* reset state to handle reassociations correctly */ 1594 sc->sc_config.assoc_id = 0; 1595 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK); 1596 1597 /* 1598 * before sending authentication and association request frame, 1599 * we need do something in the hardware, such as setting the 1600 * channel same to the target AP... 
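 *
 * (iwk_hw_set_before_auth() is expected to retune the RXON
 * configuration, i.e. channel, BSSID and related flags, to the target
 * AP and push it to the firmware with REPLY_RXON, so that the
 * subsequent auth/assoc frames go out on the right channel.)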
1601 */ 1602 if ((err = iwk_hw_set_before_auth(sc)) != 0) { 1603 cmn_err(CE_WARN, "could not setup firmware for " 1604 "authentication\n"); 1605 mutex_exit(&sc->sc_glock); 1606 return (err); 1607 } 1608 break; 1609 1610 case IEEE80211_S_RUN: 1611 if (ostate == IEEE80211_S_SCAN) { 1612 sc->sc_flags &= ~IWK_F_SCANNING; 1613 } 1614 1615 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 1616 /* let LED blink when monitoring */ 1617 iwk_set_led(sc, 2, 10, 10); 1618 break; 1619 } 1620 IWK_DBG((IWK_DEBUG_80211, "iwk: associated.")); 1621 1622 /* none IBSS mode */ 1623 if (ic->ic_opmode != IEEE80211_M_IBSS) { 1624 /* update adapter's configuration */ 1625 if (sc->sc_assoc_id != in->in_associd) { 1626 cmn_err(CE_WARN, 1627 "associate ID mismatch: expected %d, " 1628 "got %d\n", 1629 in->in_associd, sc->sc_assoc_id); 1630 } 1631 sc->sc_config.assoc_id = in->in_associd & 0x3fff; 1632 /* 1633 * short preamble/slot time are 1634 * negotiated when associating 1635 */ 1636 sc->sc_config.flags &= 1637 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK | 1638 RXON_FLG_SHORT_SLOT_MSK); 1639 1640 if (ic->ic_flags & IEEE80211_F_SHSLOT) 1641 sc->sc_config.flags |= 1642 LE_32(RXON_FLG_SHORT_SLOT_MSK); 1643 1644 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 1645 sc->sc_config.flags |= 1646 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK); 1647 1648 sc->sc_config.filter_flags |= 1649 LE_32(RXON_FILTER_ASSOC_MSK); 1650 1651 if (ic->ic_opmode != IEEE80211_M_STA) 1652 sc->sc_config.filter_flags |= 1653 LE_32(RXON_FILTER_BCON_AWARE_MSK); 1654 1655 IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x" 1656 " filter_flags %x\n", 1657 sc->sc_config.chan, sc->sc_config.flags, 1658 sc->sc_config.filter_flags)); 1659 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 1660 sizeof (iwk_rxon_cmd_t), 1); 1661 if (err != IWK_SUCCESS) { 1662 cmn_err(CE_WARN, "could not update " 1663 "configuration\n"); 1664 mutex_exit(&sc->sc_glock); 1665 return (err); 1666 } 1667 } 1668 1669 /* obtain current temperature of chipset */ 1670 sc->sc_tempera = iwk_curr_tempera(sc); 1671 1672 /* 1673 * make Tx power calibration to determine 1674 * the gains of DSP and radio 1675 */ 1676 err = iwk_tx_power_calibration(sc); 1677 if (err) { 1678 cmn_err(CE_WARN, "iwk_newstate(): " 1679 "failed to set tx power table\n"); 1680 return (err); 1681 } 1682 1683 /* start automatic rate control */ 1684 mutex_enter(&sc->sc_mt_lock); 1685 if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) { 1686 sc->sc_flags |= IWK_F_RATE_AUTO_CTL; 1687 /* set rate to some reasonable initial value */ 1688 i = in->in_rates.ir_nrates - 1; 1689 while (i > 0 && IEEE80211_RATE(i) > 72) 1690 i--; 1691 in->in_txrate = i; 1692 } else { 1693 sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL; 1694 } 1695 mutex_exit(&sc->sc_mt_lock); 1696 1697 /* set LED on after associated */ 1698 iwk_set_led(sc, 2, 0, 1); 1699 break; 1700 1701 case IEEE80211_S_INIT: 1702 if (ostate == IEEE80211_S_SCAN) { 1703 sc->sc_flags &= ~IWK_F_SCANNING; 1704 } 1705 1706 /* set LED off after init */ 1707 iwk_set_led(sc, 2, 1, 0); 1708 break; 1709 case IEEE80211_S_ASSOC: 1710 if (ostate == IEEE80211_S_SCAN) { 1711 sc->sc_flags &= ~IWK_F_SCANNING; 1712 } 1713 1714 break; 1715 } 1716 1717 mutex_exit(&sc->sc_glock); 1718 1719 err = sc->sc_newstate(ic, nstate, arg); 1720 1721 if (nstate == IEEE80211_S_RUN) { 1722 1723 mutex_enter(&sc->sc_glock); 1724 1725 /* 1726 * make initialization for Receiver 1727 * sensitivity calibration 1728 */ 1729 err = iwk_rx_sens_init(sc); 1730 if (err) { 1731 cmn_err(CE_WARN, "iwk_newstate(): " 1732 "failed to init RX sensitivity\n"); 1733 
mutex_exit(&sc->sc_glock); 1734 return (err); 1735 } 1736 1737 /* make initialization for Receiver gain balance */ 1738 err = iwk_rxgain_diff_init(sc); 1739 if (err) { 1740 cmn_err(CE_WARN, "iwk_newstate(): " 1741 "failed to init phy calibration\n"); 1742 mutex_exit(&sc->sc_glock); 1743 return (err); 1744 } 1745 1746 mutex_exit(&sc->sc_glock); 1747 1748 } 1749 1750 return (err); 1751 } 1752 1753 /*ARGSUSED*/ 1754 static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k, 1755 const uint8_t mac[IEEE80211_ADDR_LEN]) 1756 { 1757 iwk_sc_t *sc = (iwk_sc_t *)ic; 1758 iwk_add_sta_t node; 1759 int err; 1760 1761 switch (k->wk_cipher->ic_cipher) { 1762 case IEEE80211_CIPHER_WEP: 1763 case IEEE80211_CIPHER_TKIP: 1764 return (1); /* sofeware do it. */ 1765 case IEEE80211_CIPHER_AES_CCM: 1766 break; 1767 default: 1768 return (0); 1769 } 1770 sc->sc_config.filter_flags &= ~(RXON_FILTER_DIS_DECRYPT_MSK | 1771 RXON_FILTER_DIS_GRP_DECRYPT_MSK); 1772 1773 mutex_enter(&sc->sc_glock); 1774 1775 /* update ap/multicast node */ 1776 (void) memset(&node, 0, sizeof (node)); 1777 if (IEEE80211_IS_MULTICAST(mac)) { 1778 (void) memset(node.bssid, 0xff, 6); 1779 node.id = IWK_BROADCAST_ID; 1780 } else { 1781 IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid); 1782 node.id = IWK_AP_ID; 1783 } 1784 if (k->wk_flags & IEEE80211_KEY_XMIT) { 1785 node.key_flags = 0; 1786 node.keyp = k->wk_keyix; 1787 } else { 1788 node.key_flags = (1 << 14); 1789 node.keyp = k->wk_keyix + 4; 1790 } 1791 (void) memcpy(node.key, k->wk_key, k->wk_keylen); 1792 node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8)); 1793 node.sta_mask = STA_MODIFY_KEY_MASK; 1794 node.control = 1; 1795 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1); 1796 if (err != IWK_SUCCESS) { 1797 cmn_err(CE_WARN, "iwk_key_set():" 1798 "failed to update ap node\n"); 1799 mutex_exit(&sc->sc_glock); 1800 return (0); 1801 } 1802 mutex_exit(&sc->sc_glock); 1803 return (1); 1804 } 1805 1806 /* 1807 * exclusive access to mac begin. 1808 */ 1809 static void 1810 iwk_mac_access_enter(iwk_sc_t *sc) 1811 { 1812 uint32_t tmp; 1813 int n; 1814 1815 tmp = IWK_READ(sc, CSR_GP_CNTRL); 1816 IWK_WRITE(sc, CSR_GP_CNTRL, 1817 tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1818 1819 /* wait until we succeed */ 1820 for (n = 0; n < 1000; n++) { 1821 if ((IWK_READ(sc, CSR_GP_CNTRL) & 1822 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 1823 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) == 1824 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) 1825 break; 1826 DELAY(10); 1827 } 1828 if (n == 1000) 1829 IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n")); 1830 } 1831 1832 /* 1833 * exclusive access to mac end. 
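 *
 * Typical usage (see iwk_load_firmware() and the ring reset routines):
 *
 *	iwk_mac_access_enter(sc);
 *	... one or more iwk_reg_read()/iwk_reg_write() accesses ...
 *	iwk_mac_access_exit(sc);
 *
 * enter() asserts the MAC_ACCESS_REQ bit in CSR_GP_CNTRL and polls
 * until the MAC clock is reported ready; exit() below simply drops
 * that request bit again so the device may go back to sleep.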
1834 */ 1835 static void 1836 iwk_mac_access_exit(iwk_sc_t *sc) 1837 { 1838 uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL); 1839 IWK_WRITE(sc, CSR_GP_CNTRL, 1840 tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1841 } 1842 1843 static uint32_t 1844 iwk_mem_read(iwk_sc_t *sc, uint32_t addr) 1845 { 1846 IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr); 1847 return (IWK_READ(sc, HBUS_TARG_MEM_RDAT)); 1848 } 1849 1850 static void 1851 iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data) 1852 { 1853 IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr); 1854 IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data); 1855 } 1856 1857 static uint32_t 1858 iwk_reg_read(iwk_sc_t *sc, uint32_t addr) 1859 { 1860 IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24)); 1861 return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT)); 1862 } 1863 1864 static void 1865 iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data) 1866 { 1867 IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24)); 1868 IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data); 1869 } 1870 1871 static void 1872 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr, 1873 uint32_t *data, int wlen) 1874 { 1875 for (; wlen > 0; wlen--, data++, addr += 4) 1876 iwk_reg_write(sc, addr, *data); 1877 } 1878 1879 1880 /* 1881 * ucode load/initialization steps: 1882 * 1) load Bootstrap State Machine (BSM) with "bootstrap" uCode image. 1883 * BSM contains a small memory that *always* stays powered up, so it can 1884 * retain the bootstrap program even when the card is in a power-saving 1885 * power-down state. The BSM loads the small program into ARC processor's 1886 * instruction memory when triggered by power-up. 1887 * 2) load Initialize image via bootstrap program. 1888 * The Initialize image sets up regulatory and calibration data for the 1889 * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed. 1890 * The 4965 reply contains calibration data for temperature, voltage and tx gain 1891 * correction. 1892 */ 1893 static int 1894 iwk_load_firmware(iwk_sc_t *sc) 1895 { 1896 uint32_t *boot_fw = (uint32_t *)sc->sc_boot; 1897 uint32_t size = sc->sc_hdr->bootsz; 1898 int n, err = IWK_SUCCESS; 1899 1900 /* 1901 * The physical address bit [4-35] of the initialize uCode. 1902 * In the initialize alive notify interrupt the physical address of 1903 * the runtime ucode will be set for loading. 
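 *
 * Example: an init-text image at physical address 0x08d2e010 would be
 * programmed into BSM_DRAM_INST_PTR_REG as 0x08d2e010 >> 4 =
 * 0x008d2e01.  fw_dma_attr guarantees 16-byte (0x10) alignment, so the
 * shift loses nothing and the 32-bit register covers the [4-35]
 * address range.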
1904 */ 1905 iwk_mac_access_enter(sc); 1906 1907 iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG, 1908 sc->sc_dma_fw_init_text.cookie.dmac_address >> 4); 1909 iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG, 1910 sc->sc_dma_fw_init_data.cookie.dmac_address >> 4); 1911 iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG, 1912 sc->sc_dma_fw_init_text.cookie.dmac_size); 1913 iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG, 1914 sc->sc_dma_fw_init_data.cookie.dmac_size); 1915 1916 /* load bootstrap code into BSM memory */ 1917 iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw, 1918 size / sizeof (uint32_t)); 1919 1920 iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0); 1921 iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND); 1922 iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t)); 1923 1924 /* 1925 * prepare to load initialize uCode 1926 */ 1927 iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); 1928 1929 /* wait while the adapter is busy loading the firmware */ 1930 for (n = 0; n < 1000; n++) { 1931 if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) & 1932 BSM_WR_CTRL_REG_BIT_START)) 1933 break; 1934 DELAY(10); 1935 } 1936 if (n == 1000) { 1937 cmn_err(CE_WARN, "timeout transferring firmware\n"); 1938 err = ETIMEDOUT; 1939 return (err); 1940 } 1941 1942 /* for future power-save mode use */ 1943 iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); 1944 1945 iwk_mac_access_exit(sc); 1946 1947 return (err); 1948 } 1949 1950 /*ARGSUSED*/ 1951 static void 1952 iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data) 1953 { 1954 ieee80211com_t *ic = &sc->sc_ic; 1955 iwk_rx_ring_t *ring = &sc->sc_rxq; 1956 iwk_rx_phy_res_t *stat; 1957 ieee80211_node_t *in; 1958 uint32_t *tail; 1959 struct ieee80211_frame *wh; 1960 mblk_t *mp; 1961 uint16_t len, rssi, mrssi, agc; 1962 int16_t t; 1963 uint32_t ants, i; 1964 struct iwk_rx_non_cfg_phy *phyinfo; 1965 1966 /* assuming not 11n here. cope with 11n in phase-II */ 1967 stat = (iwk_rx_phy_res_t *)(desc + 1); 1968 if (stat->cfg_phy_cnt > 20) { 1969 return; 1970 } 1971 1972 phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy; 1973 agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS; 1974 mrssi = 0; 1975 ants = (stat->phy_flags & RX_PHY_FLAGS_ANTENNAE_MASK) >> 1976 RX_PHY_FLAGS_ANTENNAE_OFFSET; 1977 for (i = 0; i < 3; i++) { 1978 if (ants & (1 << i)) 1979 mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]); 1980 } 1981 t = mrssi - agc - 44; /* t is the dBM value */ 1982 /* 1983 * convert dBm to percentage ??? 
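 *
 * Worked example of the mapping below, where t is the signal in dBm:
 * at t = -20 the numerator is 100*75*75 and rssi = 100; at t = -60,
 * (-20 - t) = 40 and rssi = (562500 - 40*(1125 + 62*40)) / 5625 = 74;
 * at t = -95 it drops to 23.  The result is clamped to [1, 100] before
 * being passed to the net80211 layer.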
1984 */ 1985 rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t))) / 1986 (75 * 75); 1987 if (rssi > 100) 1988 rssi = 100; 1989 if (rssi < 1) 1990 rssi = 1; 1991 len = stat->byte_count; 1992 tail = (uint32_t *)((uint8_t *)(stat + 1) + stat->cfg_phy_cnt + len); 1993 1994 IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d " 1995 "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x " 1996 "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat), 1997 len, stat->rate.r.s.rate, stat->channel, 1998 LE_32(stat->timestampl), stat->non_cfg_phy_cnt, 1999 stat->cfg_phy_cnt, LE_32(*tail))); 2000 2001 if ((len < 16) || (len > sc->sc_dmabuf_sz)) { 2002 IWK_DBG((IWK_DEBUG_RX, "rx frame oversize\n")); 2003 return; 2004 } 2005 2006 /* 2007 * discard Rx frames with bad CRC 2008 */ 2009 if ((LE_32(*tail) & 2010 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) != 2011 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) { 2012 IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n", 2013 LE_32(*tail))); 2014 sc->sc_rx_err++; 2015 return; 2016 } 2017 2018 wh = (struct ieee80211_frame *) 2019 ((uint8_t *)(stat + 1)+ stat->cfg_phy_cnt); 2020 if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) { 2021 sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2); 2022 IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n", 2023 sc->sc_assoc_id)); 2024 } 2025 #ifdef DEBUG 2026 if (iwk_dbg_flags & IWK_DEBUG_RX) 2027 ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0); 2028 #endif 2029 in = ieee80211_find_rxnode(ic, wh); 2030 mp = allocb(len, BPRI_MED); 2031 if (mp) { 2032 (void) memcpy(mp->b_wptr, wh, len); 2033 mp->b_wptr += len; 2034 2035 /* send the frame to the 802.11 layer */ 2036 (void) ieee80211_input(ic, mp, in, rssi, 0); 2037 } else { 2038 sc->sc_rx_nobuf++; 2039 IWK_DBG((IWK_DEBUG_RX, 2040 "iwk_rx_intr(): alloc rx buf failed\n")); 2041 } 2042 /* release node reference */ 2043 ieee80211_free_node(in); 2044 } 2045 2046 /*ARGSUSED*/ 2047 static void 2048 iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data) 2049 { 2050 ieee80211com_t *ic = &sc->sc_ic; 2051 iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3]; 2052 iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1); 2053 iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss; 2054 2055 IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d" 2056 " retries=%d frame_count=%x nkill=%d " 2057 "rate=%x duration=%d status=%x\n", 2058 desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count, 2059 stat->bt_kill_count, stat->rate.r.s.rate, 2060 LE_32(stat->duration), LE_32(stat->status))); 2061 2062 amrr->txcnt++; 2063 IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt)); 2064 if (stat->ntries > 0) { 2065 amrr->retrycnt++; 2066 sc->sc_tx_retries++; 2067 IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n", 2068 sc->sc_tx_retries)); 2069 } 2070 2071 sc->sc_tx_timer = 0; 2072 2073 mutex_enter(&sc->sc_tx_lock); 2074 ring->queued--; 2075 if (ring->queued < 0) 2076 ring->queued = 0; 2077 if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) { 2078 sc->sc_need_reschedule = 0; 2079 mutex_exit(&sc->sc_tx_lock); 2080 mac_tx_update(ic->ic_mach); 2081 mutex_enter(&sc->sc_tx_lock); 2082 } 2083 mutex_exit(&sc->sc_tx_lock); 2084 } 2085 2086 static void 2087 iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc) 2088 { 2089 if ((desc->hdr.qid & 7) != 4) { 2090 return; 2091 } 2092 mutex_enter(&sc->sc_glock); 2093 sc->sc_flags |= IWK_F_CMD_DONE; 2094 cv_signal(&sc->sc_cmd_cv); 2095 mutex_exit(&sc->sc_glock); 2096 IWK_DBG((IWK_DEBUG_CMD, "rx cmd: " 2097 "qid=%x idx=%d flags=%x 
type=0x%x\n", 2098 desc->hdr.qid, desc->hdr.idx, desc->hdr.flags, 2099 desc->hdr.type)); 2100 } 2101 2102 static void 2103 iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc) 2104 { 2105 uint32_t base, i; 2106 struct iwk_alive_resp *ar = 2107 (struct iwk_alive_resp *)(desc + 1); 2108 2109 /* the microcontroller is ready */ 2110 IWK_DBG((IWK_DEBUG_FW, 2111 "microcode alive notification minor: %x major: %x type:" 2112 " %x subtype: %x\n", 2113 ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype)); 2114 2115 if (LE_32(ar->is_valid) != UCODE_VALID_OK) { 2116 IWK_DBG((IWK_DEBUG_FW, 2117 "microcontroller initialization failed\n")); 2118 } 2119 if (ar->ver_subtype == INITIALIZE_SUBTYPE) { 2120 IWK_DBG((IWK_DEBUG_FW, 2121 "initialization alive received.\n")); 2122 (void) memcpy(&sc->sc_card_alive_init, ar, 2123 sizeof (struct iwk_init_alive_resp)); 2124 /* XXX get temperature */ 2125 iwk_mac_access_enter(sc); 2126 iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG, 2127 sc->sc_dma_fw_text.cookie.dmac_address >> 4); 2128 iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG, 2129 sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4); 2130 iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG, 2131 sc->sc_dma_fw_data.cookie.dmac_size); 2132 iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG, 2133 sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000); 2134 iwk_mac_access_exit(sc); 2135 } else { 2136 IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n")); 2137 (void) memcpy(&sc->sc_card_alive_run, ar, 2138 sizeof (struct iwk_alive_resp)); 2139 2140 /* 2141 * Init SCD related registers to make Tx work. XXX 2142 */ 2143 iwk_mac_access_enter(sc); 2144 2145 /* read sram address of data base */ 2146 sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR); 2147 2148 /* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */ 2149 for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0; 2150 i < 128; i += 4) 2151 iwk_mem_write(sc, base + i, 0); 2152 2153 /* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */ 2154 for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET; 2155 i < 256; i += 4) 2156 iwk_mem_write(sc, base + i, 0); 2157 2158 /* clear and init SCD_TRANSLATE_TBL_OFFSET area. 
32 bytes */ 2159 for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET; 2160 i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4) 2161 iwk_mem_write(sc, base + i, 0); 2162 2163 iwk_reg_write(sc, SCD_DRAM_BASE_ADDR, 2164 sc->sc_dma_sh.cookie.dmac_address >> 10); 2165 iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0); 2166 2167 /* initiate the tx queues */ 2168 for (i = 0; i < IWK_NUM_QUEUES; i++) { 2169 iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0); 2170 IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8)); 2171 iwk_mem_write(sc, sc->sc_scd_base + 2172 SCD_CONTEXT_QUEUE_OFFSET(i), 2173 (SCD_WIN_SIZE & 0x7f)); 2174 iwk_mem_write(sc, sc->sc_scd_base + 2175 SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t), 2176 (SCD_FRAME_LIMIT & 0x7f) << 16); 2177 } 2178 /* interrupt enable on each queue0-7 */ 2179 iwk_reg_write(sc, SCD_INTERRUPT_MASK, 2180 (1 << IWK_NUM_QUEUES) - 1); 2181 /* enable each channel 0-7 */ 2182 iwk_reg_write(sc, SCD_TXFACT, 2183 SCD_TXFACT_REG_TXFIFO_MASK(0, 7)); 2184 /* 2185 * queue 0-7 maps to FIFO 0-7 and 2186 * all queues work under FIFO mode (none-scheduler-ack) 2187 */ 2188 for (i = 0; i < 7; i++) { 2189 iwk_reg_write(sc, 2190 SCD_QUEUE_STATUS_BITS(i), 2191 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| 2192 (i << SCD_QUEUE_STTS_REG_POS_TXF)| 2193 SCD_QUEUE_STTS_REG_MSK); 2194 } 2195 iwk_mac_access_exit(sc); 2196 2197 sc->sc_flags |= IWK_F_FW_INIT; 2198 cv_signal(&sc->sc_fw_cv); 2199 } 2200 2201 } 2202 2203 static uint_t 2204 /* LINTED: argument unused in function: unused */ 2205 iwk_rx_softintr(caddr_t arg, caddr_t unused) 2206 { 2207 iwk_sc_t *sc = (iwk_sc_t *)arg; 2208 ieee80211com_t *ic = &sc->sc_ic; 2209 iwk_rx_desc_t *desc; 2210 iwk_rx_data_t *data; 2211 uint32_t index; 2212 2213 mutex_enter(&sc->sc_glock); 2214 if (sc->sc_rx_softint_pending != 1) { 2215 mutex_exit(&sc->sc_glock); 2216 return (DDI_INTR_UNCLAIMED); 2217 } 2218 /* disable interrupts */ 2219 IWK_WRITE(sc, CSR_INT_MASK, 0); 2220 mutex_exit(&sc->sc_glock); 2221 2222 /* 2223 * firmware has moved the index of the rx queue, driver get it, 2224 * and deal with it. 2225 */ 2226 index = LE_32(sc->sc_shared->val0) & 0xfff; 2227 2228 while (sc->sc_rxq.cur != index) { 2229 data = &sc->sc_rxq.data[sc->sc_rxq.cur]; 2230 desc = (iwk_rx_desc_t *)data->dma_data.mem_va; 2231 2232 IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d" 2233 " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n", 2234 index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx, 2235 desc->hdr.flags, desc->hdr.type, LE_32(desc->len))); 2236 2237 /* a command other than a tx need to be replied */ 2238 if (!(desc->hdr.qid & 0x80) && 2239 (desc->hdr.type != REPLY_RX_PHY_CMD) && 2240 (desc->hdr.type != REPLY_TX) && 2241 (desc->hdr.type != REPLY_TX_PWR_TABLE_CMD) && 2242 (desc->hdr.type != REPLY_PHY_CALIBRATION_CMD) && 2243 (desc->hdr.type != SENSITIVITY_CMD)) 2244 iwk_cmd_intr(sc, desc); 2245 2246 switch (desc->hdr.type) { 2247 case REPLY_4965_RX: 2248 iwk_rx_intr(sc, desc, data); 2249 break; 2250 2251 case REPLY_TX: 2252 iwk_tx_intr(sc, desc, data); 2253 break; 2254 2255 case REPLY_ALIVE: 2256 iwk_ucode_alive(sc, desc); 2257 break; 2258 2259 case CARD_STATE_NOTIFICATION: 2260 { 2261 uint32_t *status = (uint32_t *)(desc + 1); 2262 2263 IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n", 2264 LE_32(*status))); 2265 2266 if (LE_32(*status) & 1) { 2267 /* 2268 * the radio button has to be pushed(OFF). 
It
2269 * is treated as a hardware error; the
2270 * iwk_thread() tries to recover it after the
2271 * button is pushed again (ON).
2272 */
2273 cmn_err(CE_NOTE,
2274 "iwk_rx_softintr(): "
2275 "Radio transmitter is off\n");
2276 sc->sc_ostate = sc->sc_ic.ic_state;
2277 ieee80211_new_state(&sc->sc_ic,
2278 IEEE80211_S_INIT, -1);
2279 sc->sc_flags |=
2280 (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF);
2281 }
2282 break;
2283 }
2284 case SCAN_START_NOTIFICATION:
2285 {
2286 iwk_start_scan_t *scan =
2287 (iwk_start_scan_t *)(desc + 1);
2288
2289 IWK_DBG((IWK_DEBUG_SCAN,
2290 "scanning channel %d status %x\n",
2291 scan->chan, LE_32(scan->status)));
2292
2293 ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2294 break;
2295 }
2296 case SCAN_COMPLETE_NOTIFICATION:
2297 {
2298 iwk_stop_scan_t *scan =
2299 (iwk_stop_scan_t *)(desc + 1);
2300
2301 IWK_DBG((IWK_DEBUG_SCAN,
2302 "completed channel %d (burst of %d) status %02x\n",
2303 scan->chan, scan->nchan, scan->status));
2304
2305 sc->sc_scan_pending++;
2306 break;
2307 }
2308 case STATISTICS_NOTIFICATION:
2309 /* handle statistics notification */
2310 iwk_statistics_notify(sc, desc);
2311 break;
2312 }
2313
2314 sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2315 }
2316
2317 /*
2318 * the driver has processed everything received in the rx queue;
2319 * tell the firmware how far it has read by updating the write pointer.
2320 */
2321 index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
2322 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2323
2324 mutex_enter(&sc->sc_glock);
2325 /* re-enable interrupts */
2326 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2327 sc->sc_rx_softint_pending = 0;
2328 mutex_exit(&sc->sc_glock);
2329
2330 return (DDI_INTR_CLAIMED);
2331 }
2332
2333 static uint_t
2334 /* LINTED: argument unused in function: unused */
2335 iwk_intr(caddr_t arg, caddr_t unused)
2336 {
2337 iwk_sc_t *sc = (iwk_sc_t *)arg;
2338 uint32_t r, rfh;
2339
2340 mutex_enter(&sc->sc_glock);
2341
2342 if (sc->sc_flags & IWK_F_SUSPEND) {
2343 mutex_exit(&sc->sc_glock);
2344 return (DDI_INTR_UNCLAIMED);
2345 }
2346
2347 r = IWK_READ(sc, CSR_INT);
2348 if (r == 0 || r == 0xffffffff) {
2349 mutex_exit(&sc->sc_glock);
2350 return (DDI_INTR_UNCLAIMED);
2351 }
2352
2353 IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));
2354
2355 rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
2356 IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
2357 /* disable interrupts */
2358 IWK_WRITE(sc, CSR_INT_MASK, 0);
2359 /* ack interrupts */
2360 IWK_WRITE(sc, CSR_INT, r);
2361 IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2362
2363 if (sc->sc_soft_hdl == NULL) {
2364 mutex_exit(&sc->sc_glock);
2365 return (DDI_INTR_CLAIMED);
2366 }
2367 if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2368 cmn_err(CE_WARN, "fatal firmware error\n");
2369 mutex_exit(&sc->sc_glock);
2370 #ifdef DEBUG
2371 /* dump event and error logs to dmesg */
2372 iwk_write_error_log(sc);
2373 iwk_write_event_log(sc);
2374 #endif /* DEBUG */
2375 iwk_stop(sc);
2376 sc->sc_ostate = sc->sc_ic.ic_state;
2377 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2378 sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2379 return (DDI_INTR_CLAIMED);
2380 }
2381
2382 if (r & BIT_INT_RF_KILL) {
2383 IWK_DBG((IWK_DEBUG_RADIO, "RF kill\n"));
2384 }
2385
2386 if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2387 (rfh & FH_INT_RX_MASK)) {
2388 sc->sc_rx_softint_pending = 1;
2389 (void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2390 }
2391
2392 if (r & BIT_INT_ALIVE) {
2393 IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
2394 }
2395
2396 /* re-enable interrupts */
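/*
 * (CSR_INI_SET_MASK restores the interrupt causes that were masked off
 * near the top of iwk_intr(); iwk_rx_softintr() re-enables them the
 * same way once it has drained the rx queue.)
 */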
2397 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK); 2398 mutex_exit(&sc->sc_glock); 2399 2400 return (DDI_INTR_CLAIMED); 2401 } 2402 2403 static uint8_t 2404 iwk_rate_to_plcp(int rate) 2405 { 2406 uint8_t ret; 2407 2408 switch (rate) { 2409 /* CCK rates */ 2410 case 2: 2411 ret = 0xa; 2412 break; 2413 case 4: 2414 ret = 0x14; 2415 break; 2416 case 11: 2417 ret = 0x37; 2418 break; 2419 case 22: 2420 ret = 0x6e; 2421 break; 2422 /* OFDM rates */ 2423 case 12: 2424 ret = 0xd; 2425 break; 2426 case 18: 2427 ret = 0xf; 2428 break; 2429 case 24: 2430 ret = 0x5; 2431 break; 2432 case 36: 2433 ret = 0x7; 2434 break; 2435 case 48: 2436 ret = 0x9; 2437 break; 2438 case 72: 2439 ret = 0xb; 2440 break; 2441 case 96: 2442 ret = 0x1; 2443 break; 2444 case 108: 2445 ret = 0x3; 2446 break; 2447 default: 2448 ret = 0; 2449 break; 2450 } 2451 return (ret); 2452 } 2453 2454 static mblk_t * 2455 iwk_m_tx(void *arg, mblk_t *mp) 2456 { 2457 iwk_sc_t *sc = (iwk_sc_t *)arg; 2458 ieee80211com_t *ic = &sc->sc_ic; 2459 mblk_t *next; 2460 2461 if (sc->sc_flags & IWK_F_SUSPEND) { 2462 freemsgchain(mp); 2463 return (NULL); 2464 } 2465 2466 if (ic->ic_state != IEEE80211_S_RUN) { 2467 freemsgchain(mp); 2468 return (NULL); 2469 } 2470 2471 while (mp != NULL) { 2472 next = mp->b_next; 2473 mp->b_next = NULL; 2474 if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) { 2475 mp->b_next = next; 2476 break; 2477 } 2478 mp = next; 2479 } 2480 return (mp); 2481 } 2482 2483 /* ARGSUSED */ 2484 static int 2485 iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type) 2486 { 2487 iwk_sc_t *sc = (iwk_sc_t *)ic; 2488 iwk_tx_ring_t *ring; 2489 iwk_tx_desc_t *desc; 2490 iwk_tx_data_t *data; 2491 iwk_cmd_t *cmd; 2492 iwk_tx_cmd_t *tx; 2493 ieee80211_node_t *in; 2494 struct ieee80211_frame *wh; 2495 struct ieee80211_key *k = NULL; 2496 mblk_t *m, *m0; 2497 int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS; 2498 uint16_t masks = 0; 2499 2500 ring = &sc->sc_txq[0]; 2501 data = &ring->data[ring->cur]; 2502 desc = data->desc; 2503 cmd = data->cmd; 2504 bzero(desc, sizeof (*desc)); 2505 bzero(cmd, sizeof (*cmd)); 2506 2507 mutex_enter(&sc->sc_tx_lock); 2508 if (sc->sc_flags & IWK_F_SUSPEND) { 2509 mutex_exit(&sc->sc_tx_lock); 2510 if ((type & IEEE80211_FC0_TYPE_MASK) != 2511 IEEE80211_FC0_TYPE_DATA) { 2512 freemsg(mp); 2513 } 2514 err = IWK_FAIL; 2515 goto exit; 2516 } 2517 2518 if (ring->queued > ring->count - 64) { 2519 IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n")); 2520 sc->sc_need_reschedule = 1; 2521 mutex_exit(&sc->sc_tx_lock); 2522 if ((type & IEEE80211_FC0_TYPE_MASK) != 2523 IEEE80211_FC0_TYPE_DATA) { 2524 freemsg(mp); 2525 } 2526 sc->sc_tx_nobuf++; 2527 err = IWK_FAIL; 2528 goto exit; 2529 } 2530 mutex_exit(&sc->sc_tx_lock); 2531 2532 hdrlen = sizeof (struct ieee80211_frame); 2533 2534 m = allocb(msgdsize(mp) + 32, BPRI_MED); 2535 if (m == NULL) { /* can not alloc buf, drop this package */ 2536 cmn_err(CE_WARN, 2537 "iwk_send(): failed to allocate msgbuf\n"); 2538 freemsg(mp); 2539 err = IWK_SUCCESS; 2540 goto exit; 2541 } 2542 for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) { 2543 mblen = MBLKL(m0); 2544 (void) memcpy(m->b_rptr + off, m0->b_rptr, mblen); 2545 off += mblen; 2546 } 2547 m->b_wptr += off; 2548 freemsg(mp); 2549 2550 wh = (struct ieee80211_frame *)m->b_rptr; 2551 2552 in = ieee80211_find_txnode(ic, wh->i_addr1); 2553 if (in == NULL) { 2554 cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n"); 2555 freemsg(m); 2556 sc->sc_tx_err++; 2557 err = IWK_SUCCESS; 2558 goto exit; 2559 } 2560 (void) 
ieee80211_encap(ic, m, in); 2561 2562 cmd->hdr.type = REPLY_TX; 2563 cmd->hdr.flags = 0; 2564 cmd->hdr.qid = ring->qid; 2565 cmd->hdr.idx = ring->cur; 2566 2567 tx = (iwk_tx_cmd_t *)cmd->data; 2568 tx->tx_flags = 0; 2569 2570 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2571 tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK)); 2572 } else { 2573 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK); 2574 } 2575 2576 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 2577 k = ieee80211_crypto_encap(ic, m); 2578 if (k == NULL) { 2579 freemsg(m); 2580 sc->sc_tx_err++; 2581 err = IWK_SUCCESS; 2582 goto exit; 2583 } 2584 2585 if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) { 2586 tx->sec_ctl = 2; /* for CCMP */ 2587 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK); 2588 (void) memcpy(&tx->key, k->wk_key, k->wk_keylen); 2589 } 2590 2591 /* packet header may have moved, reset our local pointer */ 2592 wh = (struct ieee80211_frame *)m->b_rptr; 2593 } 2594 2595 len = msgdsize(m); 2596 2597 #ifdef DEBUG 2598 if (iwk_dbg_flags & IWK_DEBUG_TX) 2599 ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0); 2600 #endif 2601 2602 /* pickup a rate */ 2603 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == 2604 IEEE80211_FC0_TYPE_MGT) { 2605 /* mgmt frames are sent at 1M */ 2606 rate = in->in_rates.ir_rates[0]; 2607 } else { 2608 /* 2609 * do it here for the software way rate control. 2610 * later for rate scaling in hardware. 2611 * maybe like the following, for management frame: 2612 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1; 2613 * for data frame: 2614 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK)); 2615 * rate = in->in_rates.ir_rates[in->in_txrate]; 2616 * tx->initial_rate_index = 1; 2617 * 2618 * now the txrate is determined in tx cmd flags, set to the 2619 * max value 54M for 11g and 11M for 11b. 
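 * In the meantime, the code below simply uses ic_fixed_rate when one
 * is configured and the node's current in_txrate entry otherwise.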
2620 */ 2621 2622 if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) { 2623 rate = ic->ic_fixed_rate; 2624 } else { 2625 rate = in->in_rates.ir_rates[in->in_txrate]; 2626 } 2627 } 2628 rate &= IEEE80211_RATE_VAL; 2629 IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x", 2630 in->in_txrate, in->in_rates.ir_nrates, rate)); 2631 2632 tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK)); 2633 2634 len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4); 2635 if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen)) 2636 tx->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; 2637 2638 /* retrieve destination node's id */ 2639 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2640 tx->sta_id = IWK_BROADCAST_ID; 2641 } else { 2642 if (ic->ic_opmode != IEEE80211_M_IBSS) 2643 tx->sta_id = IWK_AP_ID; 2644 } 2645 2646 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == 2647 IEEE80211_FC0_TYPE_MGT) { 2648 /* tell h/w to set timestamp in probe responses */ 2649 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 2650 IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2651 tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK); 2652 2653 if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 2654 IEEE80211_FC0_SUBTYPE_ASSOC_REQ) || 2655 ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 2656 IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) 2657 tx->timeout.pm_frame_timeout = 3; 2658 else 2659 tx->timeout.pm_frame_timeout = 2; 2660 } else 2661 tx->timeout.pm_frame_timeout = 0; 2662 if (rate == 2 || rate == 4 || rate == 11 || rate == 22) 2663 masks |= RATE_MCS_CCK_MSK; 2664 2665 masks |= RATE_MCS_ANT_B_MSK; 2666 tx->rate.r.rate_n_flags = (iwk_rate_to_plcp(rate) | masks); 2667 2668 IWK_DBG((IWK_DEBUG_TX, "tx flag = %x", 2669 tx->tx_flags)); 2670 2671 tx->rts_retry_limit = 60; 2672 tx->data_retry_limit = 15; 2673 2674 tx->stop_time.life_time = LE_32(0xffffffff); 2675 2676 tx->len = LE_16(len); 2677 2678 tx->dram_lsb_ptr = 2679 data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch); 2680 tx->dram_msb_ptr = 0; 2681 tx->driver_txop = 0; 2682 tx->next_frame_len = 0; 2683 2684 (void) memcpy(tx + 1, m->b_rptr, hdrlen); 2685 m->b_rptr += hdrlen; 2686 (void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen); 2687 2688 IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d", 2689 ring->qid, ring->cur, len)); 2690 2691 /* 2692 * first segment includes the tx cmd plus the 802.11 header, 2693 * the second includes the remaining of the 802.11 frame. 2694 */ 2695 desc->val0 = LE_32(2 << 24); 2696 desc->pa[0].tb1_addr = LE_32(data->paddr_cmd); 2697 desc->pa[0].val1 = ((len0 << 4) & 0xfff0) | 2698 ((data->dma_data.cookie.dmac_address & 0xffff) << 16); 2699 desc->pa[0].val2 = 2700 ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) | 2701 ((len - hdrlen) << 20); 2702 IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x " 2703 "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x", 2704 data->paddr_cmd, data->dma_data.cookie.dmac_address, 2705 len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2)); 2706 2707 mutex_enter(&sc->sc_tx_lock); 2708 ring->queued++; 2709 mutex_exit(&sc->sc_tx_lock); 2710 2711 /* kick ring */ 2712 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 2713 tfd_offset[ring->cur].val = 8 + len; 2714 if (ring->cur < IWK_MAX_WIN_SIZE) { 2715 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 
2716 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len; 2717 } 2718 2719 IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV); 2720 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV); 2721 2722 ring->cur = (ring->cur + 1) % ring->count; 2723 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 2724 freemsg(m); 2725 /* release node reference */ 2726 ieee80211_free_node(in); 2727 2728 ic->ic_stats.is_tx_bytes += len; 2729 ic->ic_stats.is_tx_frags++; 2730 2731 if (sc->sc_tx_timer == 0) 2732 sc->sc_tx_timer = 10; 2733 exit: 2734 return (err); 2735 } 2736 2737 static void 2738 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp) 2739 { 2740 iwk_sc_t *sc = (iwk_sc_t *)arg; 2741 ieee80211com_t *ic = &sc->sc_ic; 2742 int err; 2743 2744 err = ieee80211_ioctl(ic, wq, mp); 2745 2746 if (err == ENETRESET) { 2747 /* 2748 * This is special for the hidden AP connection. 2749 * In any case, we should make sure only one 'scan' 2750 * in the driver for a 'connect' CLI command. So 2751 * when connecting to a hidden AP, the scan is just 2752 * sent out to the air when we know the desired 2753 * essid of the AP we want to connect. 2754 */ 2755 if (ic->ic_des_esslen) { 2756 if (sc->sc_flags & IWK_F_RUNNING) { 2757 iwk_m_stop(sc); 2758 (void) iwk_m_start(sc); 2759 (void) ieee80211_new_state(ic, 2760 IEEE80211_S_SCAN, -1); 2761 } 2762 } 2763 } 2764 } 2765 2766 /* 2767 * callback functions for set/get properties 2768 */ 2769 static int 2770 iwk_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num, 2771 uint_t pr_flags, uint_t wldp_length, void *wldp_buf) 2772 { 2773 int err = 0; 2774 iwk_sc_t *sc = (iwk_sc_t *)arg; 2775 2776 err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num, 2777 pr_flags, wldp_length, wldp_buf); 2778 2779 return (err); 2780 } 2781 static int 2782 iwk_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num, 2783 uint_t wldp_length, const void *wldp_buf) 2784 { 2785 int err; 2786 iwk_sc_t *sc = (iwk_sc_t *)arg; 2787 ieee80211com_t *ic = &sc->sc_ic; 2788 2789 err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length, 2790 wldp_buf); 2791 2792 if (err == ENETRESET) { 2793 if (ic->ic_des_esslen) { 2794 if (sc->sc_flags & IWK_F_RUNNING) { 2795 iwk_m_stop(sc); 2796 (void) iwk_m_start(sc); 2797 (void) ieee80211_new_state(ic, 2798 IEEE80211_S_SCAN, -1); 2799 } 2800 } 2801 err = 0; 2802 } 2803 2804 return (err); 2805 } 2806 2807 /*ARGSUSED*/ 2808 static int 2809 iwk_m_stat(void *arg, uint_t stat, uint64_t *val) 2810 { 2811 iwk_sc_t *sc = (iwk_sc_t *)arg; 2812 ieee80211com_t *ic = &sc->sc_ic; 2813 ieee80211_node_t *in = ic->ic_bss; 2814 struct ieee80211_rateset *rs = &in->in_rates; 2815 2816 mutex_enter(&sc->sc_glock); 2817 switch (stat) { 2818 case MAC_STAT_IFSPEED: 2819 *val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ? 
2820 (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) 2821 : ic->ic_fixed_rate) /2 * 1000000; 2822 break; 2823 case MAC_STAT_NOXMTBUF: 2824 *val = sc->sc_tx_nobuf; 2825 break; 2826 case MAC_STAT_NORCVBUF: 2827 *val = sc->sc_rx_nobuf; 2828 break; 2829 case MAC_STAT_IERRORS: 2830 *val = sc->sc_rx_err; 2831 break; 2832 case MAC_STAT_RBYTES: 2833 *val = ic->ic_stats.is_rx_bytes; 2834 break; 2835 case MAC_STAT_IPACKETS: 2836 *val = ic->ic_stats.is_rx_frags; 2837 break; 2838 case MAC_STAT_OBYTES: 2839 *val = ic->ic_stats.is_tx_bytes; 2840 break; 2841 case MAC_STAT_OPACKETS: 2842 *val = ic->ic_stats.is_tx_frags; 2843 break; 2844 case MAC_STAT_OERRORS: 2845 case WIFI_STAT_TX_FAILED: 2846 *val = sc->sc_tx_err; 2847 break; 2848 case WIFI_STAT_TX_RETRANS: 2849 *val = sc->sc_tx_retries; 2850 break; 2851 case WIFI_STAT_FCS_ERRORS: 2852 case WIFI_STAT_WEP_ERRORS: 2853 case WIFI_STAT_TX_FRAGS: 2854 case WIFI_STAT_MCAST_TX: 2855 case WIFI_STAT_RTS_SUCCESS: 2856 case WIFI_STAT_RTS_FAILURE: 2857 case WIFI_STAT_ACK_FAILURE: 2858 case WIFI_STAT_RX_FRAGS: 2859 case WIFI_STAT_MCAST_RX: 2860 case WIFI_STAT_RX_DUPS: 2861 mutex_exit(&sc->sc_glock); 2862 return (ieee80211_stat(ic, stat, val)); 2863 default: 2864 mutex_exit(&sc->sc_glock); 2865 return (ENOTSUP); 2866 } 2867 mutex_exit(&sc->sc_glock); 2868 2869 return (IWK_SUCCESS); 2870 2871 } 2872 2873 static int 2874 iwk_m_start(void *arg) 2875 { 2876 iwk_sc_t *sc = (iwk_sc_t *)arg; 2877 ieee80211com_t *ic = &sc->sc_ic; 2878 int err; 2879 2880 err = iwk_init(sc); 2881 2882 if (err != IWK_SUCCESS) { 2883 /* 2884 * The hw init err(eg. RF is OFF). Return Success to make 2885 * the 'plumb' succeed. The iwk_thread() tries to re-init 2886 * background. 2887 */ 2888 cmn_err(CE_WARN, "iwk_m_start(): failed to initialize " 2889 "hardware\n"); 2890 mutex_enter(&sc->sc_glock); 2891 sc->sc_flags |= IWK_F_HW_ERR_RECOVER; 2892 mutex_exit(&sc->sc_glock); 2893 return (IWK_SUCCESS); 2894 } 2895 2896 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 2897 2898 mutex_enter(&sc->sc_glock); 2899 sc->sc_flags |= IWK_F_RUNNING; 2900 mutex_exit(&sc->sc_glock); 2901 2902 return (IWK_SUCCESS); 2903 } 2904 2905 static void 2906 iwk_m_stop(void *arg) 2907 { 2908 iwk_sc_t *sc = (iwk_sc_t *)arg; 2909 ieee80211com_t *ic = &sc->sc_ic; 2910 2911 iwk_stop(sc); 2912 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 2913 mutex_enter(&sc->sc_mt_lock); 2914 sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER; 2915 sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL; 2916 mutex_exit(&sc->sc_mt_lock); 2917 mutex_enter(&sc->sc_glock); 2918 sc->sc_flags &= ~IWK_F_RUNNING; 2919 sc->sc_flags &= ~IWK_F_SCANNING; 2920 mutex_exit(&sc->sc_glock); 2921 } 2922 2923 /*ARGSUSED*/ 2924 static int 2925 iwk_m_unicst(void *arg, const uint8_t *macaddr) 2926 { 2927 iwk_sc_t *sc = (iwk_sc_t *)arg; 2928 ieee80211com_t *ic = &sc->sc_ic; 2929 int err; 2930 2931 if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) { 2932 IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr); 2933 mutex_enter(&sc->sc_glock); 2934 err = iwk_config(sc); 2935 mutex_exit(&sc->sc_glock); 2936 if (err != IWK_SUCCESS) { 2937 cmn_err(CE_WARN, 2938 "iwk_m_unicst(): " 2939 "failed to configure device\n"); 2940 goto fail; 2941 } 2942 } 2943 return (IWK_SUCCESS); 2944 fail: 2945 return (err); 2946 } 2947 2948 /*ARGSUSED*/ 2949 static int 2950 iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m) 2951 { 2952 return (IWK_SUCCESS); 2953 } 2954 2955 /*ARGSUSED*/ 2956 static int 2957 iwk_m_promisc(void *arg, boolean_t on) 2958 { 2959 return (IWK_SUCCESS); 2960 } 2961 2962 static void 2963 
iwk_thread(iwk_sc_t *sc) 2964 { 2965 ieee80211com_t *ic = &sc->sc_ic; 2966 clock_t clk; 2967 int times = 0, err, n = 0, timeout = 0; 2968 uint32_t tmp; 2969 2970 mutex_enter(&sc->sc_mt_lock); 2971 while (sc->sc_mf_thread_switch) { 2972 tmp = IWK_READ(sc, CSR_GP_CNTRL); 2973 if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) { 2974 sc->sc_flags &= ~IWK_F_RADIO_OFF; 2975 } else { 2976 sc->sc_flags |= IWK_F_RADIO_OFF; 2977 } 2978 /* 2979 * If in SUSPEND or the RF is OFF, do nothing 2980 */ 2981 if ((sc->sc_flags & IWK_F_SUSPEND) || 2982 (sc->sc_flags & IWK_F_RADIO_OFF)) { 2983 mutex_exit(&sc->sc_mt_lock); 2984 delay(drv_usectohz(100000)); 2985 mutex_enter(&sc->sc_mt_lock); 2986 continue; 2987 } 2988 2989 /* 2990 * recovery fatal error 2991 */ 2992 if (ic->ic_mach && 2993 (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) { 2994 2995 IWK_DBG((IWK_DEBUG_FW, 2996 "iwk_thread(): " 2997 "try to recover fatal hw error: %d\n", times++)); 2998 2999 iwk_stop(sc); 3000 3001 mutex_exit(&sc->sc_mt_lock); 3002 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 3003 delay(drv_usectohz(2000000 + n*500000)); 3004 mutex_enter(&sc->sc_mt_lock); 3005 3006 err = iwk_init(sc); 3007 if (err != IWK_SUCCESS) { 3008 n++; 3009 if (n < 20) 3010 continue; 3011 } 3012 n = 0; 3013 if (!err) 3014 sc->sc_flags |= IWK_F_RUNNING; 3015 sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER; 3016 mutex_exit(&sc->sc_mt_lock); 3017 delay(drv_usectohz(2000000)); 3018 if (sc->sc_ostate != IEEE80211_S_INIT) 3019 ieee80211_new_state(ic, IEEE80211_S_SCAN, 0); 3020 mutex_enter(&sc->sc_mt_lock); 3021 } 3022 3023 if (ic->ic_mach && 3024 (sc->sc_flags & IWK_F_SCANNING) && sc->sc_scan_pending) { 3025 3026 IWK_DBG((IWK_DEBUG_SCAN, 3027 "iwk_thread(): " 3028 "wait for probe response\n")); 3029 3030 sc->sc_scan_pending--; 3031 mutex_exit(&sc->sc_mt_lock); 3032 delay(drv_usectohz(200000)); 3033 ieee80211_next_scan(ic); 3034 mutex_enter(&sc->sc_mt_lock); 3035 } 3036 3037 /* 3038 * rate ctl 3039 */ 3040 if (ic->ic_mach && 3041 (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) { 3042 clk = ddi_get_lbolt(); 3043 if (clk > sc->sc_clk + drv_usectohz(500000)) { 3044 iwk_amrr_timeout(sc); 3045 } 3046 } 3047 3048 mutex_exit(&sc->sc_mt_lock); 3049 delay(drv_usectohz(100000)); 3050 mutex_enter(&sc->sc_mt_lock); 3051 3052 if (sc->sc_tx_timer) { 3053 timeout++; 3054 if (timeout == 10) { 3055 sc->sc_tx_timer--; 3056 if (sc->sc_tx_timer == 0) { 3057 sc->sc_flags |= IWK_F_HW_ERR_RECOVER; 3058 sc->sc_ostate = IEEE80211_S_RUN; 3059 IWK_DBG((IWK_DEBUG_FW, 3060 "iwk_thread(): try to recover from" 3061 " 'send fail\n")); 3062 } 3063 timeout = 0; 3064 } 3065 } 3066 3067 } 3068 sc->sc_mf_thread = NULL; 3069 cv_signal(&sc->sc_mt_cv); 3070 mutex_exit(&sc->sc_mt_lock); 3071 } 3072 3073 3074 /* 3075 * Send a command to the firmware. 
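 * If 'async' is non-zero the command is only queued and the caller
 * returns immediately; otherwise the caller blocks on sc_cmd_cv (with
 * sc_glock held) for up to two seconds, waiting for iwk_cmd_intr() to
 * set IWK_F_CMD_DONE when the firmware answers.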
3076 */ 3077 static int 3078 iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async) 3079 { 3080 iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM]; 3081 iwk_tx_desc_t *desc; 3082 iwk_cmd_t *cmd; 3083 clock_t clk; 3084 3085 ASSERT(size <= sizeof (cmd->data)); 3086 ASSERT(mutex_owned(&sc->sc_glock)); 3087 3088 IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code)); 3089 desc = ring->data[ring->cur].desc; 3090 cmd = ring->data[ring->cur].cmd; 3091 3092 cmd->hdr.type = (uint8_t)code; 3093 cmd->hdr.flags = 0; 3094 cmd->hdr.qid = ring->qid; 3095 cmd->hdr.idx = ring->cur; 3096 (void) memcpy(cmd->data, buf, size); 3097 (void) memset(desc, 0, sizeof (*desc)); 3098 3099 desc->val0 = LE_32(1 << 24); 3100 desc->pa[0].tb1_addr = 3101 (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff); 3102 desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0; 3103 3104 /* kick cmd ring XXX */ 3105 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 3106 tfd_offset[ring->cur].val = 8; 3107 if (ring->cur < IWK_MAX_WIN_SIZE) { 3108 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 3109 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8; 3110 } 3111 ring->cur = (ring->cur + 1) % ring->count; 3112 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3113 3114 if (async) 3115 return (IWK_SUCCESS); 3116 else { 3117 sc->sc_flags &= ~IWK_F_CMD_DONE; 3118 clk = ddi_get_lbolt() + drv_usectohz(2000000); 3119 while (!(sc->sc_flags & IWK_F_CMD_DONE)) { 3120 if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk) < 3121 0) 3122 break; 3123 } 3124 if (sc->sc_flags & IWK_F_CMD_DONE) 3125 return (IWK_SUCCESS); 3126 else 3127 return (IWK_FAIL); 3128 } 3129 } 3130 3131 static void 3132 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on) 3133 { 3134 iwk_led_cmd_t led; 3135 3136 led.interval = LE_32(100000); /* unit: 100ms */ 3137 led.id = id; 3138 led.off = off; 3139 led.on = on; 3140 3141 (void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1); 3142 } 3143 3144 static int 3145 iwk_hw_set_before_auth(iwk_sc_t *sc) 3146 { 3147 ieee80211com_t *ic = &sc->sc_ic; 3148 ieee80211_node_t *in = ic->ic_bss; 3149 iwk_add_sta_t node; 3150 iwk_link_quality_cmd_t link_quality; 3151 struct ieee80211_rateset rs; 3152 uint16_t masks = 0, rate; 3153 int i, err; 3154 3155 /* update adapter's configuration according the info of target AP */ 3156 IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid); 3157 sc->sc_config.chan = ieee80211_chan2ieee(ic, in->in_chan); 3158 if (ic->ic_curmode == IEEE80211_MODE_11B) { 3159 sc->sc_config.cck_basic_rates = 0x03; 3160 sc->sc_config.ofdm_basic_rates = 0; 3161 } else if ((in->in_chan != IEEE80211_CHAN_ANYC) && 3162 (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) { 3163 sc->sc_config.cck_basic_rates = 0; 3164 sc->sc_config.ofdm_basic_rates = 0x15; 3165 } else { /* assume 802.11b/g */ 3166 sc->sc_config.cck_basic_rates = 0x0f; 3167 sc->sc_config.ofdm_basic_rates = 0xff; 3168 } 3169 3170 sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK | 3171 RXON_FLG_SHORT_SLOT_MSK); 3172 3173 if (ic->ic_flags & IEEE80211_F_SHSLOT) 3174 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK); 3175 else 3176 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK); 3177 3178 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 3179 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK); 3180 else 3181 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK); 3182 3183 IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x " 3184 "filter_flags %x cck %x ofdm %x" 3185 " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n", 3186 sc->sc_config.chan, 
sc->sc_config.flags, 3187 sc->sc_config.filter_flags, 3188 sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates, 3189 sc->sc_config.bssid[0], sc->sc_config.bssid[1], 3190 sc->sc_config.bssid[2], sc->sc_config.bssid[3], 3191 sc->sc_config.bssid[4], sc->sc_config.bssid[5])); 3192 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 3193 sizeof (iwk_rxon_cmd_t), 1); 3194 if (err != IWK_SUCCESS) { 3195 cmn_err(CE_WARN, "iwk_hw_set_before_auth():" 3196 " failed to config chan%d\n", 3197 sc->sc_config.chan); 3198 return (err); 3199 } 3200 3201 /* obtain current temperature of chipset */ 3202 sc->sc_tempera = iwk_curr_tempera(sc); 3203 3204 /* make Tx power calibration to determine the gains of DSP and radio */ 3205 err = iwk_tx_power_calibration(sc); 3206 if (err) { 3207 cmn_err(CE_WARN, "iwk_hw_set_before_auth():" 3208 "failed to set tx power table\n"); 3209 return (err); 3210 } 3211 3212 /* add default AP node */ 3213 (void) memset(&node, 0, sizeof (node)); 3214 IEEE80211_ADDR_COPY(node.bssid, in->in_bssid); 3215 node.id = IWK_AP_ID; 3216 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1); 3217 if (err != IWK_SUCCESS) { 3218 cmn_err(CE_WARN, "iwk_hw_set_before_auth(): " 3219 "failed to add BSS node\n"); 3220 return (err); 3221 } 3222 3223 /* TX_LINK_QUALITY cmd ? */ 3224 (void) memset(&link_quality, 0, sizeof (link_quality)); 3225 rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)]; 3226 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 3227 if (i < rs.ir_nrates) 3228 rate = rs.ir_rates[rs.ir_nrates - i]; 3229 else 3230 rate = 2; 3231 if (rate == 2 || rate == 4 || rate == 11 || rate == 22) 3232 masks |= RATE_MCS_CCK_MSK; 3233 masks |= RATE_MCS_ANT_B_MSK; 3234 masks &= ~RATE_MCS_ANT_A_MSK; 3235 link_quality.rate_n_flags[i] = 3236 iwk_rate_to_plcp(rate) | masks; 3237 } 3238 3239 link_quality.general_params.single_stream_ant_msk = 2; 3240 link_quality.general_params.dual_stream_ant_msk = 3; 3241 link_quality.agg_params.agg_dis_start_th = 3; 3242 link_quality.agg_params.agg_time_limit = LE_16(4000); 3243 link_quality.sta_id = IWK_AP_ID; 3244 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality, 3245 sizeof (link_quality), 1); 3246 if (err != IWK_SUCCESS) { 3247 cmn_err(CE_WARN, "iwk_hw_set_before_auth(): " 3248 "failed to config link quality table\n"); 3249 return (err); 3250 } 3251 3252 return (IWK_SUCCESS); 3253 } 3254 3255 /* 3256 * Send a scan request(assembly scan cmd) to the firmware. 
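 * The command body is assembled in place: an iwk_scan_hdr_t, followed
 * by a broadcast probe request frame (802.11 header plus SSID, rates,
 * extended rates and any optional IEs), followed by one iwk_scan_chan_t
 * per channel to be scanned.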
3257 */ 3258 static int 3259 iwk_scan(iwk_sc_t *sc) 3260 { 3261 ieee80211com_t *ic = &sc->sc_ic; 3262 iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM]; 3263 iwk_tx_desc_t *desc; 3264 iwk_tx_data_t *data; 3265 iwk_cmd_t *cmd; 3266 iwk_scan_hdr_t *hdr; 3267 iwk_scan_chan_t *chan; 3268 struct ieee80211_frame *wh; 3269 ieee80211_node_t *in = ic->ic_bss; 3270 uint8_t essid[IEEE80211_NWID_LEN+1]; 3271 struct ieee80211_rateset *rs; 3272 enum ieee80211_phymode mode; 3273 uint8_t *frm; 3274 int i, pktlen, nrates; 3275 3276 data = &ring->data[ring->cur]; 3277 desc = data->desc; 3278 cmd = (iwk_cmd_t *)data->dma_data.mem_va; 3279 3280 cmd->hdr.type = REPLY_SCAN_CMD; 3281 cmd->hdr.flags = 0; 3282 cmd->hdr.qid = ring->qid; 3283 cmd->hdr.idx = ring->cur | 0x40; 3284 3285 hdr = (iwk_scan_hdr_t *)cmd->data; 3286 (void) memset(hdr, 0, sizeof (iwk_scan_hdr_t)); 3287 hdr->nchan = 1; 3288 hdr->quiet_time = LE_16(50); 3289 hdr->quiet_plcp_th = LE_16(1); 3290 3291 hdr->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 3292 hdr->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK | 3293 LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) | 3294 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) | 3295 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS)); 3296 3297 hdr->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 3298 hdr->tx_cmd.sta_id = IWK_BROADCAST_ID; 3299 hdr->tx_cmd.stop_time.life_time = 0xffffffff; 3300 hdr->tx_cmd.tx_flags |= (0x200); 3301 hdr->tx_cmd.rate.r.rate_n_flags = iwk_rate_to_plcp(2); 3302 hdr->tx_cmd.rate.r.rate_n_flags |= 3303 (RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK); 3304 hdr->direct_scan[0].len = ic->ic_des_esslen; 3305 hdr->direct_scan[0].id = IEEE80211_ELEMID_SSID; 3306 3307 if (ic->ic_des_esslen) { 3308 bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen); 3309 essid[ic->ic_des_esslen] = '\0'; 3310 IWK_DBG((IWK_DEBUG_SCAN, "directed scan %s\n", essid)); 3311 3312 bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid, 3313 ic->ic_des_esslen); 3314 } else { 3315 bzero(hdr->direct_scan[0].ssid, 3316 sizeof (hdr->direct_scan[0].ssid)); 3317 } 3318 /* 3319 * a probe request frame is required after the REPLY_SCAN_CMD 3320 */ 3321 wh = (struct ieee80211_frame *)(hdr + 1); 3322 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 3323 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 3324 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 3325 (void) memset(wh->i_addr1, 0xff, 6); 3326 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr); 3327 (void) memset(wh->i_addr3, 0xff, 6); 3328 *(uint16_t *)&wh->i_dur[0] = 0; 3329 *(uint16_t *)&wh->i_seq[0] = 0; 3330 3331 frm = (uint8_t *)(wh + 1); 3332 3333 /* essid IE */ 3334 if (in->in_esslen) { 3335 bcopy(in->in_essid, essid, in->in_esslen); 3336 essid[in->in_esslen] = '\0'; 3337 IWK_DBG((IWK_DEBUG_SCAN, "probe with ESSID %s\n", 3338 essid)); 3339 } 3340 *frm++ = IEEE80211_ELEMID_SSID; 3341 *frm++ = in->in_esslen; 3342 (void) memcpy(frm, in->in_essid, in->in_esslen); 3343 frm += in->in_esslen; 3344 3345 mode = ieee80211_chan2mode(ic, ic->ic_curchan); 3346 rs = &ic->ic_sup_rates[mode]; 3347 3348 /* supported rates IE */ 3349 *frm++ = IEEE80211_ELEMID_RATES; 3350 nrates = rs->ir_nrates; 3351 if (nrates > IEEE80211_RATE_SIZE) 3352 nrates = IEEE80211_RATE_SIZE; 3353 *frm++ = (uint8_t)nrates; 3354 (void) memcpy(frm, rs->ir_rates, nrates); 3355 frm += nrates; 3356 3357 /* supported xrates IE */ 3358 if (rs->ir_nrates > IEEE80211_RATE_SIZE) { 3359 nrates = rs->ir_nrates - IEEE80211_RATE_SIZE; 3360 *frm++ = IEEE80211_ELEMID_XRATES; 3361 *frm++ = (uint8_t)nrates; 3362 (void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates); 
3363 frm += nrates; 3364 } 3365 3366 /* optionnal IE (usually for wpa) */ 3367 if (ic->ic_opt_ie != NULL) { 3368 (void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len); 3369 frm += ic->ic_opt_ie_len; 3370 } 3371 3372 /* setup length of probe request */ 3373 hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh)); 3374 hdr->len = hdr->nchan * sizeof (iwk_scan_chan_t) + 3375 hdr->tx_cmd.len + sizeof (iwk_scan_hdr_t); 3376 3377 /* 3378 * the attribute of the scan channels are required after the probe 3379 * request frame. 3380 */ 3381 chan = (iwk_scan_chan_t *)frm; 3382 for (i = 1; i <= hdr->nchan; i++, chan++) { 3383 if (ic->ic_des_esslen) { 3384 chan->type = 3; 3385 } else { 3386 chan->type = 1; 3387 } 3388 3389 chan->chan = ieee80211_chan2ieee(ic, ic->ic_curchan); 3390 chan->tpc.tx_gain = 0x3f; 3391 chan->tpc.dsp_atten = 110; 3392 chan->active_dwell = LE_16(50); 3393 chan->passive_dwell = LE_16(120); 3394 3395 frm += sizeof (iwk_scan_chan_t); 3396 } 3397 3398 pktlen = _PTRDIFF(frm, cmd); 3399 3400 (void) memset(desc, 0, sizeof (*desc)); 3401 desc->val0 = LE_32(1 << 24); 3402 desc->pa[0].tb1_addr = 3403 (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff); 3404 desc->pa[0].val1 = (pktlen << 4) & 0xfff0; 3405 3406 /* 3407 * maybe for cmd, filling the byte cnt table is not necessary. 3408 * anyway, we fill it here. 3409 */ 3410 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 3411 tfd_offset[ring->cur].val = 8; 3412 if (ring->cur < IWK_MAX_WIN_SIZE) { 3413 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 3414 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8; 3415 } 3416 3417 /* kick cmd ring */ 3418 ring->cur = (ring->cur + 1) % ring->count; 3419 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3420 3421 return (IWK_SUCCESS); 3422 } 3423 3424 static int 3425 iwk_config(iwk_sc_t *sc) 3426 { 3427 ieee80211com_t *ic = &sc->sc_ic; 3428 iwk_powertable_cmd_t powertable; 3429 iwk_bt_cmd_t bt; 3430 iwk_add_sta_t node; 3431 iwk_link_quality_cmd_t link_quality; 3432 int i, err; 3433 uint16_t masks = 0; 3434 3435 /* 3436 * set power mode. 
Disable power management at present, do it later 3437 */ 3438 (void) memset(&powertable, 0, sizeof (powertable)); 3439 powertable.flags = LE_16(0x8); 3440 err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable, 3441 sizeof (powertable), 0); 3442 if (err != IWK_SUCCESS) { 3443 cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n"); 3444 return (err); 3445 } 3446 3447 /* configure bt coexistence */ 3448 (void) memset(&bt, 0, sizeof (bt)); 3449 bt.flags = 3; 3450 bt.lead_time = 0xaa; 3451 bt.max_kill = 1; 3452 err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt, 3453 sizeof (bt), 0); 3454 if (err != IWK_SUCCESS) { 3455 cmn_err(CE_WARN, 3456 "iwk_config(): " 3457 "failed to configurate bt coexistence\n"); 3458 return (err); 3459 } 3460 3461 /* configure rxon */ 3462 (void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t)); 3463 IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr); 3464 IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr); 3465 sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan); 3466 sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK | 3467 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_BAND_24G_MSK); 3468 sc->sc_config.flags &= (~RXON_FLG_CCK_MSK); 3469 switch (ic->ic_opmode) { 3470 case IEEE80211_M_STA: 3471 sc->sc_config.dev_type = RXON_DEV_TYPE_ESS; 3472 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 3473 RXON_FILTER_DIS_DECRYPT_MSK | 3474 RXON_FILTER_DIS_GRP_DECRYPT_MSK); 3475 break; 3476 case IEEE80211_M_AHDEMO: 3477 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS; 3478 sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 3479 sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 3480 RXON_FILTER_DIS_DECRYPT_MSK | 3481 RXON_FILTER_DIS_GRP_DECRYPT_MSK); 3482 break; 3483 case IEEE80211_M_HOSTAP: 3484 sc->sc_config.dev_type = RXON_DEV_TYPE_AP; 3485 break; 3486 case IEEE80211_M_MONITOR: 3487 sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER; 3488 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 3489 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); 3490 break; 3491 } 3492 sc->sc_config.cck_basic_rates = 0x0f; 3493 sc->sc_config.ofdm_basic_rates = 0xff; 3494 3495 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff; 3496 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff; 3497 3498 /* set antenna */ 3499 3500 sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK | 3501 LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) | 3502 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) | 3503 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS)); 3504 3505 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 3506 sizeof (iwk_rxon_cmd_t), 0); 3507 if (err != IWK_SUCCESS) { 3508 cmn_err(CE_WARN, "iwk_config(): " 3509 "failed to set configure command\n"); 3510 return (err); 3511 } 3512 /* obtain current temperature of chipset */ 3513 sc->sc_tempera = iwk_curr_tempera(sc); 3514 3515 /* make Tx power calibration to determine the gains of DSP and radio */ 3516 err = iwk_tx_power_calibration(sc); 3517 if (err) { 3518 cmn_err(CE_WARN, "iwk_config(): " 3519 "failed to set tx power table\n"); 3520 return (err); 3521 } 3522 3523 /* add broadcast node so that we can send broadcast frame */ 3524 (void) memset(&node, 0, sizeof (node)); 3525 (void) memset(node.bssid, 0xff, 6); 3526 node.id = IWK_BROADCAST_ID; 3527 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0); 3528 if (err != IWK_SUCCESS) { 3529 cmn_err(CE_WARN, "iwk_config(): " 3530 "failed to add broadcast node\n"); 3531 return (err); 3532 } 3533 3534 /* TX_LINK_QUALITY cmd ? 
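 * Build the retry rate table for the broadcast station: with no rate
 * scaling in place yet, every entry is pinned to 1 Mbit/s CCK on
 * antenna B.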
*/ 3535 (void) memset(&link_quality, 0, sizeof (link_quality)); 3536 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 3537 masks |= RATE_MCS_CCK_MSK; 3538 masks |= RATE_MCS_ANT_B_MSK; 3539 masks &= ~RATE_MCS_ANT_A_MSK; 3540 link_quality.rate_n_flags[i] = iwk_rate_to_plcp(2) | masks; 3541 } 3542 3543 link_quality.general_params.single_stream_ant_msk = 2; 3544 link_quality.general_params.dual_stream_ant_msk = 3; 3545 link_quality.agg_params.agg_dis_start_th = 3; 3546 link_quality.agg_params.agg_time_limit = LE_16(4000); 3547 link_quality.sta_id = IWK_BROADCAST_ID; 3548 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality, 3549 sizeof (link_quality), 0); 3550 if (err != IWK_SUCCESS) { 3551 cmn_err(CE_WARN, "iwk_config(): " 3552 "failed to config link quality table\n"); 3553 return (err); 3554 } 3555 3556 return (IWK_SUCCESS); 3557 } 3558 3559 static void 3560 iwk_stop_master(iwk_sc_t *sc) 3561 { 3562 uint32_t tmp; 3563 int n; 3564 3565 tmp = IWK_READ(sc, CSR_RESET); 3566 IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER); 3567 3568 tmp = IWK_READ(sc, CSR_GP_CNTRL); 3569 if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) == 3570 CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) 3571 return; 3572 3573 for (n = 0; n < 2000; n++) { 3574 if (IWK_READ(sc, CSR_RESET) & 3575 CSR_RESET_REG_FLAG_MASTER_DISABLED) 3576 break; 3577 DELAY(1000); 3578 } 3579 if (n == 2000) 3580 IWK_DBG((IWK_DEBUG_HW, 3581 "timeout waiting for master stop\n")); 3582 } 3583 3584 static int 3585 iwk_power_up(iwk_sc_t *sc) 3586 { 3587 uint32_t tmp; 3588 3589 iwk_mac_access_enter(sc); 3590 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL); 3591 tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC; 3592 tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN; 3593 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp); 3594 iwk_mac_access_exit(sc); 3595 3596 DELAY(5000); 3597 return (IWK_SUCCESS); 3598 } 3599 3600 static int 3601 iwk_preinit(iwk_sc_t *sc) 3602 { 3603 uint32_t tmp; 3604 int n; 3605 uint8_t vlink; 3606 3607 /* clear any pending interrupts */ 3608 IWK_WRITE(sc, CSR_INT, 0xffffffff); 3609 3610 tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS); 3611 IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS, 3612 tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 3613 3614 tmp = IWK_READ(sc, CSR_GP_CNTRL); 3615 IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 3616 3617 /* wait for clock ready */ 3618 for (n = 0; n < 1000; n++) { 3619 if (IWK_READ(sc, CSR_GP_CNTRL) & 3620 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) 3621 break; 3622 DELAY(10); 3623 } 3624 if (n == 1000) { 3625 cmn_err(CE_WARN, 3626 "iwk_preinit(): timeout waiting for clock ready\n"); 3627 return (ETIMEDOUT); 3628 } 3629 iwk_mac_access_enter(sc); 3630 tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG); 3631 iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp | 3632 APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT); 3633 3634 DELAY(20); 3635 tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT); 3636 iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp | 3637 APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE); 3638 iwk_mac_access_exit(sc); 3639 3640 IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? 
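 * (presumably the coalescing timer counts in units of 32 usec,
 * so this asks the NIC to hold interrupts off for roughly 512 usec)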
*/
3641
3642 (void) iwk_power_up(sc);
3643
3644 if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
3645 tmp = ddi_get32(sc->sc_cfg_handle,
3646 (uint32_t *)(sc->sc_cfg_base + 0xe8));
3647 ddi_put32(sc->sc_cfg_handle,
3648 (uint32_t *)(sc->sc_cfg_base + 0xe8),
3649 tmp & ~(1 << 11));
3650 }
3651
3652
3653 vlink = ddi_get8(sc->sc_cfg_handle,
3654 (uint8_t *)(sc->sc_cfg_base + 0xf0));
3655 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
3656 vlink & ~2);
3657
3658 tmp = IWK_READ(sc, CSR_SW_VER);
3659 tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
3660 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
3661 CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R;
3662 IWK_WRITE(sc, CSR_SW_VER, tmp);
3663
3664 /* make sure the power supply is on for each part of the hardware */
3665 iwk_mac_access_enter(sc);
3666 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3667 tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3668 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3669 DELAY(5);
3670 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3671 tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3672 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3673 iwk_mac_access_exit(sc);
3674 return (IWK_SUCCESS);
3675 }
3676
3677 /*
3678 * set up the semaphore flag to own the EEPROM
3679 */
3680 static int iwk_eep_sem_down(iwk_sc_t *sc)
3681 {
3682 int count1, count2;
3683 uint32_t tmp;
3684
3685 for (count1 = 0; count1 < 1000; count1++) {
3686 tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3687 IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3688 tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
3689
3690 for (count2 = 0; count2 < 2; count2++) {
3691 if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) &
3692 CSR_HW_IF_CONFIG_REG_EEP_SEM)
3693 return (IWK_SUCCESS);
3694 DELAY(10000);
3695 }
3696 }
3697 return (IWK_FAIL);
3698 }
3699
3700 /*
3701 * reset the semaphore flag to release the EEPROM
3702 */
3703 static void iwk_eep_sem_up(iwk_sc_t *sc)
3704 {
3705 uint32_t tmp;
3706
3707 tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3708 IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3709 tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
3710 }
3711
3712 /*
3713 * This function loads all of the information in the EEPROM into the
3714 * iwk_eep structure embedded in the iwk_sc_t structure
3715 */
3716 static int iwk_eep_load(iwk_sc_t *sc)
3717 {
3718 int i, rr;
3719 uint32_t rv, tmp, eep_gp;
3720 uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
3721 uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
3722
3723 /* read eeprom gp register in CSR */
3724 eep_gp = IWK_READ(sc, CSR_EEPROM_GP);
3725 if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
3726 CSR_EEPROM_GP_BAD_SIGNATURE) {
3727 cmn_err(CE_WARN, "EEPROM not found\n");
3728 return (IWK_FAIL);
3729 }
3730
3731 rr = iwk_eep_sem_down(sc);
3732 if (rr != 0) {
3733 cmn_err(CE_WARN, "failed to own EEPROM\n");
3734 return (IWK_FAIL);
3735 }
3736
3737 for (addr = 0; addr < eep_sz; addr += 2) {
3738 IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1);
3739 tmp = IWK_READ(sc, CSR_EEPROM_REG);
3740 IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
3741
3742 for (i = 0; i < 10; i++) {
3743 rv = IWK_READ(sc, CSR_EEPROM_REG);
3744 if (rv & 1)
3745 break;
3746 DELAY(10);
3747 }
3748
3749 if (!(rv & 1)) {
3750 cmn_err(CE_WARN, "timeout reading the EEPROM\n");
3751 iwk_eep_sem_up(sc);
3752 return (IWK_FAIL);
3753 }
3754
3755 eep_p[addr/2] = rv >> 16;
3756 }
3757
3758 iwk_eep_sem_up(sc);
3759 return (IWK_SUCCESS);
3760 }
3761
3762 /*
3763 * init mac address in ieee80211com_t struct
3764 */
3765 static void iwk_get_mac_from_eep(iwk_sc_t *sc)
3766 {
3767 ieee80211com_t *ic = &sc->sc_ic;
3768 struct iwk_eep *ep = &sc->sc_eep_map;
3769
3770 IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address);
3771 3772 IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n", 3773 ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2], 3774 ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5])); 3775 } 3776 3777 static int 3778 iwk_init(iwk_sc_t *sc) 3779 { 3780 int qid, n, err; 3781 clock_t clk; 3782 uint32_t tmp; 3783 3784 mutex_enter(&sc->sc_glock); 3785 sc->sc_flags &= ~IWK_F_FW_INIT; 3786 3787 (void) iwk_preinit(sc); 3788 3789 tmp = IWK_READ(sc, CSR_GP_CNTRL); 3790 if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) { 3791 cmn_err(CE_WARN, "iwk_init(): Radio transmitter is off\n"); 3792 goto fail1; 3793 } 3794 3795 /* init Rx ring */ 3796 iwk_mac_access_enter(sc); 3797 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 3798 3799 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); 3800 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG, 3801 sc->sc_rxq.dma_desc.cookie.dmac_address >> 8); 3802 3803 IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG, 3804 ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address + 3805 offsetof(struct iwk_shared, val0)) >> 4)); 3806 3807 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 3808 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 3809 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 3810 IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K | 3811 (RX_QUEUE_SIZE_LOG << 3812 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT)); 3813 iwk_mac_access_exit(sc); 3814 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 3815 (RX_QUEUE_SIZE - 1) & ~0x7); 3816 3817 /* init Tx rings */ 3818 iwk_mac_access_enter(sc); 3819 iwk_reg_write(sc, SCD_TXFACT, 0); 3820 3821 /* keep warm page */ 3822 iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG, 3823 sc->sc_dma_kw.cookie.dmac_address >> 4); 3824 3825 for (qid = 0; qid < IWK_NUM_QUEUES; qid++) { 3826 IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid), 3827 sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8); 3828 IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid), 3829 IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 3830 IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL); 3831 } 3832 iwk_mac_access_exit(sc); 3833 3834 /* clear "radio off" and "disable command" bits */ 3835 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 3836 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, 3837 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 3838 3839 /* clear any pending interrupts */ 3840 IWK_WRITE(sc, CSR_INT, 0xffffffff); 3841 3842 /* enable interrupts */ 3843 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK); 3844 3845 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 3846 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 3847 3848 /* 3849 * backup ucode data part for future use. 
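 * iwk_ucode_alive() points the BSM at sc_dma_fw_data_bak, so keeping a
 * pristine copy of the data image here lets the BSM reload the runtime
 * uCode later (e.g. after a power-save wakeup).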
3850 */ 3851 (void) memcpy(sc->sc_dma_fw_data_bak.mem_va, 3852 sc->sc_dma_fw_data.mem_va, 3853 sc->sc_dma_fw_data.alength); 3854 3855 for (n = 0; n < 2; n++) { 3856 /* load firmware init segment into NIC */ 3857 err = iwk_load_firmware(sc); 3858 if (err != IWK_SUCCESS) { 3859 cmn_err(CE_WARN, "iwk_init(): " 3860 "failed to setup boot firmware\n"); 3861 continue; 3862 } 3863 3864 /* now press "execute" start running */ 3865 IWK_WRITE(sc, CSR_RESET, 0); 3866 break; 3867 } 3868 if (n == 2) { 3869 cmn_err(CE_WARN, "iwk_init(): failed to load firmware\n"); 3870 goto fail1; 3871 } 3872 /* ..and wait at most one second for adapter to initialize */ 3873 clk = ddi_get_lbolt() + drv_usectohz(2000000); 3874 while (!(sc->sc_flags & IWK_F_FW_INIT)) { 3875 if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0) 3876 break; 3877 } 3878 if (!(sc->sc_flags & IWK_F_FW_INIT)) { 3879 cmn_err(CE_WARN, 3880 "iwk_init(): timeout waiting for firmware init\n"); 3881 goto fail1; 3882 } 3883 3884 /* 3885 * at this point, the firmware is loaded OK, then config the hardware 3886 * with the ucode API, including rxon, txpower, etc. 3887 */ 3888 err = iwk_config(sc); 3889 if (err) { 3890 cmn_err(CE_WARN, "iwk_init(): failed to configure device\n"); 3891 goto fail1; 3892 } 3893 3894 /* at this point, hardware may receive beacons :) */ 3895 mutex_exit(&sc->sc_glock); 3896 return (IWK_SUCCESS); 3897 3898 fail1: 3899 err = IWK_FAIL; 3900 mutex_exit(&sc->sc_glock); 3901 return (err); 3902 } 3903 3904 static void 3905 iwk_stop(iwk_sc_t *sc) 3906 { 3907 uint32_t tmp; 3908 int i; 3909 3910 if (!(sc->sc_flags & IWK_F_QUIESCED)) 3911 mutex_enter(&sc->sc_glock); 3912 3913 IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 3914 /* disable interrupts */ 3915 IWK_WRITE(sc, CSR_INT_MASK, 0); 3916 IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK); 3917 IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff); 3918 3919 /* reset all Tx rings */ 3920 for (i = 0; i < IWK_NUM_QUEUES; i++) 3921 iwk_reset_tx_ring(sc, &sc->sc_txq[i]); 3922 3923 /* reset Rx ring */ 3924 iwk_reset_rx_ring(sc); 3925 3926 iwk_mac_access_enter(sc); 3927 iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT); 3928 iwk_mac_access_exit(sc); 3929 3930 DELAY(5); 3931 3932 iwk_stop_master(sc); 3933 3934 sc->sc_tx_timer = 0; 3935 tmp = IWK_READ(sc, CSR_RESET); 3936 IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET); 3937 3938 if (!(sc->sc_flags & IWK_F_QUIESCED)) 3939 mutex_exit(&sc->sc_glock); 3940 } 3941 3942 /* 3943 * Naive implementation of the Adaptive Multi Rate Retry algorithm: 3944 * "IEEE 802.11 Rate Adaptation: A Practical Approach" 3945 * Mathieu Lacage, Hossein Manshaei, Thierry Turletti 3946 * INRIA Sophia - Projet Planete 3947 * http://www-sop.inria.fr/rapports/sophia/RR-5208.html 3948 */ 3949 #define is_success(amrr) \ 3950 ((amrr)->retrycnt < (amrr)->txcnt / 10) 3951 #define is_failure(amrr) \ 3952 ((amrr)->retrycnt > (amrr)->txcnt / 3) 3953 #define is_enough(amrr) \ 3954 ((amrr)->txcnt > 100) 3955 #define is_min_rate(in) \ 3956 ((in)->in_txrate == 0) 3957 #define is_max_rate(in) \ 3958 ((in)->in_txrate == (in)->in_rates.ir_nrates - 1) 3959 #define increase_rate(in) \ 3960 ((in)->in_txrate++) 3961 #define decrease_rate(in) \ 3962 ((in)->in_txrate--) 3963 #define reset_cnt(amrr) \ 3964 { (amrr)->txcnt = (amrr)->retrycnt = 0; } 3965 3966 #define IWK_AMRR_MIN_SUCCESS_THRESHOLD 1 3967 #define IWK_AMRR_MAX_SUCCESS_THRESHOLD 15 3968 3969 static void 3970 iwk_amrr_init(iwk_amrr_t *amrr) 3971 { 3972 amrr->success = 0; 3973 amrr->recovery = 0; 3974 
amrr->txcnt = amrr->retrycnt = 0; 3975 amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD; 3976 } 3977 3978 static void 3979 iwk_amrr_timeout(iwk_sc_t *sc) 3980 { 3981 ieee80211com_t *ic = &sc->sc_ic; 3982 3983 IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n")); 3984 if (ic->ic_opmode == IEEE80211_M_STA) 3985 iwk_amrr_ratectl(NULL, ic->ic_bss); 3986 else 3987 ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL); 3988 sc->sc_clk = ddi_get_lbolt(); 3989 } 3990 3991 /* ARGSUSED */ 3992 static void 3993 iwk_amrr_ratectl(void *arg, ieee80211_node_t *in) 3994 { 3995 iwk_amrr_t *amrr = (iwk_amrr_t *)in; 3996 int need_change = 0; 3997 3998 if (is_success(amrr) && is_enough(amrr)) { 3999 amrr->success++; 4000 if (amrr->success >= amrr->success_threshold && 4001 !is_max_rate(in)) { 4002 amrr->recovery = 1; 4003 amrr->success = 0; 4004 increase_rate(in); 4005 IWK_DBG((IWK_DEBUG_RATECTL, 4006 "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n", 4007 in->in_txrate, amrr->txcnt, amrr->retrycnt)); 4008 need_change = 1; 4009 } else { 4010 amrr->recovery = 0; 4011 } 4012 } else if (is_failure(amrr)) { 4013 amrr->success = 0; 4014 if (!is_min_rate(in)) { 4015 if (amrr->recovery) { 4016 amrr->success_threshold++; 4017 if (amrr->success_threshold > 4018 IWK_AMRR_MAX_SUCCESS_THRESHOLD) 4019 amrr->success_threshold = 4020 IWK_AMRR_MAX_SUCCESS_THRESHOLD; 4021 } else { 4022 amrr->success_threshold = 4023 IWK_AMRR_MIN_SUCCESS_THRESHOLD; 4024 } 4025 decrease_rate(in); 4026 IWK_DBG((IWK_DEBUG_RATECTL, 4027 "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n", 4028 in->in_txrate, amrr->txcnt, amrr->retrycnt)); 4029 need_change = 1; 4030 } 4031 amrr->recovery = 0; /* paper is incorrect */ 4032 } 4033 4034 if (is_enough(amrr) || need_change) 4035 reset_cnt(amrr); 4036 } 4037 4038 /* 4039 * calculate 4965 chipset's kelvin temperature according to 4040 * the data of init alive and satistics notification. 
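 * As an illustrative sketch of the computation below (symbols only, no
 * real measurements): with calibration readings R1, R2, R3 taken from
 * the "initialize" alive response and the current reading R4
 * (sign-extended from 24 bits), the Kelvin temperature is derived as
 *
 *	tempera = TEMPERATURE_CALIB_A_VAL * (R4 - R2) / (R3 - R1);
 *	tempera = (tempera * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
 *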
4041  * The details are described in the iwk_calibration.h file
4042  */
4043 static int32_t iwk_curr_tempera(iwk_sc_t *sc)
4044 {
4045 	int32_t tempera;
4046 	int32_t r1, r2, r3;
4047 	uint32_t r4_u;
4048 	int32_t r4_s;
4049
4050 	if (iwk_is_fat_channel(sc)) {
4051 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[1]);
4052 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[1]);
4053 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[1]);
4054 		r4_u = sc->sc_card_alive_init.therm_r4[1];
4055 	} else {
4056 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[0]);
4057 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[0]);
4058 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[0]);
4059 		r4_u = sc->sc_card_alive_init.therm_r4[0];
4060 	}
4061
4062 	if (sc->sc_flags & IWK_F_STATISTICS) {
4063 		r4_s = (int32_t)(sc->sc_statistics.general.temperature <<
4064 		    (31-23)) >> (31-23);
4065 	} else {
4066 		r4_s = (int32_t)(r4_u << (31-23)) >> (31-23);
4067 	}
4068
4069 	IWK_DBG((IWK_DEBUG_CALIBRATION, "temperature R[1-4]: %d %d %d %d\n",
4070 	    r1, r2, r3, r4_s));
4071
4072 	if (r3 == r1) {
4073 		cmn_err(CE_WARN, "iwk_curr_tempera(): "
4074 		    "failed to calculate temperature "
4075 		    "because r3 = r1\n");
4076 		return (DDI_FAILURE);
4077 	}
4078
4079 	tempera = TEMPERATURE_CALIB_A_VAL * (r4_s - r2);
4080 	tempera /= (r3 - r1);
4081 	tempera = (tempera*97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
4082
4083 	IWK_DBG((IWK_DEBUG_CALIBRATION, "calculated temperature: %dK, %dC\n",
4084 	    tempera, KELVIN_TO_CELSIUS(tempera)));
4085
4086 	return (tempera);
4087 }
4088
4089 /* Determine whether 4965 is using 2.4 GHz band */
4090 static inline int iwk_is_24G_band(iwk_sc_t *sc)
4091 {
4092 	return (sc->sc_config.flags & RXON_FLG_BAND_24G_MSK);
4093 }
4094
4095 /* Determine whether 4965 is using fat channel */
4096 static inline int iwk_is_fat_channel(iwk_sc_t *sc)
4097 {
4098 	return ((sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
4099 	    (sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK));
4100 }
4101
4102 /*
4103  * In MIMO mode, determine which group the 4965's current channel belongs to.
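 * As a rough orientation, taken from the temperature-compensation table
 * further below (treat the exact boundaries as indicative): the groups
 * cover 5.2 GHz channels 34-43, 44-70, 71-124 and 125-200, plus one
 * group for all 2.4 GHz channels.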
4104 * For more infomation about "channel group", 4105 * please refer to iwk_calibration.h file 4106 */ 4107 static int iwk_txpower_grp(uint16_t channel) 4108 { 4109 if (channel >= CALIB_IWK_TX_ATTEN_GR5_FCH && 4110 channel <= CALIB_IWK_TX_ATTEN_GR5_LCH) { 4111 return (CALIB_CH_GROUP_5); 4112 } 4113 4114 if (channel >= CALIB_IWK_TX_ATTEN_GR1_FCH && 4115 channel <= CALIB_IWK_TX_ATTEN_GR1_LCH) { 4116 return (CALIB_CH_GROUP_1); 4117 } 4118 4119 if (channel >= CALIB_IWK_TX_ATTEN_GR2_FCH && 4120 channel <= CALIB_IWK_TX_ATTEN_GR2_LCH) { 4121 return (CALIB_CH_GROUP_2); 4122 } 4123 4124 if (channel >= CALIB_IWK_TX_ATTEN_GR3_FCH && 4125 channel <= CALIB_IWK_TX_ATTEN_GR3_LCH) { 4126 return (CALIB_CH_GROUP_3); 4127 } 4128 4129 if (channel >= CALIB_IWK_TX_ATTEN_GR4_FCH && 4130 channel <= CALIB_IWK_TX_ATTEN_GR4_LCH) { 4131 return (CALIB_CH_GROUP_4); 4132 } 4133 4134 cmn_err(CE_WARN, "iwk_txpower_grp(): " 4135 "can't find txpower group for channel %d.\n", channel); 4136 4137 return (DDI_FAILURE); 4138 } 4139 4140 /* 2.4 GHz */ 4141 static uint16_t iwk_eep_band_1[14] = { 4142 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 4143 }; 4144 4145 /* 5.2 GHz bands */ 4146 static uint16_t iwk_eep_band_2[13] = { 4147 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 4148 }; 4149 4150 static uint16_t iwk_eep_band_3[12] = { 4151 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 4152 }; 4153 4154 static uint16_t iwk_eep_band_4[11] = { 4155 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 4156 }; 4157 4158 static uint16_t iwk_eep_band_5[6] = { 4159 145, 149, 153, 157, 161, 165 4160 }; 4161 4162 static uint16_t iwk_eep_band_6[7] = { 4163 1, 2, 3, 4, 5, 6, 7 4164 }; 4165 4166 static uint16_t iwk_eep_band_7[11] = { 4167 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 4168 }; 4169 4170 /* Get regulatory data from eeprom for a given channel */ 4171 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc, 4172 uint16_t channel, 4173 int is_24G, int is_fat, int is_hi_chan) 4174 { 4175 int32_t i; 4176 uint16_t chan; 4177 4178 if (is_fat) { /* 11n mode */ 4179 4180 if (is_hi_chan) { 4181 chan = channel - 4; 4182 } else { 4183 chan = channel; 4184 } 4185 4186 for (i = 0; i < 7; i++) { 4187 if (iwk_eep_band_6[i] == chan) { 4188 return (&sc->sc_eep_map.band_24_channels[i]); 4189 } 4190 } 4191 for (i = 0; i < 11; i++) { 4192 if (iwk_eep_band_7[i] == chan) { 4193 return (&sc->sc_eep_map.band_52_channels[i]); 4194 } 4195 } 4196 } else if (is_24G) { /* 2.4 GHz band */ 4197 for (i = 0; i < 14; i++) { 4198 if (iwk_eep_band_1[i] == channel) { 4199 return (&sc->sc_eep_map.band_1_channels[i]); 4200 } 4201 } 4202 } else { /* 5 GHz band */ 4203 for (i = 0; i < 13; i++) { 4204 if (iwk_eep_band_2[i] == channel) { 4205 return (&sc->sc_eep_map.band_2_channels[i]); 4206 } 4207 } 4208 for (i = 0; i < 12; i++) { 4209 if (iwk_eep_band_3[i] == channel) { 4210 return (&sc->sc_eep_map.band_3_channels[i]); 4211 } 4212 } 4213 for (i = 0; i < 11; i++) { 4214 if (iwk_eep_band_4[i] == channel) { 4215 return (&sc->sc_eep_map.band_4_channels[i]); 4216 } 4217 } 4218 for (i = 0; i < 6; i++) { 4219 if (iwk_eep_band_5[i] == channel) { 4220 return (&sc->sc_eep_map.band_5_channels[i]); 4221 } 4222 } 4223 } 4224 4225 return (NULL); 4226 } 4227 4228 /* 4229 * Determine which subband a given channel belongs 4230 * to in 2.4 GHz or 5 GHz band 4231 */ 4232 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel) 4233 { 4234 int32_t b_n = -1; 4235 4236 for (b_n = 0; b_n < EEP_TX_POWER_BANDS; b_n++) { 4237 if (0 == 
sc->sc_eep_map.calib_info.band_info_tbl[b_n].ch_from) { 4238 continue; 4239 } 4240 4241 if ((channel >= 4242 (uint16_t)sc->sc_eep_map.calib_info. 4243 band_info_tbl[b_n].ch_from) && 4244 (channel <= 4245 (uint16_t)sc->sc_eep_map.calib_info. 4246 band_info_tbl[b_n].ch_to)) { 4247 break; 4248 } 4249 } 4250 4251 return (b_n); 4252 } 4253 4254 /* Make a special division for interpolation operation */ 4255 static int iwk_division(int32_t num, int32_t denom, int32_t *res) 4256 { 4257 int32_t sign = 1; 4258 4259 if (num < 0) { 4260 sign = -sign; 4261 num = -num; 4262 } 4263 4264 if (denom < 0) { 4265 sign = -sign; 4266 denom = -denom; 4267 } 4268 4269 *res = ((num*2 + denom) / (denom*2)) * sign; 4270 4271 return (IWK_SUCCESS); 4272 } 4273 4274 /* Make interpolation operation */ 4275 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1, 4276 int32_t x2, int32_t y2) 4277 { 4278 int32_t val; 4279 4280 if (x2 == x1) { 4281 return (y1); 4282 } else { 4283 (void) iwk_division((x2-x)*(y1-y2), (x2-x1), &val); 4284 return (val + y2); 4285 } 4286 } 4287 4288 /* Get interpolation measurement data of a given channel for all chains. */ 4289 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel, 4290 struct iwk_eep_calib_channel_info *chan_info) 4291 { 4292 int32_t ban_n; 4293 uint32_t ch1_n, ch2_n; 4294 int32_t c, m; 4295 struct iwk_eep_calib_measure *m1_p, *m2_p, *m_p; 4296 4297 /* determine subband number */ 4298 ban_n = iwk_band_number(sc, channel); 4299 if (ban_n >= EEP_TX_POWER_BANDS) { 4300 return (DDI_FAILURE); 4301 } 4302 4303 ch1_n = 4304 (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch1.ch_num; 4305 ch2_n = 4306 (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch2.ch_num; 4307 4308 chan_info->ch_num = (uint8_t)channel; /* given channel number */ 4309 4310 /* 4311 * go through all chains on chipset 4312 */ 4313 for (c = 0; c < EEP_TX_POWER_TX_CHAINS; c++) { 4314 /* 4315 * go through all factory measurements 4316 */ 4317 for (m = 0; m < EEP_TX_POWER_MEASUREMENTS; m++) { 4318 m1_p = 4319 &(sc->sc_eep_map.calib_info. 4320 band_info_tbl[ban_n].ch1.measure[c][m]); 4321 m2_p = 4322 &(sc->sc_eep_map.calib_info.band_info_tbl[ban_n]. 4323 ch2.measure[c][m]); 4324 m_p = &(chan_info->measure[c][m]); 4325 4326 /* 4327 * make interpolation to get actual 4328 * Tx power for given channel 4329 */ 4330 m_p->actual_pow = iwk_interpolate_value(channel, 4331 ch1_n, m1_p->actual_pow, 4332 ch2_n, m2_p->actual_pow); 4333 4334 /* make interpolation to get index into gain table */ 4335 m_p->gain_idx = iwk_interpolate_value(channel, 4336 ch1_n, m1_p->gain_idx, 4337 ch2_n, m2_p->gain_idx); 4338 4339 /* make interpolation to get chipset temperature */ 4340 m_p->temperature = iwk_interpolate_value(channel, 4341 ch1_n, m1_p->temperature, 4342 ch2_n, m2_p->temperature); 4343 4344 /* 4345 * make interpolation to get power 4346 * amp detector level 4347 */ 4348 m_p->pa_det = iwk_interpolate_value(channel, ch1_n, 4349 m1_p->pa_det, 4350 ch2_n, m2_p->pa_det); 4351 } 4352 } 4353 4354 return (IWK_SUCCESS); 4355 } 4356 4357 /* 4358 * Calculate voltage compensation for Tx power. 
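 * Sketch of the rule implemented below: the difference between the
 * current ("initialize" alive) voltage and the EEPROM calibration
 * voltage is divided, with rounding, by TX_POWER_IWK_VOLTAGE_CODES_PER_03V
 * via iwk_division(); the result is doubled when the current voltage is
 * above the EEPROM value, and anything outside the range [-2, 2] is
 * dropped (treated as no compensation).  The code further below applies
 * the result as gain-table index steps.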
For more infomation, 4359 * please refer to iwk_calibration.h file 4360 */ 4361 static int32_t iwk_voltage_compensation(int32_t eep_voltage, 4362 int32_t curr_voltage) 4363 { 4364 int32_t vol_comp = 0; 4365 4366 if ((TX_POWER_IWK_ILLEGAL_VOLTAGE == eep_voltage) || 4367 (TX_POWER_IWK_ILLEGAL_VOLTAGE == curr_voltage)) { 4368 return (vol_comp); 4369 } 4370 4371 (void) iwk_division(curr_voltage-eep_voltage, 4372 TX_POWER_IWK_VOLTAGE_CODES_PER_03V, &vol_comp); 4373 4374 if (curr_voltage > eep_voltage) { 4375 vol_comp *= 2; 4376 } 4377 if ((vol_comp < -2) || (vol_comp > 2)) { 4378 vol_comp = 0; 4379 } 4380 4381 return (vol_comp); 4382 } 4383 4384 /* 4385 * Thermal compensation values for txpower for various frequency ranges ... 4386 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust 4387 */ 4388 static struct iwk_txpower_tempera_comp { 4389 int32_t degrees_per_05db_a; 4390 int32_t degrees_per_05db_a_denom; 4391 } txpower_tempera_comp_table[CALIB_CH_GROUP_MAX] = { 4392 {9, 2}, /* group 0 5.2, ch 34-43 */ 4393 {4, 1}, /* group 1 5.2, ch 44-70 */ 4394 {4, 1}, /* group 2 5.2, ch 71-124 */ 4395 {4, 1}, /* group 3 5.2, ch 125-200 */ 4396 {3, 1} /* group 4 2.4, ch all */ 4397 }; 4398 4399 /* 4400 * bit-rate-dependent table to prevent Tx distortion, in half-dB units, 4401 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. 4402 */ 4403 static int32_t back_off_table[] = { 4404 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */ 4405 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */ 4406 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */ 4407 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */ 4408 10 /* CCK */ 4409 }; 4410 4411 /* determine minimum Tx power index in gain table */ 4412 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G) 4413 { 4414 if ((!is_24G) && ((rate_pow_idx & 7) <= 4)) { 4415 return (MIN_TX_GAIN_INDEX_52GHZ_EXT); 4416 } 4417 4418 return (MIN_TX_GAIN_INDEX); 4419 } 4420 4421 /* 4422 * Determine DSP and radio gain according to temperature and other factors. 
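 * In outline (a summary of the code below, kept as a sketch): for every
 * rate entry of the power table the driver starts from the user/EEPROM
 * target power, limits it by the regulatory maximum and by
 * (saturation power - per-rate back-off), and then converts it into a
 * per-chain gain-table index roughly as
 *
 *	idx = interpolated_gain_idx
 *	    - (target_power - interpolated_actual_pow)
 *	    - temperature_compensation - voltage_compensation
 *	    + mimo_attenuation;
 *
 * with further fix-ups for the 5 GHz band, CCK and the legal index
 * range before the DSP/radio gains are looked up in gains_table[].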
4423 * This function is the majority of Tx power calibration 4424 */ 4425 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc, 4426 struct iwk_tx_power_db *tp_db) 4427 { 4428 int is_24G, is_fat, is_high_chan, is_mimo; 4429 int c, r; 4430 int32_t target_power; 4431 int32_t tx_grp = CALIB_CH_GROUP_MAX; 4432 uint16_t channel; 4433 uint8_t saturation_power; 4434 int32_t regu_power; 4435 int32_t curr_regu_power; 4436 struct iwk_eep_channel *eep_chan_p; 4437 struct iwk_eep_calib_channel_info eep_chan_calib; 4438 int32_t eep_voltage, init_voltage; 4439 int32_t voltage_compensation; 4440 int32_t temperature; 4441 int32_t degrees_per_05db_num; 4442 int32_t degrees_per_05db_denom; 4443 struct iwk_eep_calib_measure *measure_p; 4444 int32_t interpo_temp; 4445 int32_t power_limit; 4446 int32_t atten_value; 4447 int32_t tempera_comp[2]; 4448 int32_t interpo_gain_idx[2]; 4449 int32_t interpo_actual_pow[2]; 4450 union iwk_tx_power_dual_stream txpower_gains; 4451 int32_t txpower_gains_idx; 4452 4453 channel = sc->sc_config.chan; 4454 4455 /* 2.4 GHz or 5 GHz band */ 4456 is_24G = iwk_is_24G_band(sc); 4457 4458 /* fat channel or not */ 4459 is_fat = iwk_is_fat_channel(sc); 4460 4461 /* 4462 * using low half channel number or high half channel number 4463 * identify fat channel 4464 */ 4465 if (is_fat && (sc->sc_config.flags & 4466 RXON_FLG_CONTROL_CHANNEL_LOC_HIGH_MSK)) { 4467 is_high_chan = 1; 4468 } 4469 4470 if ((channel > 0) && (channel < 200)) { 4471 /* get regulatory channel data from eeprom */ 4472 eep_chan_p = iwk_get_eep_channel(sc, channel, is_24G, 4473 is_fat, is_high_chan); 4474 if (NULL == eep_chan_p) { 4475 cmn_err(CE_WARN, 4476 "iwk_txpower_table_cmd_init(): " 4477 "can't get channel infomation\n"); 4478 return (DDI_FAILURE); 4479 } 4480 } else { 4481 cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): " 4482 "channel(%d) isn't in proper range\n", 4483 channel); 4484 return (DDI_FAILURE); 4485 } 4486 4487 /* initial value of Tx power */ 4488 sc->sc_user_txpower = (int32_t)eep_chan_p->max_power_avg; 4489 if (sc->sc_user_txpower < IWK_TX_POWER_TARGET_POWER_MIN) { 4490 cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): " 4491 "user TX power is too weak\n"); 4492 return (DDI_FAILURE); 4493 } else if (sc->sc_user_txpower > IWK_TX_POWER_TARGET_POWER_MAX) { 4494 cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): " 4495 "user TX power is too strong\n"); 4496 return (DDI_FAILURE); 4497 } 4498 4499 target_power = 2 * sc->sc_user_txpower; 4500 4501 /* determine which group current channel belongs to */ 4502 tx_grp = iwk_txpower_grp(channel); 4503 if (tx_grp < 0) { 4504 return (tx_grp); 4505 } 4506 4507 4508 if (is_fat) { 4509 if (is_high_chan) { 4510 channel -= 2; 4511 } else { 4512 channel += 2; 4513 } 4514 } 4515 4516 /* determine saturation power */ 4517 if (is_24G) { 4518 saturation_power = 4519 sc->sc_eep_map.calib_info.saturation_power24; 4520 } else { 4521 saturation_power = 4522 sc->sc_eep_map.calib_info.saturation_power52; 4523 } 4524 4525 if (saturation_power < IWK_TX_POWER_SATURATION_MIN || 4526 saturation_power > IWK_TX_POWER_SATURATION_MAX) { 4527 if (is_24G) { 4528 saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_24; 4529 } else { 4530 saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_52; 4531 } 4532 } 4533 4534 /* determine regulatory power */ 4535 regu_power = (int32_t)eep_chan_p->max_power_avg * 2; 4536 if ((regu_power < IWK_TX_POWER_REGULATORY_MIN) || 4537 (regu_power > IWK_TX_POWER_REGULATORY_MAX)) { 4538 if (is_24G) { 4539 regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_24; 4540 } else { 4541 
regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_52; 4542 } 4543 } 4544 4545 /* 4546 * get measurement data for current channel 4547 * suach as temperature,index to gain table,actual Tx power 4548 */ 4549 (void) iwk_channel_interpolate(sc, channel, &eep_chan_calib); 4550 4551 eep_voltage = (int32_t)sc->sc_eep_map.calib_info.voltage; 4552 init_voltage = (int32_t)sc->sc_card_alive_init.voltage; 4553 4554 /* calculate voltage compensation to Tx power */ 4555 voltage_compensation = 4556 iwk_voltage_compensation(eep_voltage, init_voltage); 4557 4558 if (sc->sc_tempera >= IWK_TX_POWER_TEMPERATURE_MIN) { 4559 temperature = sc->sc_tempera; 4560 } else { 4561 temperature = IWK_TX_POWER_TEMPERATURE_MIN; 4562 } 4563 if (sc->sc_tempera <= IWK_TX_POWER_TEMPERATURE_MAX) { 4564 temperature = sc->sc_tempera; 4565 } else { 4566 temperature = IWK_TX_POWER_TEMPERATURE_MAX; 4567 } 4568 temperature = KELVIN_TO_CELSIUS(temperature); 4569 4570 degrees_per_05db_num = 4571 txpower_tempera_comp_table[tx_grp].degrees_per_05db_a; 4572 degrees_per_05db_denom = 4573 txpower_tempera_comp_table[tx_grp].degrees_per_05db_a_denom; 4574 4575 for (c = 0; c < 2; c++) { /* go through all chains */ 4576 measure_p = &eep_chan_calib.measure[c][1]; 4577 interpo_temp = measure_p->temperature; 4578 4579 /* determine temperature compensation to Tx power */ 4580 (void) iwk_division( 4581 (temperature-interpo_temp)*degrees_per_05db_denom, 4582 degrees_per_05db_num, &tempera_comp[c]); 4583 4584 interpo_gain_idx[c] = measure_p->gain_idx; 4585 interpo_actual_pow[c] = measure_p->actual_pow; 4586 } 4587 4588 /* 4589 * go through all rate entries in Tx power table 4590 */ 4591 for (r = 0; r < POWER_TABLE_NUM_ENTRIES; r++) { 4592 if (r & 0x8) { 4593 /* need to lower regulatory power for MIMO mode */ 4594 curr_regu_power = regu_power - 4595 IWK_TX_POWER_MIMO_REGULATORY_COMPENSATION; 4596 is_mimo = 1; 4597 } else { 4598 curr_regu_power = regu_power; 4599 is_mimo = 0; 4600 } 4601 4602 power_limit = saturation_power - back_off_table[r]; 4603 if (power_limit > curr_regu_power) { 4604 /* final Tx power limit */ 4605 power_limit = curr_regu_power; 4606 } 4607 4608 if (target_power > power_limit) { 4609 target_power = power_limit; /* final target Tx power */ 4610 } 4611 4612 for (c = 0; c < 2; c++) { /* go through all Tx chains */ 4613 if (is_mimo) { 4614 atten_value = 4615 sc->sc_card_alive_init.tx_atten[tx_grp][c]; 4616 } else { 4617 atten_value = 0; 4618 } 4619 4620 /* 4621 * calculate index in gain table 4622 * this step is very important 4623 */ 4624 txpower_gains_idx = interpo_gain_idx[c] - 4625 (target_power - interpo_actual_pow[c]) - 4626 tempera_comp[c] - voltage_compensation + 4627 atten_value; 4628 4629 if (txpower_gains_idx < 4630 iwk_min_power_index(r, is_24G)) { 4631 txpower_gains_idx = 4632 iwk_min_power_index(r, is_24G); 4633 } 4634 4635 if (!is_24G) { 4636 /* 4637 * support negative index for 5 GHz 4638 * band 4639 */ 4640 txpower_gains_idx += 9; 4641 } 4642 4643 if (POWER_TABLE_CCK_ENTRY == r) { 4644 /* for CCK mode, make necessary attenuaton */ 4645 txpower_gains_idx += 4646 IWK_TX_POWER_CCK_COMPENSATION_C_STEP; 4647 } 4648 4649 if (txpower_gains_idx > 107) { 4650 txpower_gains_idx = 107; 4651 } else if (txpower_gains_idx < 0) { 4652 txpower_gains_idx = 0; 4653 } 4654 4655 /* search DSP and radio gains in gain table */ 4656 txpower_gains.s.radio_tx_gain[c] = 4657 gains_table[is_24G][txpower_gains_idx].radio; 4658 txpower_gains.s.dsp_predis_atten[c] = 4659 gains_table[is_24G][txpower_gains_idx].dsp; 4660 4661 
IWK_DBG((IWK_DEBUG_CALIBRATION, 4662 "rate_index: %d, " 4663 "gain_index %d, c: %d,is_mimo: %d\n", 4664 r, txpower_gains_idx, c, is_mimo)); 4665 } 4666 4667 /* initialize Tx power table */ 4668 if (r < POWER_TABLE_NUM_HT_OFDM_ENTRIES) { 4669 tp_db->ht_ofdm_power[r].dw = txpower_gains.dw; 4670 } else { 4671 tp_db->legacy_cck_power.dw = txpower_gains.dw; 4672 } 4673 } 4674 4675 return (IWK_SUCCESS); 4676 } 4677 4678 /* 4679 * make Tx power calibration to adjust Tx power. 4680 * This is completed by sending out Tx power table command. 4681 */ 4682 static int iwk_tx_power_calibration(iwk_sc_t *sc) 4683 { 4684 iwk_tx_power_table_cmd_t cmd; 4685 int rv; 4686 4687 if (sc->sc_flags & IWK_F_SCANNING) { 4688 return (IWK_SUCCESS); 4689 } 4690 4691 /* necessary initialization to Tx power table command */ 4692 cmd.band = (uint8_t)iwk_is_24G_band(sc); 4693 cmd.channel = sc->sc_config.chan; 4694 cmd.channel_normal_width = 0; 4695 4696 /* initialize Tx power table */ 4697 rv = iwk_txpower_table_cmd_init(sc, &cmd.tx_power); 4698 if (rv) { 4699 cmn_err(CE_NOTE, "rv= %d\n", rv); 4700 return (rv); 4701 } 4702 4703 /* send out Tx power table command */ 4704 rv = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &cmd, sizeof (cmd), 1); 4705 if (rv) { 4706 return (rv); 4707 } 4708 4709 /* record current temperature */ 4710 sc->sc_last_tempera = sc->sc_tempera; 4711 4712 return (IWK_SUCCESS); 4713 } 4714 4715 /* This function is the handler of statistics notification from uCode */ 4716 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc) 4717 { 4718 int is_diff; 4719 struct iwk_notif_statistics *statistics_p = 4720 (struct iwk_notif_statistics *)(desc + 1); 4721 4722 mutex_enter(&sc->sc_glock); 4723 4724 is_diff = (sc->sc_statistics.general.temperature != 4725 statistics_p->general.temperature) || 4726 ((sc->sc_statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) != 4727 (statistics_p->flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)); 4728 4729 /* update statistics data */ 4730 (void) memcpy(&sc->sc_statistics, statistics_p, 4731 sizeof (struct iwk_notif_statistics)); 4732 4733 sc->sc_flags |= IWK_F_STATISTICS; 4734 4735 if (!(sc->sc_flags & IWK_F_SCANNING)) { 4736 /* make Receiver gain balance calibration */ 4737 (void) iwk_rxgain_diff(sc); 4738 4739 /* make Receiver sensitivity calibration */ 4740 (void) iwk_rx_sens(sc); 4741 } 4742 4743 4744 if (!is_diff) { 4745 mutex_exit(&sc->sc_glock); 4746 return; 4747 } 4748 4749 /* calibration current temperature of 4965 chipset */ 4750 sc->sc_tempera = iwk_curr_tempera(sc); 4751 4752 /* distinct temperature change will trigger Tx power calibration */ 4753 if (((sc->sc_tempera - sc->sc_last_tempera) >= 3) || 4754 ((sc->sc_last_tempera - sc->sc_tempera) >= 3)) { 4755 /* make Tx power calibration */ 4756 (void) iwk_tx_power_calibration(sc); 4757 } 4758 4759 mutex_exit(&sc->sc_glock); 4760 } 4761 4762 /* Determine this station is in associated state or not */ 4763 static int iwk_is_associated(iwk_sc_t *sc) 4764 { 4765 return (sc->sc_config.filter_flags & RXON_FILTER_ASSOC_MSK); 4766 } 4767 4768 /* Make necessary preparation for Receiver gain balance calibration */ 4769 static int iwk_rxgain_diff_init(iwk_sc_t *sc) 4770 { 4771 int i, rv; 4772 struct iwk_calibration_cmd cmd; 4773 struct iwk_rx_gain_diff *gain_diff_p; 4774 4775 gain_diff_p = &sc->sc_rxgain_diff; 4776 4777 (void) memset(gain_diff_p, 0, sizeof (struct iwk_rx_gain_diff)); 4778 (void) memset(&cmd, 0, sizeof (struct iwk_calibration_cmd)); 4779 4780 for (i = 0; i < RX_CHAINS_NUM; i++) { 4781 
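		/*
		 * CHAIN_GAIN_DIFF_INIT_VAL serves as a "not yet calibrated"
		 * sentinel; iwk_rxgain_diff() only computes a real per-chain
		 * delta for entries that still carry this value.
		 */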
gain_diff_p->gain_diff_chain[i] = CHAIN_GAIN_DIFF_INIT_VAL; 4782 } 4783 4784 if (iwk_is_associated(sc)) { 4785 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD; 4786 cmd.diff_gain_a = 0; 4787 cmd.diff_gain_b = 0; 4788 cmd.diff_gain_c = 0; 4789 4790 /* assume the gains of every Rx chains is balanceable */ 4791 rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &cmd, 4792 sizeof (cmd), 1); 4793 if (rv) { 4794 return (rv); 4795 } 4796 4797 gain_diff_p->state = IWK_GAIN_DIFF_ACCUMULATE; 4798 } 4799 4800 return (IWK_SUCCESS); 4801 } 4802 4803 /* 4804 * make Receiver gain balance to balance Rx gain between Rx chains 4805 * and determine which chain is disconnected 4806 */ 4807 static int iwk_rxgain_diff(iwk_sc_t *sc) 4808 { 4809 int i, is_24G, rv; 4810 int max_beacon_chain_n; 4811 int min_noise_chain_n; 4812 uint16_t channel_n; 4813 int32_t beacon_diff; 4814 int32_t noise_diff; 4815 uint32_t noise_chain_a, noise_chain_b, noise_chain_c; 4816 uint32_t beacon_chain_a, beacon_chain_b, beacon_chain_c; 4817 struct iwk_calibration_cmd cmd; 4818 uint32_t beacon_aver[RX_CHAINS_NUM] = {0xFFFFFFFF}; 4819 uint32_t noise_aver[RX_CHAINS_NUM] = {0xFFFFFFFF}; 4820 struct statistics_rx_non_phy *rx_general_p = 4821 &sc->sc_statistics.rx.general; 4822 struct iwk_rx_gain_diff *gain_diff_p = &sc->sc_rxgain_diff; 4823 4824 if (INTERFERENCE_DATA_AVAILABLE != 4825 rx_general_p->interference_data_flag) { 4826 return (IWK_SUCCESS); 4827 } 4828 4829 if (IWK_GAIN_DIFF_ACCUMULATE != gain_diff_p->state) { 4830 return (IWK_SUCCESS); 4831 } 4832 4833 is_24G = iwk_is_24G_band(sc); 4834 channel_n = sc->sc_config.chan; /* channel number */ 4835 4836 if ((channel_n != (sc->sc_statistics.flag >> 16)) || 4837 ((STATISTICS_REPLY_FLG_BAND_24G_MSK == 4838 (sc->sc_statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && 4839 !is_24G)) { 4840 return (IWK_SUCCESS); 4841 } 4842 4843 /* Rx chain's noise strength from statistics notification */ 4844 noise_chain_a = rx_general_p->beacon_silence_rssi_a & 0xFF; 4845 noise_chain_b = rx_general_p->beacon_silence_rssi_b & 0xFF; 4846 noise_chain_c = rx_general_p->beacon_silence_rssi_c & 0xFF; 4847 4848 /* Rx chain's beacon strength from statistics notification */ 4849 beacon_chain_a = rx_general_p->beacon_rssi_a & 0xFF; 4850 beacon_chain_b = rx_general_p->beacon_rssi_b & 0xFF; 4851 beacon_chain_c = rx_general_p->beacon_rssi_c & 0xFF; 4852 4853 gain_diff_p->beacon_count++; 4854 4855 /* accumulate chain's noise strength */ 4856 gain_diff_p->noise_stren_a += noise_chain_a; 4857 gain_diff_p->noise_stren_b += noise_chain_b; 4858 gain_diff_p->noise_stren_c += noise_chain_c; 4859 4860 /* accumulate chain's beacon strength */ 4861 gain_diff_p->beacon_stren_a += beacon_chain_a; 4862 gain_diff_p->beacon_stren_b += beacon_chain_b; 4863 gain_diff_p->beacon_stren_c += beacon_chain_c; 4864 4865 if (BEACON_NUM_20 == gain_diff_p->beacon_count) { 4866 /* calculate average beacon strength */ 4867 beacon_aver[0] = (gain_diff_p->beacon_stren_a) / BEACON_NUM_20; 4868 beacon_aver[1] = (gain_diff_p->beacon_stren_b) / BEACON_NUM_20; 4869 beacon_aver[2] = (gain_diff_p->beacon_stren_c) / BEACON_NUM_20; 4870 4871 /* calculate average noise strength */ 4872 noise_aver[0] = (gain_diff_p->noise_stren_a) / BEACON_NUM_20; 4873 noise_aver[1] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20; 4874 noise_aver[2] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20; 4875 4876 /* determine maximum beacon strength among 3 chains */ 4877 if ((beacon_aver[0] >= beacon_aver[1]) && 4878 (beacon_aver[0] >= beacon_aver[2])) { 4879 max_beacon_chain_n = 0; 4880 
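			/*
			 * Chain A (bit 0) has the strongest average beacon and
			 * seeds the connected-chain bitmask; bits 1 and 2 stand
			 * for chains B and C in the branches below.
			 */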
gain_diff_p->connected_chains = 1 << 0; 4881 } else if (beacon_aver[1] >= beacon_aver[2]) { 4882 max_beacon_chain_n = 1; 4883 gain_diff_p->connected_chains = 1 << 1; 4884 } else { 4885 max_beacon_chain_n = 2; 4886 gain_diff_p->connected_chains = 1 << 2; 4887 } 4888 4889 /* determine which chain is disconnected */ 4890 for (i = 0; i < RX_CHAINS_NUM; i++) { 4891 if (i != max_beacon_chain_n) { 4892 beacon_diff = beacon_aver[max_beacon_chain_n] - 4893 beacon_aver[i]; 4894 if (beacon_diff > MAX_ALLOWED_DIFF) { 4895 gain_diff_p->disconnect_chain[i] = 1; 4896 } else { 4897 gain_diff_p->connected_chains |= 4898 (1 << i); 4899 } 4900 } 4901 } 4902 4903 /* 4904 * if chain A and B are both disconnected, 4905 * assume the stronger in beacon strength is connected 4906 */ 4907 if (gain_diff_p->disconnect_chain[0] && 4908 gain_diff_p->disconnect_chain[1]) { 4909 if (beacon_aver[0] >= beacon_aver[1]) { 4910 gain_diff_p->disconnect_chain[0] = 0; 4911 gain_diff_p->connected_chains |= (1 << 0); 4912 } else { 4913 gain_diff_p->disconnect_chain[1] = 0; 4914 gain_diff_p->connected_chains |= (1 << 1); 4915 } 4916 } 4917 4918 /* determine minimum noise strength among 3 chains */ 4919 if (!gain_diff_p->disconnect_chain[0]) { 4920 min_noise_chain_n = 0; 4921 4922 for (i = 0; i < RX_CHAINS_NUM; i++) { 4923 if (!gain_diff_p->disconnect_chain[i] && 4924 (noise_aver[i] <= 4925 noise_aver[min_noise_chain_n])) { 4926 min_noise_chain_n = i; 4927 } 4928 4929 } 4930 } else { 4931 min_noise_chain_n = 1; 4932 4933 for (i = 0; i < RX_CHAINS_NUM; i++) { 4934 if (!gain_diff_p->disconnect_chain[i] && 4935 (noise_aver[i] <= 4936 noise_aver[min_noise_chain_n])) { 4937 min_noise_chain_n = i; 4938 } 4939 } 4940 } 4941 4942 gain_diff_p->gain_diff_chain[min_noise_chain_n] = 0; 4943 4944 /* determine gain difference between chains */ 4945 for (i = 0; i < RX_CHAINS_NUM; i++) { 4946 if (!gain_diff_p->disconnect_chain[i] && 4947 (CHAIN_GAIN_DIFF_INIT_VAL == 4948 gain_diff_p->gain_diff_chain[i])) { 4949 4950 noise_diff = noise_aver[i] - 4951 noise_aver[min_noise_chain_n]; 4952 gain_diff_p->gain_diff_chain[i] = 4953 (uint8_t)((noise_diff * 10) / 15); 4954 4955 if (gain_diff_p->gain_diff_chain[i] > 3) { 4956 gain_diff_p->gain_diff_chain[i] = 3; 4957 } 4958 4959 gain_diff_p->gain_diff_chain[i] |= (1 << 2); 4960 } else { 4961 gain_diff_p->gain_diff_chain[i] = 0; 4962 } 4963 } 4964 4965 if (!gain_diff_p->gain_diff_send) { 4966 gain_diff_p->gain_diff_send = 1; 4967 4968 (void) memset(&cmd, 0, sizeof (cmd)); 4969 4970 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD; 4971 cmd.diff_gain_a = gain_diff_p->gain_diff_chain[0]; 4972 cmd.diff_gain_b = gain_diff_p->gain_diff_chain[1]; 4973 cmd.diff_gain_c = gain_diff_p->gain_diff_chain[2]; 4974 4975 /* 4976 * send out PHY calibration command to 4977 * adjust every chain's Rx gain 4978 */ 4979 rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, 4980 &cmd, sizeof (cmd), 1); 4981 if (rv) { 4982 return (rv); 4983 } 4984 4985 gain_diff_p->state = IWK_GAIN_DIFF_CALIBRATED; 4986 } 4987 4988 gain_diff_p->beacon_stren_a = 0; 4989 gain_diff_p->beacon_stren_b = 0; 4990 gain_diff_p->beacon_stren_c = 0; 4991 4992 gain_diff_p->noise_stren_a = 0; 4993 gain_diff_p->noise_stren_b = 0; 4994 gain_diff_p->noise_stren_c = 0; 4995 } 4996 4997 return (IWK_SUCCESS); 4998 } 4999 5000 /* Make necessary preparation for Receiver sensitivity calibration */ 5001 static int iwk_rx_sens_init(iwk_sc_t *sc) 5002 { 5003 int i, rv; 5004 struct iwk_rx_sensitivity_cmd cmd; 5005 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens; 5006 5007 (void) 
memset(&cmd, 0, sizeof (struct iwk_rx_sensitivity_cmd)); 5008 (void) memset(rx_sens_p, 0, sizeof (struct iwk_rx_sensitivity)); 5009 5010 rx_sens_p->auto_corr_ofdm_x4 = 90; 5011 rx_sens_p->auto_corr_mrc_ofdm_x4 = 170; 5012 rx_sens_p->auto_corr_ofdm_x1 = 105; 5013 rx_sens_p->auto_corr_mrc_ofdm_x1 = 220; 5014 5015 rx_sens_p->auto_corr_cck_x4 = 125; 5016 rx_sens_p->auto_corr_mrc_cck_x4 = 200; 5017 rx_sens_p->min_energy_det_cck = 100; 5018 5019 rx_sens_p->flags &= (~IWK_SENSITIVITY_CALIB_ALLOW_MSK); 5020 rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK); 5021 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK); 5022 5023 rx_sens_p->last_bad_plcp_cnt_ofdm = 0; 5024 rx_sens_p->last_false_alarm_cnt_ofdm = 0; 5025 rx_sens_p->last_bad_plcp_cnt_cck = 0; 5026 rx_sens_p->last_false_alarm_cnt_cck = 0; 5027 5028 rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM; 5029 rx_sens_p->cck_prev_state = IWK_TOO_MANY_FALSE_ALARM; 5030 rx_sens_p->cck_no_false_alarm_num = 0; 5031 rx_sens_p->cck_beacon_idx = 0; 5032 5033 for (i = 0; i < 10; i++) { 5034 rx_sens_p->cck_beacon_min[i] = 0; 5035 } 5036 5037 rx_sens_p->cck_noise_idx = 0; 5038 rx_sens_p->cck_noise_ref = 0; 5039 5040 for (i = 0; i < 20; i++) { 5041 rx_sens_p->cck_noise_max[i] = 0; 5042 } 5043 5044 rx_sens_p->cck_noise_diff = 0; 5045 rx_sens_p->cck_no_false_alarm_num = 0; 5046 5047 cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE; 5048 5049 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] = 5050 rx_sens_p->auto_corr_ofdm_x4; 5051 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] = 5052 rx_sens_p->auto_corr_mrc_ofdm_x4; 5053 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] = 5054 rx_sens_p->auto_corr_ofdm_x1; 5055 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] = 5056 rx_sens_p->auto_corr_mrc_ofdm_x1; 5057 5058 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] = 5059 rx_sens_p->auto_corr_cck_x4; 5060 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] = 5061 rx_sens_p->auto_corr_mrc_cck_x4; 5062 cmd.table[MIN_ENERGY_CCK_DET_IDX] = rx_sens_p->min_energy_det_cck; 5063 5064 cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100; 5065 cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190; 5066 cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390; 5067 cmd.table[PTAM_ENERGY_TH_IDX] = 62; 5068 5069 /* at first, set up Rx to maximum sensitivity */ 5070 rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1); 5071 if (rv) { 5072 cmn_err(CE_WARN, "iwk_rx_sens_init(): " 5073 "in the process of initialization, " 5074 "failed to send rx sensitivity command\n"); 5075 return (rv); 5076 } 5077 5078 rx_sens_p->flags |= IWK_SENSITIVITY_CALIB_ALLOW_MSK; 5079 5080 return (IWK_SUCCESS); 5081 } 5082 5083 /* 5084 * make Receiver sensitivity calibration to adjust every chain's Rx sensitivity. 
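 * The general idea, summarized from the code below: false-alarm and
 * bad-PLCP counters from the statistics notification are scaled to a
 * value relative to the measured Rx time, (fa + bad_plcp) * 200 * 1024,
 * and compared against 5x and 50x that Rx time; too many false alarms
 * make the energy/auto-correlation thresholds less sensitive, too few
 * make them more sensitive, and the updated thresholds are pushed to
 * the uCode with SENSITIVITY_CMD.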
5085 * for more infomation, please refer to iwk_calibration.h file 5086 */ 5087 static int iwk_rx_sens(iwk_sc_t *sc) 5088 { 5089 int rv; 5090 uint32_t actual_rx_time; 5091 struct statistics_rx_non_phy *rx_general_p = 5092 &sc->sc_statistics.rx.general; 5093 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens; 5094 struct iwk_rx_sensitivity_cmd cmd; 5095 5096 if (!(rx_sens_p->flags & IWK_SENSITIVITY_CALIB_ALLOW_MSK)) { 5097 cmn_err(CE_WARN, "iwk_rx_sens(): " 5098 "sensitivity initialization has not finished.\n"); 5099 return (DDI_FAILURE); 5100 } 5101 5102 if (INTERFERENCE_DATA_AVAILABLE != 5103 rx_general_p->interference_data_flag) { 5104 cmn_err(CE_WARN, "iwk_rx_sens(): " 5105 "can't make rx sensitivity calibration," 5106 "because of invalid statistics\n"); 5107 return (DDI_FAILURE); 5108 } 5109 5110 actual_rx_time = rx_general_p->channel_load; 5111 if (!actual_rx_time) { 5112 IWK_DBG((IWK_DEBUG_CALIBRATION, "iwk_rx_sens(): " 5113 "can't make rx sensitivity calibration," 5114 "because has not enough rx time\n")); 5115 return (DDI_FAILURE); 5116 } 5117 5118 /* make Rx sensitivity calibration for OFDM mode */ 5119 rv = iwk_ofdm_sens(sc, actual_rx_time); 5120 if (rv) { 5121 return (rv); 5122 } 5123 5124 /* make Rx sensitivity calibration for CCK mode */ 5125 rv = iwk_cck_sens(sc, actual_rx_time); 5126 if (rv) { 5127 return (rv); 5128 } 5129 5130 /* 5131 * if the sum of false alarm had not changed, nothing will be done 5132 */ 5133 if ((!(rx_sens_p->flags & IWK_SENSITIVITY_OFDM_UPDATE_MSK)) && 5134 (!(rx_sens_p->flags & IWK_SENSITIVITY_CCK_UPDATE_MSK))) { 5135 return (IWK_SUCCESS); 5136 } 5137 5138 cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE; 5139 5140 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] = 5141 rx_sens_p->auto_corr_ofdm_x4; 5142 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] = 5143 rx_sens_p->auto_corr_mrc_ofdm_x4; 5144 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] = 5145 rx_sens_p->auto_corr_ofdm_x1; 5146 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] = 5147 rx_sens_p->auto_corr_mrc_ofdm_x1; 5148 5149 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] = 5150 rx_sens_p->auto_corr_cck_x4; 5151 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] = 5152 rx_sens_p->auto_corr_mrc_cck_x4; 5153 cmd.table[MIN_ENERGY_CCK_DET_IDX] = 5154 rx_sens_p->min_energy_det_cck; 5155 5156 cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100; 5157 cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190; 5158 cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390; 5159 cmd.table[PTAM_ENERGY_TH_IDX] = 62; 5160 5161 /* 5162 * send sensitivity command to complete actual sensitivity calibration 5163 */ 5164 rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1); 5165 if (rv) { 5166 cmn_err(CE_WARN, "iwk_rx_sens(): " 5167 "fail to send rx sensitivity command\n"); 5168 return (rv); 5169 } 5170 5171 return (IWK_SUCCESS); 5172 5173 } 5174 5175 /* 5176 * make Rx sensitivity calibration for CCK mode. 
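 * The uCode reports cumulative 32-bit counters, so the code below first
 * converts them into per-interval deltas and handles wrap-around; e.g.
 * (hypothetical numbers) if the stored last count is 0xFFFFFFF0 and the
 * new reading is 0x5, the delta is taken as 0x5 + (0xFFFFFFFF - 0xFFFFFFF0).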
5177 * This is preparing parameters for Sensitivity command 5178 */ 5179 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time) 5180 { 5181 int i; 5182 uint8_t noise_a, noise_b, noise_c; 5183 uint8_t max_noise_abc, max_noise_20; 5184 uint32_t beacon_a, beacon_b, beacon_c; 5185 uint32_t min_beacon_abc, max_beacon_10; 5186 uint32_t cck_fa, cck_bp; 5187 uint32_t cck_sum_fa_bp; 5188 uint32_t temp; 5189 struct statistics_rx_non_phy *rx_general_p = 5190 &sc->sc_statistics.rx.general; 5191 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens; 5192 5193 cck_fa = sc->sc_statistics.rx.cck.false_alarm_cnt; 5194 cck_bp = sc->sc_statistics.rx.cck.plcp_err; 5195 5196 /* accumulate false alarm */ 5197 if (rx_sens_p->last_false_alarm_cnt_cck > cck_fa) { 5198 temp = rx_sens_p->last_false_alarm_cnt_cck; 5199 rx_sens_p->last_false_alarm_cnt_cck = cck_fa; 5200 cck_fa += (0xFFFFFFFF - temp); 5201 } else { 5202 cck_fa -= rx_sens_p->last_false_alarm_cnt_cck; 5203 rx_sens_p->last_false_alarm_cnt_cck += cck_fa; 5204 } 5205 5206 /* accumulate bad plcp */ 5207 if (rx_sens_p->last_bad_plcp_cnt_cck > cck_bp) { 5208 temp = rx_sens_p->last_bad_plcp_cnt_cck; 5209 rx_sens_p->last_bad_plcp_cnt_cck = cck_bp; 5210 cck_bp += (0xFFFFFFFF - temp); 5211 } else { 5212 cck_bp -= rx_sens_p->last_bad_plcp_cnt_cck; 5213 rx_sens_p->last_bad_plcp_cnt_cck += cck_bp; 5214 } 5215 5216 /* 5217 * calculate relative value 5218 */ 5219 cck_sum_fa_bp = (cck_fa + cck_bp) * 200 * 1024; 5220 rx_sens_p->cck_noise_diff = 0; 5221 5222 noise_a = 5223 (uint8_t)((rx_general_p->beacon_silence_rssi_a & 0xFF00) >> 8); 5224 noise_b = 5225 (uint8_t)((rx_general_p->beacon_silence_rssi_b & 0xFF00) >> 8); 5226 noise_c = 5227 (uint8_t)((rx_general_p->beacon_silence_rssi_c & 0xFF00) >> 8); 5228 5229 beacon_a = rx_general_p->beacon_energy_a; 5230 beacon_b = rx_general_p->beacon_energy_b; 5231 beacon_c = rx_general_p->beacon_energy_c; 5232 5233 /* determine maximum noise among 3 chains */ 5234 if ((noise_a >= noise_b) && (noise_a >= noise_c)) { 5235 max_noise_abc = noise_a; 5236 } else if (noise_b >= noise_c) { 5237 max_noise_abc = noise_b; 5238 } else { 5239 max_noise_abc = noise_c; 5240 } 5241 5242 /* record maximum noise among 3 chains */ 5243 rx_sens_p->cck_noise_max[rx_sens_p->cck_noise_idx] = max_noise_abc; 5244 rx_sens_p->cck_noise_idx++; 5245 if (rx_sens_p->cck_noise_idx >= 20) { 5246 rx_sens_p->cck_noise_idx = 0; 5247 } 5248 5249 /* determine maximum noise among 20 max noise */ 5250 max_noise_20 = rx_sens_p->cck_noise_max[0]; 5251 for (i = 0; i < 20; i++) { 5252 if (rx_sens_p->cck_noise_max[i] >= max_noise_20) { 5253 max_noise_20 = rx_sens_p->cck_noise_max[i]; 5254 } 5255 } 5256 5257 /* determine minimum beacon among 3 chains */ 5258 if ((beacon_a <= beacon_b) && (beacon_a <= beacon_c)) { 5259 min_beacon_abc = beacon_a; 5260 } else if (beacon_b <= beacon_c) { 5261 min_beacon_abc = beacon_b; 5262 } else { 5263 min_beacon_abc = beacon_c; 5264 } 5265 5266 /* record miminum beacon among 3 chains */ 5267 rx_sens_p->cck_beacon_min[rx_sens_p->cck_beacon_idx] = min_beacon_abc; 5268 rx_sens_p->cck_beacon_idx++; 5269 if (rx_sens_p->cck_beacon_idx >= 10) { 5270 rx_sens_p->cck_beacon_idx = 0; 5271 } 5272 5273 /* determine maximum beacon among 10 miminum beacon among 3 chains */ 5274 max_beacon_10 = rx_sens_p->cck_beacon_min[0]; 5275 for (i = 0; i < 10; i++) { 5276 if (rx_sens_p->cck_beacon_min[i] >= max_beacon_10) { 5277 max_beacon_10 = rx_sens_p->cck_beacon_min[i]; 5278 } 5279 } 5280 5281 /* add a little margin */ 5282 max_beacon_10 += 6; 5283 5284 /* 
record the count of having no false alarms */ 5285 if (cck_sum_fa_bp < (5 * actual_rx_time)) { 5286 rx_sens_p->cck_no_false_alarm_num++; 5287 } else { 5288 rx_sens_p->cck_no_false_alarm_num = 0; 5289 } 5290 5291 /* 5292 * adjust parameters in sensitivity command 5293 * according to different status. 5294 * for more infomation, please refer to iwk_calibration.h file 5295 */ 5296 if (cck_sum_fa_bp > (50 * actual_rx_time)) { 5297 rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM; 5298 5299 if (rx_sens_p->auto_corr_cck_x4 > 160) { 5300 rx_sens_p->cck_noise_ref = max_noise_20; 5301 5302 if (rx_sens_p->min_energy_det_cck > 2) { 5303 rx_sens_p->min_energy_det_cck -= 2; 5304 } 5305 } 5306 5307 if (rx_sens_p->auto_corr_cck_x4 < 160) { 5308 rx_sens_p->auto_corr_cck_x4 = 160 + 1; 5309 } else { 5310 if ((rx_sens_p->auto_corr_cck_x4 + 3) < 200) { 5311 rx_sens_p->auto_corr_cck_x4 += 3; 5312 } else { 5313 rx_sens_p->auto_corr_cck_x4 = 200; 5314 } 5315 } 5316 5317 if ((rx_sens_p->auto_corr_mrc_cck_x4 + 3) < 400) { 5318 rx_sens_p->auto_corr_mrc_cck_x4 += 3; 5319 } else { 5320 rx_sens_p->auto_corr_mrc_cck_x4 = 400; 5321 } 5322 5323 rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK; 5324 5325 } else if (cck_sum_fa_bp < (5 * actual_rx_time)) { 5326 rx_sens_p->cck_curr_state = IWK_TOO_FEW_FALSE_ALARM; 5327 5328 rx_sens_p->cck_noise_diff = (int32_t)rx_sens_p->cck_noise_ref - 5329 (int32_t)max_noise_20; 5330 5331 if ((rx_sens_p->cck_prev_state != IWK_TOO_MANY_FALSE_ALARM) && 5332 ((rx_sens_p->cck_noise_diff > 2) || 5333 (rx_sens_p->cck_no_false_alarm_num > 100))) { 5334 if ((rx_sens_p->min_energy_det_cck + 2) < 97) { 5335 rx_sens_p->min_energy_det_cck += 2; 5336 } else { 5337 rx_sens_p->min_energy_det_cck = 97; 5338 } 5339 5340 if ((rx_sens_p->auto_corr_cck_x4 - 3) > 125) { 5341 rx_sens_p->auto_corr_cck_x4 -= 3; 5342 } else { 5343 rx_sens_p->auto_corr_cck_x4 = 125; 5344 } 5345 5346 if ((rx_sens_p->auto_corr_mrc_cck_x4 -3) > 200) { 5347 rx_sens_p->auto_corr_mrc_cck_x4 -= 3; 5348 } else { 5349 rx_sens_p->auto_corr_mrc_cck_x4 = 200; 5350 } 5351 5352 rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK; 5353 } else { 5354 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK); 5355 } 5356 } else { 5357 rx_sens_p->cck_curr_state = IWK_GOOD_RANGE_FALSE_ALARM; 5358 5359 rx_sens_p->cck_noise_ref = max_noise_20; 5360 5361 if (IWK_TOO_MANY_FALSE_ALARM == rx_sens_p->cck_prev_state) { 5362 rx_sens_p->min_energy_det_cck -= 8; 5363 } 5364 5365 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK); 5366 } 5367 5368 if (rx_sens_p->min_energy_det_cck < max_beacon_10) { 5369 rx_sens_p->min_energy_det_cck = (uint16_t)max_beacon_10; 5370 } 5371 5372 rx_sens_p->cck_prev_state = rx_sens_p->cck_curr_state; 5373 5374 return (IWK_SUCCESS); 5375 } 5376 5377 /* 5378 * make Rx sensitivity calibration for OFDM mode. 
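 * Sketch of the adjustment below: with too many false alarms (more than
 * 50x the Rx time) each OFDM auto-correlation threshold is raised by one
 * step toward its cap (120/210/140/270); with too few (less than 5x the
 * Rx time) each is lowered by one step toward its floor (85/170/105/220);
 * otherwise nothing is updated.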
5379 * This is preparing parameters for Sensitivity command 5380 */ 5381 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time) 5382 { 5383 uint32_t temp; 5384 uint16_t temp1; 5385 uint32_t ofdm_fa, ofdm_bp; 5386 uint32_t ofdm_sum_fa_bp; 5387 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens; 5388 5389 ofdm_fa = sc->sc_statistics.rx.ofdm.false_alarm_cnt; 5390 ofdm_bp = sc->sc_statistics.rx.ofdm.plcp_err; 5391 5392 /* accumulate false alarm */ 5393 if (rx_sens_p->last_false_alarm_cnt_ofdm > ofdm_fa) { 5394 temp = rx_sens_p->last_false_alarm_cnt_ofdm; 5395 rx_sens_p->last_false_alarm_cnt_ofdm = ofdm_fa; 5396 ofdm_fa += (0xFFFFFFFF - temp); 5397 } else { 5398 ofdm_fa -= rx_sens_p->last_false_alarm_cnt_ofdm; 5399 rx_sens_p->last_false_alarm_cnt_ofdm += ofdm_fa; 5400 } 5401 5402 /* accumulate bad plcp */ 5403 if (rx_sens_p->last_bad_plcp_cnt_ofdm > ofdm_bp) { 5404 temp = rx_sens_p->last_bad_plcp_cnt_ofdm; 5405 rx_sens_p->last_bad_plcp_cnt_ofdm = ofdm_bp; 5406 ofdm_bp += (0xFFFFFFFF - temp); 5407 } else { 5408 ofdm_bp -= rx_sens_p->last_bad_plcp_cnt_ofdm; 5409 rx_sens_p->last_bad_plcp_cnt_ofdm += ofdm_bp; 5410 } 5411 5412 ofdm_sum_fa_bp = (ofdm_fa + ofdm_bp) * 200 * 1024; /* relative value */ 5413 5414 /* 5415 * adjust parameter in sensitivity command according to different status 5416 */ 5417 if (ofdm_sum_fa_bp > (50 * actual_rx_time)) { 5418 temp1 = rx_sens_p->auto_corr_ofdm_x4 + 1; 5419 rx_sens_p->auto_corr_ofdm_x4 = (temp1 <= 120) ? temp1 : 120; 5420 5421 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 + 1; 5422 rx_sens_p->auto_corr_mrc_ofdm_x4 = 5423 (temp1 <= 210) ? temp1 : 210; 5424 5425 temp1 = rx_sens_p->auto_corr_ofdm_x1 + 1; 5426 rx_sens_p->auto_corr_ofdm_x1 = (temp1 <= 140) ? temp1 : 140; 5427 5428 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 + 1; 5429 rx_sens_p->auto_corr_mrc_ofdm_x1 = 5430 (temp1 <= 270) ? temp1 : 270; 5431 5432 rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK; 5433 5434 } else if (ofdm_sum_fa_bp < (5 * actual_rx_time)) { 5435 temp1 = rx_sens_p->auto_corr_ofdm_x4 - 1; 5436 rx_sens_p->auto_corr_ofdm_x4 = (temp1 >= 85) ? temp1 : 85; 5437 5438 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 - 1; 5439 rx_sens_p->auto_corr_mrc_ofdm_x4 = 5440 (temp1 >= 170) ? temp1 : 170; 5441 5442 temp1 = rx_sens_p->auto_corr_ofdm_x1 - 1; 5443 rx_sens_p->auto_corr_ofdm_x1 = (temp1 >= 105) ? temp1 : 105; 5444 5445 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 - 1; 5446 rx_sens_p->auto_corr_mrc_ofdm_x1 = 5447 (temp1 >= 220) ? temp1 : 220; 5448 5449 rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK; 5450 5451 } else { 5452 rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK); 5453 } 5454 5455 return (IWK_SUCCESS); 5456 } 5457 5458 /* 5459 * 1) log_event_table_ptr indicates base of the event log. This traces 5460 * a 256-entry history of uCode execution within a circular buffer. 5461 * Its header format is: 5462 * 5463 * uint32_t log_size; log capacity (in number of entries) 5464 * uint32_t type; (1) timestamp with each entry, (0) no timestamp 5465 * uint32_t wraps; # times uCode has wrapped to top of circular buffer 5466 * uint32_t write_index; next circular buffer entry that uCode would fill 5467 * 5468 * The header is followed by the circular buffer of log entries. Entries 5469 * with timestamps have the following format: 5470 * 5471 * uint32_t event_id; range 0 - 1500 5472 * uint32_t timestamp; low 32 bits of TSF (of network, if associated) 5473 * uint32_t data; event_id-specific data value 5474 * 5475 * Entries without timestamps contain only event_id and data. 
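 * As an addressing example based on the layout above: the log data
 * starts right after the four header words, so with timestamps enabled
 * entry i begins at
 *	log_event_table_ptr + (4 + i * 3) * sizeof (uint32_t)
 * and without timestamps at
 *	log_event_table_ptr + (4 + i * 2) * sizeof (uint32_t).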
5476 */ 5477 5478 /* 5479 * iwk_write_event_log - Write event log to dmesg 5480 */ 5481 static void iwk_write_event_log(iwk_sc_t *sc) 5482 { 5483 uint32_t log_event_table_ptr; /* Start address of event table */ 5484 uint32_t startptr; /* Start address of log data */ 5485 uint32_t logptr; /* address of log data entry */ 5486 uint32_t i, n, num_events; 5487 uint32_t event_id, data1, data2; /* log data */ 5488 5489 uint32_t log_size; /* log capacity (in number of entries) */ 5490 uint32_t type; /* (1)timestamp with each entry,(0) no timestamp */ 5491 uint32_t wraps; /* # times uCode has wrapped to */ 5492 /* the top of circular buffer */ 5493 uint32_t idx; /* index of entry to be filled in next */ 5494 5495 log_event_table_ptr = sc->sc_card_alive_run.log_event_table_ptr; 5496 if (!(log_event_table_ptr)) { 5497 IWK_DBG((IWK_DEBUG_EEPROM, "NULL event table pointer\n")); 5498 return; 5499 } 5500 5501 iwk_mac_access_enter(sc); 5502 5503 /* Read log header */ 5504 log_size = iwk_mem_read(sc, log_event_table_ptr); 5505 log_event_table_ptr += sizeof (uint32_t); /* addr of "type" */ 5506 type = iwk_mem_read(sc, log_event_table_ptr); 5507 log_event_table_ptr += sizeof (uint32_t); /* addr of "wraps" */ 5508 wraps = iwk_mem_read(sc, log_event_table_ptr); 5509 log_event_table_ptr += sizeof (uint32_t); /* addr of "idx" */ 5510 idx = iwk_mem_read(sc, log_event_table_ptr); 5511 startptr = log_event_table_ptr + 5512 sizeof (uint32_t); /* addr of start of log data */ 5513 if (!log_size & !wraps) { 5514 IWK_DBG((IWK_DEBUG_EEPROM, "Empty log\n")); 5515 iwk_mac_access_exit(sc); 5516 return; 5517 } 5518 5519 if (!wraps) { 5520 num_events = idx; 5521 logptr = startptr; 5522 } else { 5523 num_events = log_size - idx; 5524 n = type ? 2 : 3; 5525 logptr = startptr + (idx * n * sizeof (uint32_t)); 5526 } 5527 5528 for (i = 0; i < num_events; i++) { 5529 event_id = iwk_mem_read(sc, logptr); 5530 logptr += sizeof (uint32_t); 5531 data1 = iwk_mem_read(sc, logptr); 5532 logptr += sizeof (uint32_t); 5533 if (type == 0) { /* no timestamp */ 5534 IWK_DBG((IWK_DEBUG_EEPROM, "Event ID=%d, Data=%x0x", 5535 event_id, data1)); 5536 } else { /* timestamp */ 5537 data2 = iwk_mem_read(sc, logptr); 5538 printf("Time=%d, Event ID=%d, Data=0x%x\n", 5539 data1, event_id, data2); 5540 IWK_DBG((IWK_DEBUG_EEPROM, 5541 "Time=%d, Event ID=%d, Data=0x%x\n", 5542 data1, event_id, data2)); 5543 logptr += sizeof (uint32_t); 5544 } 5545 } 5546 5547 /* 5548 * Print the wrapped around entries, if any 5549 */ 5550 if (wraps) { 5551 logptr = startptr; 5552 for (i = 0; i < idx; i++) { 5553 event_id = iwk_mem_read(sc, logptr); 5554 logptr += sizeof (uint32_t); 5555 data1 = iwk_mem_read(sc, logptr); 5556 logptr += sizeof (uint32_t); 5557 if (type == 0) { /* no timestamp */ 5558 IWK_DBG((IWK_DEBUG_EEPROM, 5559 "Event ID=%d, Data=%x0x", event_id, data1)); 5560 } else { /* timestamp */ 5561 data2 = iwk_mem_read(sc, logptr); 5562 IWK_DBG((IWK_DEBUG_EEPROM, 5563 "Time = %d, Event ID=%d, Data=0x%x\n", 5564 data1, event_id, data2)); 5565 logptr += sizeof (uint32_t); 5566 } 5567 } 5568 } 5569 5570 iwk_mac_access_exit(sc); 5571 } 5572 5573 /* 5574 * error_event_table_ptr indicates base of the error log. This contains 5575 * information about any uCode error that occurs. 
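 * iwk_write_error_log() below simply checks the leading "valid" word and
 * then dumps the remaining fields one uint32_t at a time via
 * iwk_mem_read().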
For 4965, the format is: 5576 * 5577 * uint32_t valid; (nonzero) valid, (0) log is empty 5578 * uint32_t error_id; type of error 5579 * uint32_t pc; program counter 5580 * uint32_t blink1; branch link 5581 * uint32_t blink2; branch link 5582 * uint32_t ilink1; interrupt link 5583 * uint32_t ilink2; interrupt link 5584 * uint32_t data1; error-specific data 5585 * uint32_t data2; error-specific data 5586 * uint32_t line; source code line of error 5587 * uint32_t bcon_time; beacon timer 5588 * uint32_t tsf_low; network timestamp function timer 5589 * uint32_t tsf_hi; network timestamp function timer 5590 */ 5591 /* 5592 * iwk_write_error_log - Write error log to dmesg 5593 */ 5594 static void iwk_write_error_log(iwk_sc_t *sc) 5595 { 5596 uint32_t err_ptr; /* Start address of error log */ 5597 uint32_t valid; /* is error log valid */ 5598 5599 err_ptr = sc->sc_card_alive_run.error_event_table_ptr; 5600 if (!(err_ptr)) { 5601 IWK_DBG((IWK_DEBUG_EEPROM, "NULL error table pointer\n")); 5602 return; 5603 } 5604 5605 iwk_mac_access_enter(sc); 5606 5607 valid = iwk_mem_read(sc, err_ptr); 5608 if (!(valid)) { 5609 IWK_DBG((IWK_DEBUG_EEPROM, "Error data not valid\n")); 5610 iwk_mac_access_exit(sc); 5611 return; 5612 } 5613 err_ptr += sizeof (uint32_t); 5614 IWK_DBG((IWK_DEBUG_EEPROM, "err=%d ", iwk_mem_read(sc, err_ptr))); 5615 err_ptr += sizeof (uint32_t); 5616 IWK_DBG((IWK_DEBUG_EEPROM, "pc=0x%X ", iwk_mem_read(sc, err_ptr))); 5617 err_ptr += sizeof (uint32_t); 5618 IWK_DBG((IWK_DEBUG_EEPROM, 5619 "branch link1=0x%X ", iwk_mem_read(sc, err_ptr))); 5620 err_ptr += sizeof (uint32_t); 5621 IWK_DBG((IWK_DEBUG_EEPROM, 5622 "branch link2=0x%X ", iwk_mem_read(sc, err_ptr))); 5623 err_ptr += sizeof (uint32_t); 5624 IWK_DBG((IWK_DEBUG_EEPROM, 5625 "interrupt link1=0x%X ", iwk_mem_read(sc, err_ptr))); 5626 err_ptr += sizeof (uint32_t); 5627 IWK_DBG((IWK_DEBUG_EEPROM, 5628 "interrupt link2=0x%X ", iwk_mem_read(sc, err_ptr))); 5629 err_ptr += sizeof (uint32_t); 5630 IWK_DBG((IWK_DEBUG_EEPROM, "data1=0x%X ", iwk_mem_read(sc, err_ptr))); 5631 err_ptr += sizeof (uint32_t); 5632 IWK_DBG((IWK_DEBUG_EEPROM, "data2=0x%X ", iwk_mem_read(sc, err_ptr))); 5633 err_ptr += sizeof (uint32_t); 5634 IWK_DBG((IWK_DEBUG_EEPROM, "line=%d ", iwk_mem_read(sc, err_ptr))); 5635 err_ptr += sizeof (uint32_t); 5636 IWK_DBG((IWK_DEBUG_EEPROM, "bcon_time=%d ", iwk_mem_read(sc, err_ptr))); 5637 err_ptr += sizeof (uint32_t); 5638 IWK_DBG((IWK_DEBUG_EEPROM, "tsf_low=%d ", iwk_mem_read(sc, err_ptr))); 5639 err_ptr += sizeof (uint32_t); 5640 IWK_DBG((IWK_DEBUG_EEPROM, "tsf_hi=%d\n", iwk_mem_read(sc, err_ptr))); 5641 5642 iwk_mac_access_exit(sc); 5643 } 5644
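/*
 * Illustrative helper, not part of the original driver: when the uCode
 * reports an error it can be handy to dump both logs in one call.  A
 * minimal sketch (the name iwk_dump_ucode_logs is made up here), reusing
 * only the two routines defined above:
 */
#ifdef DEBUG
static void
iwk_dump_ucode_logs(iwk_sc_t *sc)
{
	/* fixed-format error record first, then the circular event log */
	iwk_write_error_log(sc);
	iwk_write_event_log(sc);
}
#endif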