1 /* 2 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 3 * Use is subject to license terms. 4 */ 5 6 /* 7 * Copyright (c) 2007, Intel Corporation 8 * All rights reserved. 9 */ 10 11 /* 12 * Copyright (c) 2006 13 * Copyright (c) 2007 14 * Damien Bergamini <damien.bergamini@free.fr> 15 * 16 * Permission to use, copy, modify, and distribute this software for any 17 * purpose with or without fee is hereby granted, provided that the above 18 * copyright notice and this permission notice appear in all copies. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 21 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 22 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 23 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 24 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 25 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 26 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 27 */ 28 29 /* 30 * Driver for Intel PRO/Wireless 4965AGN(kedron) 802.11 network adapters. 31 */ 32 33 #include <sys/types.h> 34 #include <sys/byteorder.h> 35 #include <sys/conf.h> 36 #include <sys/cmn_err.h> 37 #include <sys/stat.h> 38 #include <sys/ddi.h> 39 #include <sys/sunddi.h> 40 #include <sys/strsubr.h> 41 #include <sys/ethernet.h> 42 #include <inet/common.h> 43 #include <inet/nd.h> 44 #include <inet/mi.h> 45 #include <sys/note.h> 46 #include <sys/stream.h> 47 #include <sys/strsun.h> 48 #include <sys/modctl.h> 49 #include <sys/devops.h> 50 #include <sys/dlpi.h> 51 #include <sys/mac_provider.h> 52 #include <sys/mac_wifi.h> 53 #include <sys/net80211.h> 54 #include <sys/net80211_proto.h> 55 #include <sys/varargs.h> 56 #include <sys/policy.h> 57 #include <sys/pci.h> 58 59 #include "iwk_calibration.h" 60 #include "iwk_hw.h" 61 #include "iwk_eeprom.h" 62 #include "iwk2_var.h" 63 #include <inet/wifi_ioctl.h> 64 65 #ifdef DEBUG 66 #define IWK_DEBUG_80211 (1 << 0) 67 #define IWK_DEBUG_CMD (1 << 1) 68 #define IWK_DEBUG_DMA (1 << 2) 69 #define IWK_DEBUG_EEPROM (1 << 3) 70 #define IWK_DEBUG_FW (1 << 4) 71 #define IWK_DEBUG_HW (1 << 5) 72 #define IWK_DEBUG_INTR (1 << 6) 73 #define IWK_DEBUG_MRR (1 << 7) 74 #define IWK_DEBUG_PIO (1 << 8) 75 #define IWK_DEBUG_RX (1 << 9) 76 #define IWK_DEBUG_SCAN (1 << 10) 77 #define IWK_DEBUG_TX (1 << 11) 78 #define IWK_DEBUG_RATECTL (1 << 12) 79 #define IWK_DEBUG_RADIO (1 << 13) 80 #define IWK_DEBUG_RESUME (1 << 14) 81 #define IWK_DEBUG_CALIBRATION (1 << 15) 82 uint32_t iwk_dbg_flags = 0; 83 #define IWK_DBG(x) \ 84 iwk_dbg x 85 #else 86 #define IWK_DBG(x) 87 #endif 88 89 static void *iwk_soft_state_p = NULL; 90 static uint8_t iwk_fw_bin [] = { 91 #include "fw-iw/iw4965.ucode.hex" 92 }; 93 94 /* DMA attributes for a shared page */ 95 static ddi_dma_attr_t sh_dma_attr = { 96 DMA_ATTR_V0, /* version of this structure */ 97 0, /* lowest usable address */ 98 0xffffffffU, /* highest usable address */ 99 0xffffffffU, /* maximum DMAable byte count */ 100 0x1000, /* alignment in bytes */ 101 0x1000, /* burst sizes (any?) 
*/
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffU,		/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	0,			/* flags (reserved) */
};

/* DMA attributes for a keep warm DRAM descriptor */
static ddi_dma_attr_t kw_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffU,		/* highest usable address */
	0xffffffffU,		/* maximum DMAable byte count */
	0x1000,			/* alignment in bytes */
	0x1000,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffU,		/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	0,			/* flags (reserved) */
};

/* DMA attributes for a ring descriptor */
static ddi_dma_attr_t ring_desc_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffU,		/* highest usable address */
	0xffffffffU,		/* maximum DMAable byte count */
	0x100,			/* alignment in bytes */
	0x100,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffU,		/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	0,			/* flags (reserved) */
};

/* DMA attributes for a cmd */
static ddi_dma_attr_t cmd_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffU,		/* highest usable address */
	0xffffffffU,		/* maximum DMAable byte count */
	4,			/* alignment in bytes */
	0x100,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffU,		/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	0,			/* flags (reserved) */
};

/* DMA attributes for a rx buffer */
static ddi_dma_attr_t rx_buffer_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffU,		/* highest usable address */
	0xffffffffU,		/* maximum DMAable byte count */
	0x100,			/* alignment in bytes */
	0x100,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffU,		/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	0,			/* flags (reserved) */
};

/*
 * DMA attributes for a tx buffer.
 * The hardware can place a frame in up to 4 segments, but all of the
 * wifi drivers currently put the whole frame in a single descriptor,
 * so the maximum number of segments is set to 1, just like the rx
 * buffer.  We may take advantage of the hardware's scatter/gather
 * capability in the future; that is why tx_buffer_dma_attr is kept
 * separate from rx_buffer_dma_attr instead of sharing one definition.
 */
static ddi_dma_attr_t tx_buffer_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffU,		/* highest usable address */
	0xffffffffU,		/* maximum DMAable byte count */
	4,			/* alignment in bytes */
	0x100,			/* burst sizes (any?)
*/ 190 1, /* minimum transfer */ 191 0xffffffffU, /* maximum transfer */ 192 0xffffffffU, /* maximum segment length */ 193 1, /* maximum number of segments */ 194 1, /* granularity */ 195 0, /* flags (reserved) */ 196 }; 197 198 /* DMA attributes for text and data part in the firmware */ 199 static ddi_dma_attr_t fw_dma_attr = { 200 DMA_ATTR_V0, /* version of this structure */ 201 0, /* lowest usable address */ 202 0xffffffffU, /* highest usable address */ 203 0x7fffffff, /* maximum DMAable byte count */ 204 0x10, /* alignment in bytes */ 205 0x100, /* burst sizes (any?) */ 206 1, /* minimum transfer */ 207 0xffffffffU, /* maximum transfer */ 208 0xffffffffU, /* maximum segment length */ 209 1, /* maximum number of segments */ 210 1, /* granularity */ 211 0, /* flags (reserved) */ 212 }; 213 214 215 /* regs access attributes */ 216 static ddi_device_acc_attr_t iwk_reg_accattr = { 217 DDI_DEVICE_ATTR_V0, 218 DDI_STRUCTURE_LE_ACC, 219 DDI_STRICTORDER_ACC, 220 DDI_DEFAULT_ACC 221 }; 222 223 /* DMA access attributes for Descriptor */ 224 static ddi_device_acc_attr_t iwk_dma_descattr = { 225 DDI_DEVICE_ATTR_V0, 226 DDI_STRUCTURE_LE_ACC, 227 DDI_STRICTORDER_ACC, 228 DDI_DEFAULT_ACC 229 }; 230 231 /* DMA access attributes */ 232 static ddi_device_acc_attr_t iwk_dma_accattr = { 233 DDI_DEVICE_ATTR_V0, 234 DDI_NEVERSWAP_ACC, 235 DDI_STRICTORDER_ACC, 236 DDI_DEFAULT_ACC 237 }; 238 239 static int iwk_ring_init(iwk_sc_t *); 240 static void iwk_ring_free(iwk_sc_t *); 241 static int iwk_alloc_shared(iwk_sc_t *); 242 static void iwk_free_shared(iwk_sc_t *); 243 static int iwk_alloc_kw(iwk_sc_t *); 244 static void iwk_free_kw(iwk_sc_t *); 245 static int iwk_alloc_fw_dma(iwk_sc_t *); 246 static void iwk_free_fw_dma(iwk_sc_t *); 247 static int iwk_alloc_rx_ring(iwk_sc_t *); 248 static void iwk_reset_rx_ring(iwk_sc_t *); 249 static void iwk_free_rx_ring(iwk_sc_t *); 250 static int iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *, 251 int, int); 252 static void iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *); 253 static void iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *); 254 255 static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *); 256 static void iwk_node_free(ieee80211_node_t *); 257 static int iwk_newstate(ieee80211com_t *, enum ieee80211_state, int); 258 static int iwk_key_set(ieee80211com_t *, const struct ieee80211_key *, 259 const uint8_t mac[IEEE80211_ADDR_LEN]); 260 static void iwk_mac_access_enter(iwk_sc_t *); 261 static void iwk_mac_access_exit(iwk_sc_t *); 262 static uint32_t iwk_reg_read(iwk_sc_t *, uint32_t); 263 static void iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t); 264 static void iwk_reg_write_region_4(iwk_sc_t *, uint32_t, 265 uint32_t *, int); 266 static int iwk_load_firmware(iwk_sc_t *); 267 static void iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *, 268 iwk_rx_data_t *); 269 static void iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *, 270 iwk_rx_data_t *); 271 static void iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *); 272 static uint_t iwk_intr(caddr_t, caddr_t); 273 static int iwk_eep_load(iwk_sc_t *sc); 274 static void iwk_get_mac_from_eep(iwk_sc_t *sc); 275 static int iwk_eep_sem_down(iwk_sc_t *sc); 276 static void iwk_eep_sem_up(iwk_sc_t *sc); 277 static uint_t iwk_rx_softintr(caddr_t, caddr_t); 278 static uint8_t iwk_rate_to_plcp(int); 279 static int iwk_cmd(iwk_sc_t *, int, const void *, int, int); 280 static void iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t); 281 static int iwk_hw_set_before_auth(iwk_sc_t *); 282 static int iwk_scan(iwk_sc_t *); 283 static int iwk_config(iwk_sc_t *); 284 
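/*
 * Debug tracing usage note: iwk_dbg() only emits a message when one of
 * the IWK_DEBUG_* bits passed to it is also set in iwk_dbg_flags, which
 * defaults to 0.  Because IWK_DBG() pastes its single macro argument in
 * front of iwk_dbg, call sites wrap the whole argument list in an extra
 * pair of parentheses, for example (an illustrative call, not new driver
 * logic):
 *
 *	iwk_dbg_flags = IWK_DEBUG_TX | IWK_DEBUG_RX;
 *	IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d\n", qid));
 *
 * With iwk_dbg_flags left at 0 nothing is printed, and in non-DEBUG
 * builds IWK_DBG() expands to nothing at all.
 */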
static void iwk_stop_master(iwk_sc_t *); 285 static int iwk_power_up(iwk_sc_t *); 286 static int iwk_preinit(iwk_sc_t *); 287 static int iwk_init(iwk_sc_t *); 288 static void iwk_stop(iwk_sc_t *); 289 static void iwk_amrr_init(iwk_amrr_t *); 290 static void iwk_amrr_timeout(iwk_sc_t *); 291 static void iwk_amrr_ratectl(void *, ieee80211_node_t *); 292 static int32_t iwk_curr_tempera(iwk_sc_t *sc); 293 static int iwk_tx_power_calibration(iwk_sc_t *sc); 294 static inline int iwk_is_24G_band(iwk_sc_t *sc); 295 static inline int iwk_is_fat_channel(iwk_sc_t *sc); 296 static int iwk_txpower_grp(uint16_t channel); 297 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc, 298 uint16_t channel, 299 int is_24G, int is_fat, int is_hi_chan); 300 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel); 301 static int iwk_division(int32_t num, int32_t denom, int32_t *res); 302 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1, 303 int32_t x2, int32_t y2); 304 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel, 305 struct iwk_eep_calib_channel_info *chan_info); 306 static int32_t iwk_voltage_compensation(int32_t eep_voltage, 307 int32_t curr_voltage); 308 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G); 309 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc, 310 struct iwk_tx_power_db *tp_db); 311 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc); 312 static int iwk_is_associated(iwk_sc_t *sc); 313 static int iwk_rxgain_diff_init(iwk_sc_t *sc); 314 static int iwk_rxgain_diff(iwk_sc_t *sc); 315 static int iwk_rx_sens_init(iwk_sc_t *sc); 316 static int iwk_rx_sens(iwk_sc_t *sc); 317 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time); 318 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time); 319 static void iwk_recv_mgmt(struct ieee80211com *ic, mblk_t *mp, 320 struct ieee80211_node *in, int subtype, int rssi, uint32_t rstamp); 321 322 static void iwk_write_event_log(iwk_sc_t *); 323 static void iwk_write_error_log(iwk_sc_t *); 324 325 static int iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 326 static int iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 327 static int iwk_quiesce(dev_info_t *dip); 328 329 /* 330 * GLD specific operations 331 */ 332 static int iwk_m_stat(void *arg, uint_t stat, uint64_t *val); 333 static int iwk_m_start(void *arg); 334 static void iwk_m_stop(void *arg); 335 static int iwk_m_unicst(void *arg, const uint8_t *macaddr); 336 static int iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m); 337 static int iwk_m_promisc(void *arg, boolean_t on); 338 static mblk_t *iwk_m_tx(void *arg, mblk_t *mp); 339 static void iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp); 340 static int iwk_m_setprop(void *arg, const char *pr_name, 341 mac_prop_id_t wldp_pr_name, uint_t wldp_length, const void *wldp_buf); 342 static int iwk_m_getprop(void *arg, const char *pr_name, 343 mac_prop_id_t wldp_pr_name, uint_t pr_flags, uint_t wldp_length, 344 void *wldp_buf, uint_t *perm); 345 static void iwk_destroy_locks(iwk_sc_t *sc); 346 static int iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type); 347 static void iwk_thread(iwk_sc_t *sc); 348 static void iwk_watchdog(void *arg); 349 static int iwk_run_state_config_ibss(ieee80211com_t *ic); 350 static int iwk_run_state_config_sta(ieee80211com_t *ic); 351 static int iwk_fast_recover(iwk_sc_t *sc); 352 static int iwk_start_tx_beacon(ieee80211com_t *ic); 353 static int iwk_clean_add_node_ibss(struct ieee80211com *ic, 354 uint8_t 
addr[IEEE80211_ADDR_LEN], uint8_t *index2); 355 356 /* 357 * Supported rates for 802.11b/g modes (in 500Kbps unit). 358 * 11a and 11n support will be added later. 359 */ 360 static const struct ieee80211_rateset iwk_rateset_11b = 361 { 4, { 2, 4, 11, 22 } }; 362 363 static const struct ieee80211_rateset iwk_rateset_11g = 364 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } }; 365 366 /* 367 * For mfthread only 368 */ 369 extern pri_t minclsyspri; 370 371 #define DRV_NAME_4965 "iwk" 372 373 /* 374 * Module Loading Data & Entry Points 375 */ 376 DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach, 377 iwk_detach, nodev, NULL, D_MP, NULL, iwk_quiesce); 378 379 static struct modldrv iwk_modldrv = { 380 &mod_driverops, 381 "Intel(R) 4965AGN driver(N)", 382 &iwk_devops 383 }; 384 385 static struct modlinkage iwk_modlinkage = { 386 MODREV_1, 387 &iwk_modldrv, 388 NULL 389 }; 390 391 int 392 _init(void) 393 { 394 int status; 395 396 status = ddi_soft_state_init(&iwk_soft_state_p, 397 sizeof (iwk_sc_t), 1); 398 if (status != DDI_SUCCESS) 399 return (status); 400 401 mac_init_ops(&iwk_devops, DRV_NAME_4965); 402 status = mod_install(&iwk_modlinkage); 403 if (status != DDI_SUCCESS) { 404 mac_fini_ops(&iwk_devops); 405 ddi_soft_state_fini(&iwk_soft_state_p); 406 } 407 408 return (status); 409 } 410 411 int 412 _fini(void) 413 { 414 int status; 415 416 status = mod_remove(&iwk_modlinkage); 417 if (status == DDI_SUCCESS) { 418 mac_fini_ops(&iwk_devops); 419 ddi_soft_state_fini(&iwk_soft_state_p); 420 } 421 422 return (status); 423 } 424 425 int 426 _info(struct modinfo *mip) 427 { 428 return (mod_info(&iwk_modlinkage, mip)); 429 } 430 431 /* 432 * Mac Call Back entries 433 */ 434 mac_callbacks_t iwk_m_callbacks = { 435 MC_IOCTL | MC_SETPROP | MC_GETPROP, 436 iwk_m_stat, 437 iwk_m_start, 438 iwk_m_stop, 439 iwk_m_promisc, 440 iwk_m_multicst, 441 iwk_m_unicst, 442 iwk_m_tx, 443 iwk_m_ioctl, 444 NULL, 445 NULL, 446 NULL, 447 iwk_m_setprop, 448 iwk_m_getprop 449 }; 450 451 #ifdef DEBUG 452 void 453 iwk_dbg(uint32_t flags, const char *fmt, ...) 
454 { 455 va_list ap; 456 457 if (flags & iwk_dbg_flags) { 458 va_start(ap, fmt); 459 vcmn_err(CE_NOTE, fmt, ap); 460 va_end(ap); 461 } 462 } 463 #endif 464 465 /* 466 * device operations 467 */ 468 int 469 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 470 { 471 iwk_sc_t *sc; 472 ieee80211com_t *ic; 473 int instance, err, i; 474 char strbuf[32]; 475 wifi_data_t wd = { 0 }; 476 mac_register_t *macp; 477 478 int intr_type; 479 int intr_count; 480 int intr_actual; 481 482 switch (cmd) { 483 case DDI_ATTACH: 484 break; 485 case DDI_RESUME: 486 sc = ddi_get_soft_state(iwk_soft_state_p, 487 ddi_get_instance(dip)); 488 ASSERT(sc != NULL); 489 490 mutex_enter(&sc->sc_glock); 491 sc->sc_flags &= ~IWK_F_SUSPEND; 492 mutex_exit(&sc->sc_glock); 493 494 if (sc->sc_flags & IWK_F_RUNNING) 495 (void) iwk_init(sc); 496 497 mutex_enter(&sc->sc_glock); 498 sc->sc_flags |= IWK_F_LAZY_RESUME; 499 mutex_exit(&sc->sc_glock); 500 501 IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n")); 502 return (DDI_SUCCESS); 503 default: 504 err = DDI_FAILURE; 505 goto attach_fail1; 506 } 507 508 instance = ddi_get_instance(dip); 509 err = ddi_soft_state_zalloc(iwk_soft_state_p, instance); 510 if (err != DDI_SUCCESS) { 511 cmn_err(CE_WARN, 512 "iwk_attach(): failed to allocate soft state\n"); 513 goto attach_fail1; 514 } 515 sc = ddi_get_soft_state(iwk_soft_state_p, instance); 516 sc->sc_dip = dip; 517 518 err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0, 519 &iwk_reg_accattr, &sc->sc_cfg_handle); 520 if (err != DDI_SUCCESS) { 521 cmn_err(CE_WARN, 522 "iwk_attach(): failed to map config spaces regs\n"); 523 goto attach_fail2; 524 } 525 sc->sc_rev = ddi_get8(sc->sc_cfg_handle, 526 (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID)); 527 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0); 528 sc->sc_clsz = ddi_get16(sc->sc_cfg_handle, 529 (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ)); 530 if (!sc->sc_clsz) 531 sc->sc_clsz = 16; 532 sc->sc_clsz = (sc->sc_clsz << 2); 533 sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) + 534 IEEE80211_MTU + IEEE80211_CRC_LEN + 535 (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + 536 IEEE80211_WEP_CRCLEN), sc->sc_clsz); 537 /* 538 * Map operating registers 539 */ 540 err = ddi_regs_map_setup(dip, 1, &sc->sc_base, 541 0, 0, &iwk_reg_accattr, &sc->sc_handle); 542 if (err != DDI_SUCCESS) { 543 cmn_err(CE_WARN, 544 "iwk_attach(): failed to map device regs\n"); 545 goto attach_fail2a; 546 } 547 548 err = ddi_intr_get_supported_types(dip, &intr_type); 549 if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) { 550 cmn_err(CE_WARN, "iwk_attach(): " 551 "Fixed type interrupt is not supported\n"); 552 goto attach_fail_intr_a; 553 } 554 555 err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count); 556 if ((err != DDI_SUCCESS) || (intr_count != 1)) { 557 cmn_err(CE_WARN, "iwk_attach(): " 558 "No fixed interrupts\n"); 559 goto attach_fail_intr_a; 560 } 561 562 sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP); 563 564 err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0, 565 intr_count, &intr_actual, 0); 566 if ((err != DDI_SUCCESS) || (intr_actual != 1)) { 567 cmn_err(CE_WARN, "iwk_attach(): " 568 "ddi_intr_alloc() failed 0x%x\n", err); 569 goto attach_fail_intr_b; 570 } 571 572 err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri); 573 if (err != DDI_SUCCESS) { 574 cmn_err(CE_WARN, "iwk_attach(): " 575 "ddi_intr_get_pri() failed 0x%x\n", err); 576 goto attach_fail_intr_c; 577 } 578 579 
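	/*
	 * The adapter locks below are created with the interrupt priority
	 * obtained from ddi_intr_get_pri() above.  A mutex that is also
	 * taken in the interrupt handler must be initialized with that
	 * priority wrapped in DDI_INTR_PRI(); the general pattern (a
	 * sketch of the calls that follow, with 'some_lock' standing in
	 * for each of the driver mutexes) is
	 *
	 *	mutex_init(&some_lock, NULL, MUTEX_DRIVER,
	 *	    DDI_INTR_PRI(sc->sc_intr_pri));
	 *
	 * rather than passing NULL for the interrupt block cookie.
	 */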
mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER, 580 DDI_INTR_PRI(sc->sc_intr_pri)); 581 mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER, 582 DDI_INTR_PRI(sc->sc_intr_pri)); 583 mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER, 584 DDI_INTR_PRI(sc->sc_intr_pri)); 585 mutex_init(&sc->sc_ibss.node_tb_lock, NULL, MUTEX_DRIVER, 586 DDI_INTR_PRI(sc->sc_intr_pri)); 587 588 cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL); 589 cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL); 590 cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL); 591 /* 592 * initialize the mfthread 593 */ 594 cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL); 595 sc->sc_mf_thread = NULL; 596 sc->sc_mf_thread_switch = 0; 597 598 /* 599 * Allocate shared page. 600 */ 601 err = iwk_alloc_shared(sc); 602 if (err != DDI_SUCCESS) { 603 cmn_err(CE_WARN, "iwk_attach(): " 604 "failed to allocate shared page\n"); 605 goto attach_fail3; 606 } 607 608 /* 609 * Allocate keep warm page. 610 */ 611 err = iwk_alloc_kw(sc); 612 if (err != DDI_SUCCESS) { 613 cmn_err(CE_WARN, "iwk_attach(): " 614 "failed to allocate keep warm page\n"); 615 goto attach_fail3a; 616 } 617 618 /* 619 * Do some necessary hardware initializations. 620 */ 621 err = iwk_preinit(sc); 622 if (err != DDI_SUCCESS) { 623 cmn_err(CE_WARN, "iwk_attach(): " 624 "failed to init hardware\n"); 625 goto attach_fail4; 626 } 627 628 /* initialize EEPROM */ 629 err = iwk_eep_load(sc); /* get hardware configurations from eeprom */ 630 if (err != 0) { 631 cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n"); 632 goto attach_fail4; 633 } 634 635 if (LE_16(sc->sc_eep_map.calib_version) < EEP_TX_POWER_VERSION_NEW) { 636 cmn_err(CE_WARN, "older EEPROM detected\n"); 637 goto attach_fail4; 638 } 639 640 iwk_get_mac_from_eep(sc); 641 642 err = iwk_ring_init(sc); 643 if (err != DDI_SUCCESS) { 644 cmn_err(CE_WARN, "iwk_attach(): " 645 "failed to allocate and initialize ring\n"); 646 goto attach_fail4; 647 } 648 649 sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin; 650 651 err = iwk_alloc_fw_dma(sc); 652 if (err != DDI_SUCCESS) { 653 cmn_err(CE_WARN, "iwk_attach(): " 654 "failed to allocate firmware dma\n"); 655 goto attach_fail5; 656 } 657 658 /* 659 * Initialize the wifi part, which will be used by 660 * generic layer 661 */ 662 ic = &sc->sc_ic; 663 ic->ic_phytype = IEEE80211_T_OFDM; 664 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 665 ic->ic_state = IEEE80211_S_INIT; 666 ic->ic_maxrssi = 100; /* experimental number */ 667 ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT | 668 IEEE80211_C_PMGT | IEEE80211_C_SHSLOT; 669 /* 670 * use software WEP and TKIP, hardware CCMP; 671 */ 672 ic->ic_caps |= IEEE80211_C_AES_CCM; 673 /* 674 * Support WPA/WPA2 675 */ 676 ic->ic_caps |= IEEE80211_C_WPA; 677 /* 678 * support Adhoc mode 679 */ 680 ic->ic_caps |= IEEE80211_C_IBSS; 681 682 /* set supported .11b and .11g rates */ 683 ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b; 684 ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g; 685 686 /* set supported .11b and .11g channels (1 through 11) */ 687 for (i = 1; i <= 11; i++) { 688 ic->ic_sup_channels[i].ich_freq = 689 ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ); 690 ic->ic_sup_channels[i].ich_flags = 691 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM | 692 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ | 693 IEEE80211_CHAN_PASSIVE; 694 } 695 ic->ic_ibss_chan = &ic->ic_sup_channels[0]; 696 697 ic->ic_xmit = iwk_send; 698 /* 699 * init Wifi layer 700 */ 701 ieee80211_attach(ic); 702 703 /* 704 * different instance has different WPA door 705 */ 706 (void) 
snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR, 707 ddi_driver_name(dip), 708 ddi_get_instance(dip)); 709 710 /* 711 * Override 80211 default routines 712 */ 713 sc->sc_newstate = ic->ic_newstate; 714 ic->ic_newstate = iwk_newstate; 715 ic->ic_watchdog = iwk_watchdog; 716 sc->sc_recv_mgmt = ic->ic_recv_mgmt; 717 ic->ic_recv_mgmt = iwk_recv_mgmt; 718 ic->ic_node_alloc = iwk_node_alloc; 719 ic->ic_node_free = iwk_node_free; 720 ic->ic_crypto.cs_key_set = iwk_key_set; 721 ieee80211_media_init(ic); 722 /* 723 * initialize default tx key 724 */ 725 ic->ic_def_txkey = 0; 726 err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX, 727 iwk_rx_softintr, (caddr_t)sc); 728 if (err != DDI_SUCCESS) { 729 cmn_err(CE_WARN, "iwk_attach(): " 730 "add soft interrupt failed\n"); 731 goto attach_fail7; 732 } 733 734 /* 735 * Add the interrupt handler 736 */ 737 err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwk_intr, 738 (caddr_t)sc, NULL); 739 if (err != DDI_SUCCESS) { 740 cmn_err(CE_WARN, "iwk_attach(): " 741 "ddi_intr_add_handle() failed\n"); 742 goto attach_fail8; 743 } 744 745 err = ddi_intr_enable(sc->sc_intr_htable[0]); 746 if (err != DDI_SUCCESS) { 747 cmn_err(CE_WARN, "iwk_attach(): " 748 "ddi_intr_enable() failed\n"); 749 goto attach_fail_intr_d; 750 } 751 752 /* 753 * Initialize pointer to device specific functions 754 */ 755 wd.wd_secalloc = WIFI_SEC_NONE; 756 wd.wd_opmode = ic->ic_opmode; 757 IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr); 758 759 macp = mac_alloc(MAC_VERSION); 760 if (macp == NULL) { 761 cmn_err(CE_WARN, 762 "iwk_attach(): failed to do mac_alloc()\n"); 763 goto attach_fail9; 764 } 765 766 macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI; 767 macp->m_driver = sc; 768 macp->m_dip = dip; 769 macp->m_src_addr = ic->ic_macaddr; 770 macp->m_callbacks = &iwk_m_callbacks; 771 macp->m_min_sdu = 0; 772 macp->m_max_sdu = IEEE80211_MTU; 773 macp->m_pdata = &wd; 774 macp->m_pdata_size = sizeof (wd); 775 776 /* 777 * Register the macp to mac 778 */ 779 err = mac_register(macp, &ic->ic_mach); 780 mac_free(macp); 781 if (err != DDI_SUCCESS) { 782 cmn_err(CE_WARN, 783 "iwk_attach(): failed to do mac_register()\n"); 784 goto attach_fail9; 785 } 786 787 /* 788 * Create minor node of type DDI_NT_NET_WIFI 789 */ 790 (void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance); 791 err = ddi_create_minor_node(dip, strbuf, S_IFCHR, 792 instance + 1, DDI_NT_NET_WIFI, 0); 793 if (err != DDI_SUCCESS) 794 cmn_err(CE_WARN, 795 "iwk_attach(): failed to do ddi_create_minor_node()\n"); 796 797 /* 798 * Notify link is down now 799 */ 800 mac_link_update(ic->ic_mach, LINK_STATE_DOWN); 801 802 /* 803 * create the mf thread to handle the link status, 804 * recovery fatal error, etc. 
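	 * The sc_mf_thread_switch flag below acts as the run switch for
	 * that thread: iwk_detach() clears it and then waits on sc_mt_cv
	 * until the thread has exited, roughly (a sketch of the handshake
	 * that appears in iwk_detach()):
	 *
	 *	mutex_enter(&sc->sc_mt_lock);
	 *	sc->sc_mf_thread_switch = 0;
	 *	while (sc->sc_mf_thread != NULL)
	 *		(void) cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock);
	 *	mutex_exit(&sc->sc_mt_lock);
	 *
	 * so the thread is expected to re-check the flag on every pass and
	 * signal sc_mt_cv when it leaves.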
805 */ 806 sc->sc_mf_thread_switch = 1; 807 if (sc->sc_mf_thread == NULL) 808 sc->sc_mf_thread = thread_create((caddr_t)NULL, 0, 809 iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri); 810 811 sc->sc_flags |= IWK_F_ATTACHED; 812 813 return (DDI_SUCCESS); 814 attach_fail9: 815 (void) ddi_intr_disable(sc->sc_intr_htable[0]); 816 attach_fail_intr_d: 817 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]); 818 819 attach_fail8: 820 (void) ddi_intr_remove_softint(sc->sc_soft_hdl); 821 sc->sc_soft_hdl = NULL; 822 attach_fail7: 823 ieee80211_detach(ic); 824 attach_fail6: 825 iwk_free_fw_dma(sc); 826 attach_fail5: 827 iwk_ring_free(sc); 828 attach_fail4: 829 iwk_free_kw(sc); 830 attach_fail3a: 831 iwk_free_shared(sc); 832 attach_fail3: 833 iwk_destroy_locks(sc); 834 attach_fail_intr_c: 835 (void) ddi_intr_free(sc->sc_intr_htable[0]); 836 attach_fail_intr_b: 837 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t)); 838 attach_fail_intr_a: 839 ddi_regs_map_free(&sc->sc_handle); 840 attach_fail2a: 841 ddi_regs_map_free(&sc->sc_cfg_handle); 842 attach_fail2: 843 ddi_soft_state_free(iwk_soft_state_p, instance); 844 attach_fail1: 845 return (err); 846 } 847 848 int 849 iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 850 { 851 iwk_sc_t *sc; 852 int err; 853 854 sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip)); 855 ASSERT(sc != NULL); 856 857 switch (cmd) { 858 case DDI_DETACH: 859 break; 860 case DDI_SUSPEND: 861 mutex_enter(&sc->sc_glock); 862 sc->sc_flags |= IWK_F_SUSPEND; 863 mutex_exit(&sc->sc_glock); 864 if (sc->sc_flags & IWK_F_RUNNING) { 865 iwk_stop(sc); 866 } 867 868 IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n")); 869 return (DDI_SUCCESS); 870 default: 871 return (DDI_FAILURE); 872 } 873 874 if (!(sc->sc_flags & IWK_F_ATTACHED)) 875 return (DDI_FAILURE); 876 877 err = mac_disable(sc->sc_ic.ic_mach); 878 if (err != DDI_SUCCESS) 879 return (err); 880 881 /* 882 * Destroy the mf_thread 883 */ 884 mutex_enter(&sc->sc_mt_lock); 885 sc->sc_mf_thread_switch = 0; 886 while (sc->sc_mf_thread != NULL) { 887 if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) 888 break; 889 } 890 mutex_exit(&sc->sc_mt_lock); 891 892 iwk_stop(sc); 893 DELAY(500000); 894 895 /* 896 * Unregiste from the MAC layer subsystem 897 */ 898 (void) mac_unregister(sc->sc_ic.ic_mach); 899 900 mutex_enter(&sc->sc_glock); 901 iwk_free_fw_dma(sc); 902 iwk_ring_free(sc); 903 iwk_free_kw(sc); 904 iwk_free_shared(sc); 905 mutex_exit(&sc->sc_glock); 906 907 (void) ddi_intr_disable(sc->sc_intr_htable[0]); 908 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]); 909 (void) ddi_intr_free(sc->sc_intr_htable[0]); 910 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t)); 911 912 (void) ddi_intr_remove_softint(sc->sc_soft_hdl); 913 sc->sc_soft_hdl = NULL; 914 915 /* 916 * detach ieee80211 917 */ 918 ieee80211_detach(&sc->sc_ic); 919 920 iwk_destroy_locks(sc); 921 922 ddi_regs_map_free(&sc->sc_handle); 923 ddi_regs_map_free(&sc->sc_cfg_handle); 924 ddi_remove_minor_node(dip, NULL); 925 ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip)); 926 927 return (DDI_SUCCESS); 928 } 929 930 /* 931 * quiesce(9E) entry point. 932 * 933 * This function is called when the system is single-threaded at high 934 * PIL with preemption disabled. Therefore, this function must not be 935 * blocked. 936 * 937 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 938 * DDI_FAILURE indicates an error condition and should almost never happen. 
939 */ 940 int 941 iwk_quiesce(dev_info_t *dip) 942 { 943 iwk_sc_t *sc; 944 945 sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip)); 946 ASSERT(sc != NULL); 947 948 /* no message prints and no lock accquisition */ 949 #ifdef DEBUG 950 iwk_dbg_flags = 0; 951 #endif 952 sc->sc_flags |= IWK_F_QUIESCED; 953 954 iwk_stop(sc); 955 956 return (DDI_SUCCESS); 957 } 958 959 static void 960 iwk_destroy_locks(iwk_sc_t *sc) 961 { 962 cv_destroy(&sc->sc_mt_cv); 963 mutex_destroy(&sc->sc_mt_lock); 964 cv_destroy(&sc->sc_tx_cv); 965 cv_destroy(&sc->sc_cmd_cv); 966 cv_destroy(&sc->sc_fw_cv); 967 mutex_destroy(&sc->sc_tx_lock); 968 mutex_destroy(&sc->sc_glock); 969 } 970 971 /* 972 * Allocate an area of memory and a DMA handle for accessing it 973 */ 974 static int 975 iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize, 976 ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p, 977 uint_t dma_flags, iwk_dma_t *dma_p) 978 { 979 caddr_t vaddr; 980 int err; 981 982 /* 983 * Allocate handle 984 */ 985 err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p, 986 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl); 987 if (err != DDI_SUCCESS) { 988 dma_p->dma_hdl = NULL; 989 return (DDI_FAILURE); 990 } 991 992 /* 993 * Allocate memory 994 */ 995 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p, 996 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING), 997 DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl); 998 if (err != DDI_SUCCESS) { 999 ddi_dma_free_handle(&dma_p->dma_hdl); 1000 dma_p->dma_hdl = NULL; 1001 dma_p->acc_hdl = NULL; 1002 return (DDI_FAILURE); 1003 } 1004 1005 /* 1006 * Bind the two together 1007 */ 1008 dma_p->mem_va = vaddr; 1009 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL, 1010 vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL, 1011 &dma_p->cookie, &dma_p->ncookies); 1012 if (err != DDI_DMA_MAPPED) { 1013 ddi_dma_mem_free(&dma_p->acc_hdl); 1014 ddi_dma_free_handle(&dma_p->dma_hdl); 1015 dma_p->acc_hdl = NULL; 1016 dma_p->dma_hdl = NULL; 1017 return (DDI_FAILURE); 1018 } 1019 1020 dma_p->nslots = ~0U; 1021 dma_p->size = ~0U; 1022 dma_p->token = ~0U; 1023 dma_p->offset = 0; 1024 return (DDI_SUCCESS); 1025 } 1026 1027 /* 1028 * Free one allocated area of DMAable memory 1029 */ 1030 static void 1031 iwk_free_dma_mem(iwk_dma_t *dma_p) 1032 { 1033 if (dma_p->dma_hdl != NULL) { 1034 if (dma_p->ncookies) { 1035 (void) ddi_dma_unbind_handle(dma_p->dma_hdl); 1036 dma_p->ncookies = 0; 1037 } 1038 ddi_dma_free_handle(&dma_p->dma_hdl); 1039 dma_p->dma_hdl = NULL; 1040 } 1041 1042 if (dma_p->acc_hdl != NULL) { 1043 ddi_dma_mem_free(&dma_p->acc_hdl); 1044 dma_p->acc_hdl = NULL; 1045 } 1046 } 1047 1048 /* 1049 * 1050 */ 1051 static int 1052 iwk_alloc_fw_dma(iwk_sc_t *sc) 1053 { 1054 int err = DDI_SUCCESS; 1055 iwk_dma_t *dma_p; 1056 char *t; 1057 1058 /* 1059 * firmware image layout: 1060 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->| 1061 */ 1062 t = (char *)(sc->sc_hdr + 1); 1063 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz), 1064 &fw_dma_attr, &iwk_dma_accattr, 1065 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1066 &sc->sc_dma_fw_text); 1067 dma_p = &sc->sc_dma_fw_text; 1068 IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n", 1069 dma_p->ncookies, dma_p->cookie.dmac_address, 1070 dma_p->cookie.dmac_size)); 1071 if (err != DDI_SUCCESS) { 1072 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc" 1073 " text dma memory"); 1074 goto fail; 1075 } 1076 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz)); 1077 1078 t += LE_32(sc->sc_hdr->textsz); 
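	/*
	 * Each remaining firmware segment is handled with the same pattern
	 * as the text segment above: allocate a DMA region sized from the
	 * corresponding header field, copy the segment out of the ucode
	 * image, then advance 't' past it.  Condensed, one step looks like
	 * this (a sketch, with 'size' standing in for the per-segment
	 * header field):
	 *
	 *	err = iwk_alloc_dma_mem(sc, LE_32(size), &fw_dma_attr,
	 *	    &iwk_dma_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	 *	    dma_p);
	 *	if (err == DDI_SUCCESS)
	 *		(void) memcpy(dma_p->mem_va, t, LE_32(size));
	 *	t += LE_32(size);
	 *
	 * The data segment is copied twice (sc_dma_fw_data and
	 * sc_dma_fw_data_bak), and the boot segment is not copied at all;
	 * only its address is recorded in sc_boot for iwk_load_firmware().
	 */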
	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data);
	dma_p = &sc->sc_dma_fw_data;
	IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " data dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data_bak);
	dma_p = &sc->sc_dma_fw_data_bak;
	IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " data backup dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	t += LE_32(sc->sc_hdr->datasz);
	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
	    &fw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_text);
	dma_p = &sc->sc_dma_fw_init_text;
	IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " init text dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));

	t += LE_32(sc->sc_hdr->init_textsz);
	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
	    &fw_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_data);
	dma_p = &sc->sc_dma_fw_init_data;
	IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
		    " init data dma memory");
		goto fail;
	}
	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));

	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
fail:
	return (err);
}

static void
iwk_free_fw_dma(iwk_sc_t *sc)
{
	iwk_free_dma_mem(&sc->sc_dma_fw_text);
	iwk_free_dma_mem(&sc->sc_dma_fw_data);
	iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
	iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
	iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
}

/*
 * Allocate a shared page between host and NIC.
1161 */ 1162 static int 1163 iwk_alloc_shared(iwk_sc_t *sc) 1164 { 1165 iwk_dma_t *dma_p; 1166 int err = DDI_SUCCESS; 1167 1168 /* must be aligned on a 4K-page boundary */ 1169 err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t), 1170 &sh_dma_attr, &iwk_dma_descattr, 1171 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1172 &sc->sc_dma_sh); 1173 if (err != DDI_SUCCESS) 1174 goto fail; 1175 sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va; 1176 1177 dma_p = &sc->sc_dma_sh; 1178 IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n", 1179 dma_p->ncookies, dma_p->cookie.dmac_address, 1180 dma_p->cookie.dmac_size)); 1181 1182 return (err); 1183 fail: 1184 iwk_free_shared(sc); 1185 return (err); 1186 } 1187 1188 static void 1189 iwk_free_shared(iwk_sc_t *sc) 1190 { 1191 iwk_free_dma_mem(&sc->sc_dma_sh); 1192 } 1193 1194 /* 1195 * Allocate a keep warm page. 1196 */ 1197 static int 1198 iwk_alloc_kw(iwk_sc_t *sc) 1199 { 1200 iwk_dma_t *dma_p; 1201 int err = DDI_SUCCESS; 1202 1203 /* must be aligned on a 4K-page boundary */ 1204 err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE, 1205 &kw_dma_attr, &iwk_dma_accattr, 1206 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1207 &sc->sc_dma_kw); 1208 if (err != DDI_SUCCESS) 1209 goto fail; 1210 1211 dma_p = &sc->sc_dma_kw; 1212 IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n", 1213 dma_p->ncookies, dma_p->cookie.dmac_address, 1214 dma_p->cookie.dmac_size)); 1215 1216 return (err); 1217 fail: 1218 iwk_free_kw(sc); 1219 return (err); 1220 } 1221 1222 static void 1223 iwk_free_kw(iwk_sc_t *sc) 1224 { 1225 iwk_free_dma_mem(&sc->sc_dma_kw); 1226 } 1227 1228 static int 1229 iwk_alloc_rx_ring(iwk_sc_t *sc) 1230 { 1231 iwk_rx_ring_t *ring; 1232 iwk_rx_data_t *data; 1233 iwk_dma_t *dma_p; 1234 int i, err = DDI_SUCCESS; 1235 1236 ring = &sc->sc_rxq; 1237 ring->cur = 0; 1238 1239 err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t), 1240 &ring_desc_dma_attr, &iwk_dma_descattr, 1241 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1242 &ring->dma_desc); 1243 if (err != DDI_SUCCESS) { 1244 cmn_err(CE_WARN, "dma alloc rx ring desc failed\n"); 1245 goto fail; 1246 } 1247 ring->desc = (uint32_t *)ring->dma_desc.mem_va; 1248 dma_p = &ring->dma_desc; 1249 IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n", 1250 dma_p->ncookies, dma_p->cookie.dmac_address, 1251 dma_p->cookie.dmac_size)); 1252 1253 /* 1254 * Allocate Rx buffers. 1255 */ 1256 for (i = 0; i < RX_QUEUE_SIZE; i++) { 1257 data = &ring->data[i]; 1258 err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz, 1259 &rx_buffer_dma_attr, &iwk_dma_accattr, 1260 DDI_DMA_READ | DDI_DMA_STREAMING, 1261 &data->dma_data); 1262 if (err != DDI_SUCCESS) { 1263 cmn_err(CE_WARN, "dma alloc rx ring buf[%d] " 1264 "failed\n", i); 1265 goto fail; 1266 } 1267 /* 1268 * the physical address bit [8-36] are used, 1269 * instead of bit [0-31] in 3945. 
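		 * Dropping the low 8 bits is what lets a 36-bit buffer
		 * address fit in the 32-bit descriptor word, and it is also
		 * why rx_buffer_dma_attr asks for 0x100 (256-byte) alignment:
		 * the device in effect reconstructs the address as
		 *
		 *	paddr = (uint64_t)ring->desc[i] << 8;
		 *
		 * so any buffer that is not 256-byte aligned would have its
		 * low address bits silently truncated.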
1270 */ 1271 ring->desc[i] = (uint32_t) 1272 (data->dma_data.cookie.dmac_address >> 8); 1273 } 1274 dma_p = &ring->data[0].dma_data; 1275 IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx " 1276 "size:%lx]\n", 1277 dma_p->ncookies, dma_p->cookie.dmac_address, 1278 dma_p->cookie.dmac_size)); 1279 1280 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV); 1281 1282 return (err); 1283 1284 fail: 1285 iwk_free_rx_ring(sc); 1286 return (err); 1287 } 1288 1289 static void 1290 iwk_reset_rx_ring(iwk_sc_t *sc) 1291 { 1292 int n; 1293 1294 iwk_mac_access_enter(sc); 1295 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 1296 for (n = 0; n < 2000; n++) { 1297 if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) 1298 break; 1299 DELAY(1000); 1300 } 1301 1302 if (n == 2000) 1303 IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n")); 1304 1305 iwk_mac_access_exit(sc); 1306 1307 sc->sc_rxq.cur = 0; 1308 } 1309 1310 static void 1311 iwk_free_rx_ring(iwk_sc_t *sc) 1312 { 1313 int i; 1314 1315 for (i = 0; i < RX_QUEUE_SIZE; i++) { 1316 if (sc->sc_rxq.data[i].dma_data.dma_hdl) 1317 IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data, 1318 DDI_DMA_SYNC_FORCPU); 1319 iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data); 1320 } 1321 1322 if (sc->sc_rxq.dma_desc.dma_hdl) 1323 IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV); 1324 iwk_free_dma_mem(&sc->sc_rxq.dma_desc); 1325 } 1326 1327 static int 1328 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring, 1329 int slots, int qid) 1330 { 1331 iwk_tx_data_t *data; 1332 iwk_tx_desc_t *desc_h; 1333 uint32_t paddr_desc_h; 1334 iwk_cmd_t *cmd_h; 1335 uint32_t paddr_cmd_h; 1336 iwk_dma_t *dma_p; 1337 int i, err = DDI_SUCCESS; 1338 1339 ring->qid = qid; 1340 ring->count = TFD_QUEUE_SIZE_MAX; 1341 ring->window = slots; 1342 ring->queued = 0; 1343 ring->cur = 0; 1344 1345 err = iwk_alloc_dma_mem(sc, 1346 TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t), 1347 &ring_desc_dma_attr, &iwk_dma_descattr, 1348 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1349 &ring->dma_desc); 1350 if (err != DDI_SUCCESS) { 1351 cmn_err(CE_WARN, "dma alloc tx ring desc[%d] " 1352 "failed\n", qid); 1353 goto fail; 1354 } 1355 dma_p = &ring->dma_desc; 1356 IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n", 1357 dma_p->ncookies, dma_p->cookie.dmac_address, 1358 dma_p->cookie.dmac_size)); 1359 1360 desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va; 1361 paddr_desc_h = ring->dma_desc.cookie.dmac_address; 1362 1363 err = iwk_alloc_dma_mem(sc, 1364 TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t), 1365 &cmd_dma_attr, &iwk_dma_accattr, 1366 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 1367 &ring->dma_cmd); 1368 if (err != DDI_SUCCESS) { 1369 cmn_err(CE_WARN, "dma alloc tx ring cmd[%d] " 1370 "failed\n", qid); 1371 goto fail; 1372 } 1373 dma_p = &ring->dma_cmd; 1374 IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n", 1375 dma_p->ncookies, dma_p->cookie.dmac_address, 1376 dma_p->cookie.dmac_size)); 1377 1378 cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va; 1379 paddr_cmd_h = ring->dma_cmd.cookie.dmac_address; 1380 1381 /* 1382 * Allocate Tx buffers. 
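	 * Every slot pairs the i-th hardware descriptor with the i-th
	 * command buffer, and their DMA addresses are precomputed from the
	 * base cookie address plus the offset of that element; the
	 * _PTRDIFF() arithmetic below is equivalent to
	 *
	 *	data->paddr_desc = paddr_desc_h + i * sizeof (iwk_tx_desc_t);
	 *	data->paddr_cmd = paddr_cmd_h + i * sizeof (iwk_cmd_t);
	 *
	 * so each slot can hand the firmware the physical address of its
	 * own descriptor and command without any further lookups.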
1383 */ 1384 ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX, 1385 KM_NOSLEEP); 1386 if (ring->data == NULL) { 1387 cmn_err(CE_WARN, "could not allocate tx data slots\n"); 1388 goto fail; 1389 } 1390 1391 for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) { 1392 data = &ring->data[i]; 1393 err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz, 1394 &tx_buffer_dma_attr, &iwk_dma_accattr, 1395 DDI_DMA_WRITE | DDI_DMA_STREAMING, 1396 &data->dma_data); 1397 if (err != DDI_SUCCESS) { 1398 cmn_err(CE_WARN, "dma alloc tx ring " 1399 "buf[%d] failed\n", i); 1400 goto fail; 1401 } 1402 1403 data->desc = desc_h + i; 1404 data->paddr_desc = paddr_desc_h + 1405 _PTRDIFF(data->desc, desc_h); 1406 data->cmd = cmd_h + i; /* (i % slots); */ 1407 /* ((i % slots) * sizeof (iwk_cmd_t)); */ 1408 data->paddr_cmd = paddr_cmd_h + 1409 _PTRDIFF(data->cmd, cmd_h); 1410 } 1411 dma_p = &ring->data[0].dma_data; 1412 IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx " 1413 "size:%lx]\n", 1414 dma_p->ncookies, dma_p->cookie.dmac_address, 1415 dma_p->cookie.dmac_size)); 1416 1417 return (err); 1418 1419 fail: 1420 if (ring->data) 1421 kmem_free(ring->data, 1422 sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX); 1423 iwk_free_tx_ring(sc, ring); 1424 return (err); 1425 } 1426 1427 static void 1428 iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring) 1429 { 1430 iwk_tx_data_t *data; 1431 int i, n; 1432 1433 iwk_mac_access_enter(sc); 1434 1435 IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0); 1436 for (n = 0; n < 200; n++) { 1437 if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) & 1438 IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) 1439 break; 1440 DELAY(10); 1441 } 1442 if (n == 200) { 1443 IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n", 1444 ring->qid)); 1445 } 1446 iwk_mac_access_exit(sc); 1447 1448 for (i = 0; i < ring->count; i++) { 1449 data = &ring->data[i]; 1450 IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV); 1451 } 1452 1453 ring->queued = 0; 1454 ring->cur = 0; 1455 } 1456 1457 /*ARGSUSED*/ 1458 static void 1459 iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring) 1460 { 1461 int i; 1462 1463 if (ring->dma_desc.dma_hdl != NULL) 1464 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV); 1465 iwk_free_dma_mem(&ring->dma_desc); 1466 1467 if (ring->dma_cmd.dma_hdl != NULL) 1468 IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV); 1469 iwk_free_dma_mem(&ring->dma_cmd); 1470 1471 if (ring->data != NULL) { 1472 for (i = 0; i < ring->count; i++) { 1473 if (ring->data[i].dma_data.dma_hdl) 1474 IWK_DMA_SYNC(ring->data[i].dma_data, 1475 DDI_DMA_SYNC_FORDEV); 1476 iwk_free_dma_mem(&ring->data[i].dma_data); 1477 } 1478 kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t)); 1479 } 1480 } 1481 1482 static int 1483 iwk_ring_init(iwk_sc_t *sc) 1484 { 1485 int i, err = DDI_SUCCESS; 1486 1487 for (i = 0; i < IWK_NUM_QUEUES; i++) { 1488 if (i == IWK_CMD_QUEUE_NUM) 1489 continue; 1490 err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS, 1491 i); 1492 if (err != DDI_SUCCESS) 1493 goto fail; 1494 } 1495 err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM], 1496 TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM); 1497 if (err != DDI_SUCCESS) 1498 goto fail; 1499 err = iwk_alloc_rx_ring(sc); 1500 if (err != DDI_SUCCESS) 1501 goto fail; 1502 return (err); 1503 1504 fail: 1505 return (err); 1506 } 1507 1508 static void 1509 iwk_ring_free(iwk_sc_t *sc) 1510 { 1511 int i = IWK_NUM_QUEUES; 1512 1513 iwk_free_rx_ring(sc); 1514 while (--i >= 0) { 1515 iwk_free_tx_ring(sc, &sc->sc_txq[i]); 1516 } 1517 } 1518 1519 /* ARGSUSED 
*/ 1520 static ieee80211_node_t * 1521 iwk_node_alloc(ieee80211com_t *ic) 1522 { 1523 iwk_amrr_t *amrr; 1524 1525 amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP); 1526 if (amrr != NULL) 1527 iwk_amrr_init(amrr); 1528 return (&amrr->in); 1529 } 1530 1531 static void 1532 iwk_node_free(ieee80211_node_t *in) 1533 { 1534 ieee80211com_t *ic = in->in_ic; 1535 1536 ic->ic_node_cleanup(in); 1537 if (in->in_wpa_ie != NULL) 1538 ieee80211_free(in->in_wpa_ie); 1539 kmem_free(in, sizeof (iwk_amrr_t)); 1540 } 1541 1542 /*ARGSUSED*/ 1543 static int 1544 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg) 1545 { 1546 iwk_sc_t *sc = (iwk_sc_t *)ic; 1547 ieee80211_node_t *in = ic->ic_bss; 1548 enum ieee80211_state ostate = ic->ic_state; 1549 int i, err = IWK_SUCCESS; 1550 1551 mutex_enter(&sc->sc_glock); 1552 switch (nstate) { 1553 case IEEE80211_S_SCAN: 1554 switch (ostate) { 1555 case IEEE80211_S_INIT: 1556 { 1557 iwk_add_sta_t node; 1558 1559 sc->sc_flags |= IWK_F_SCANNING; 1560 sc->sc_scan_pending = 0; 1561 iwk_set_led(sc, 2, 10, 2); 1562 1563 /* 1564 * clear association to receive beacons from 1565 * all BSS'es 1566 */ 1567 sc->sc_config.assoc_id = 0; 1568 sc->sc_config.filter_flags &= 1569 ~LE_32(RXON_FILTER_ASSOC_MSK); 1570 1571 IWK_DBG((IWK_DEBUG_80211, "config chan %d " 1572 "flags %x filter_flags %x\n", sc->sc_config.chan, 1573 sc->sc_config.flags, sc->sc_config.filter_flags)); 1574 1575 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 1576 sizeof (iwk_rxon_cmd_t), 1); 1577 if (err != IWK_SUCCESS) { 1578 cmn_err(CE_WARN, 1579 "could not clear association\n"); 1580 sc->sc_flags &= ~IWK_F_SCANNING; 1581 mutex_exit(&sc->sc_glock); 1582 return (err); 1583 } 1584 1585 /* add broadcast node to send probe request */ 1586 (void) memset(&node, 0, sizeof (node)); 1587 (void) memset(&node.bssid, 0xff, IEEE80211_ADDR_LEN); 1588 node.id = IWK_BROADCAST_ID; 1589 err = iwk_cmd(sc, REPLY_ADD_STA, &node, 1590 sizeof (node), 1); 1591 if (err != IWK_SUCCESS) { 1592 cmn_err(CE_WARN, "could not add " 1593 "broadcast node\n"); 1594 sc->sc_flags &= ~IWK_F_SCANNING; 1595 mutex_exit(&sc->sc_glock); 1596 return (err); 1597 } 1598 break; 1599 } 1600 1601 case IEEE80211_S_AUTH: 1602 case IEEE80211_S_ASSOC: 1603 case IEEE80211_S_RUN: 1604 sc->sc_flags |= IWK_F_SCANNING; 1605 sc->sc_scan_pending = 0; 1606 1607 iwk_set_led(sc, 2, 10, 2); 1608 /* FALLTHRU */ 1609 case IEEE80211_S_SCAN: 1610 mutex_exit(&sc->sc_glock); 1611 /* step to next channel before actual FW scan */ 1612 err = sc->sc_newstate(ic, nstate, arg); 1613 mutex_enter(&sc->sc_glock); 1614 if ((err != 0) || ((err = iwk_scan(sc)) != 0)) { 1615 cmn_err(CE_WARN, 1616 "could not initiate scan\n"); 1617 sc->sc_flags &= ~IWK_F_SCANNING; 1618 ieee80211_cancel_scan(ic); 1619 } 1620 mutex_exit(&sc->sc_glock); 1621 return (err); 1622 default: 1623 break; 1624 1625 } 1626 sc->sc_clk = 0; 1627 break; 1628 1629 case IEEE80211_S_AUTH: 1630 if (ostate == IEEE80211_S_SCAN) { 1631 sc->sc_flags &= ~IWK_F_SCANNING; 1632 } 1633 1634 /* reset state to handle reassociations correctly */ 1635 sc->sc_config.assoc_id = 0; 1636 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK); 1637 1638 /* 1639 * before sending authentication and association request frame, 1640 * we need do something in the hardware, such as setting the 1641 * channel same to the target AP... 
1642 */ 1643 if ((err = iwk_hw_set_before_auth(sc)) != 0) { 1644 cmn_err(CE_WARN, "could not setup firmware for " 1645 "authentication\n"); 1646 mutex_exit(&sc->sc_glock); 1647 return (err); 1648 } 1649 break; 1650 1651 case IEEE80211_S_RUN: 1652 if (ostate == IEEE80211_S_SCAN) { 1653 sc->sc_flags &= ~IWK_F_SCANNING; 1654 } 1655 1656 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 1657 /* let LED blink when monitoring */ 1658 iwk_set_led(sc, 2, 10, 10); 1659 break; 1660 } 1661 IWK_DBG((IWK_DEBUG_80211, "iwk: associated.")); 1662 1663 /* IBSS mode */ 1664 if (ic->ic_opmode == IEEE80211_M_IBSS) { 1665 /* 1666 * clean all nodes in ibss node table 1667 * in order to be consistent with hardware 1668 */ 1669 err = iwk_run_state_config_ibss(ic); 1670 if (err != IWK_SUCCESS) { 1671 cmn_err(CE_WARN, "iwk_newstate(): " 1672 "failed to update configuration " 1673 "in IBSS mode\n"); 1674 mutex_exit(&sc->sc_glock); 1675 return (err); 1676 } 1677 } 1678 1679 /* none IBSS mode */ 1680 if (ic->ic_opmode != IEEE80211_M_IBSS) { 1681 /* update adapter's configuration */ 1682 err = iwk_run_state_config_sta(ic); 1683 if (err != IWK_SUCCESS) { 1684 cmn_err(CE_WARN, "iwk_newstate(): " 1685 "failed to update configuration " 1686 "in none IBSS mode\n"); 1687 mutex_exit(&sc->sc_glock); 1688 return (err); 1689 } 1690 } 1691 1692 /* obtain current temperature of chipset */ 1693 sc->sc_tempera = iwk_curr_tempera(sc); 1694 1695 /* 1696 * make Tx power calibration to determine 1697 * the gains of DSP and radio 1698 */ 1699 err = iwk_tx_power_calibration(sc); 1700 if (err) { 1701 cmn_err(CE_WARN, "iwk_newstate(): " 1702 "failed to set tx power table\n"); 1703 mutex_exit(&sc->sc_glock); 1704 return (err); 1705 } 1706 1707 if (ic->ic_opmode == IEEE80211_M_IBSS) { 1708 1709 /* 1710 * allocate and transmit beacon frames 1711 */ 1712 err = iwk_start_tx_beacon(ic); 1713 if (err != IWK_SUCCESS) { 1714 cmn_err(CE_WARN, "iwk_newstate(): " 1715 "can't transmit beacon frames\n"); 1716 mutex_exit(&sc->sc_glock); 1717 return (err); 1718 } 1719 } 1720 1721 /* start automatic rate control */ 1722 mutex_enter(&sc->sc_mt_lock); 1723 if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) { 1724 sc->sc_flags |= IWK_F_RATE_AUTO_CTL; 1725 /* set rate to some reasonable initial value */ 1726 i = in->in_rates.ir_nrates - 1; 1727 while (i > 0 && IEEE80211_RATE(i) > 72) 1728 i--; 1729 in->in_txrate = i; 1730 } else { 1731 sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL; 1732 } 1733 mutex_exit(&sc->sc_mt_lock); 1734 1735 /* set LED on after associated */ 1736 iwk_set_led(sc, 2, 0, 1); 1737 break; 1738 1739 case IEEE80211_S_INIT: 1740 if (ostate == IEEE80211_S_SCAN) { 1741 sc->sc_flags &= ~IWK_F_SCANNING; 1742 } 1743 1744 /* set LED off after init */ 1745 iwk_set_led(sc, 2, 1, 0); 1746 break; 1747 case IEEE80211_S_ASSOC: 1748 if (ostate == IEEE80211_S_SCAN) { 1749 sc->sc_flags &= ~IWK_F_SCANNING; 1750 } 1751 1752 break; 1753 } 1754 1755 mutex_exit(&sc->sc_glock); 1756 1757 err = sc->sc_newstate(ic, nstate, arg); 1758 1759 if (nstate == IEEE80211_S_RUN) { 1760 1761 mutex_enter(&sc->sc_glock); 1762 1763 /* 1764 * make initialization for Receiver 1765 * sensitivity calibration 1766 */ 1767 err = iwk_rx_sens_init(sc); 1768 if (err) { 1769 cmn_err(CE_WARN, "iwk_newstate(): " 1770 "failed to init RX sensitivity\n"); 1771 mutex_exit(&sc->sc_glock); 1772 return (err); 1773 } 1774 1775 /* make initialization for Receiver gain balance */ 1776 err = iwk_rxgain_diff_init(sc); 1777 if (err) { 1778 cmn_err(CE_WARN, "iwk_newstate(): " 1779 "failed to init phy calibration\n"); 
			mutex_exit(&sc->sc_glock);
			return (err);
		}

		mutex_exit(&sc->sc_glock);

	}

	return (err);
}

static void
iwk_watchdog(void *arg)
{
	iwk_sc_t *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
#ifdef DEBUG
	timeout_id_t timeout_id = ic->ic_watchdog_timer;
#endif

	ieee80211_stop_watchdog(ic);

	if ((ic->ic_state != IEEE80211_S_AUTH) &&
	    (ic->ic_state != IEEE80211_S_ASSOC))
		return;

	if (ic->ic_bss->in_fails > 0) {
		IWK_DBG((IWK_DEBUG_80211, "watchdog (0x%x) reset: "
		    "node (0x%x)\n", timeout_id, &ic->ic_bss));
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
	} else {
		IWK_DBG((IWK_DEBUG_80211, "watchdog (0x%x) timeout: "
		    "node (0x%x), retry (%d)\n",
		    timeout_id, &ic->ic_bss, ic->ic_bss->in_fails + 1));
		ieee80211_watchdog(ic);
	}
}

/*ARGSUSED*/
static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	iwk_sc_t *sc = (iwk_sc_t *)ic;
	iwk_add_sta_t node;
	int err;
	uint8_t index1;

	switch (k->wk_cipher->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
	case IEEE80211_CIPHER_TKIP:
		return (1); /* software does it. */
	case IEEE80211_CIPHER_AES_CCM:
		break;
	default:
		return (0);
	}
	sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_DIS_DECRYPT_MSK |
	    RXON_FILTER_DIS_GRP_DECRYPT_MSK);

	mutex_enter(&sc->sc_glock);

	/* update ap/multicast node */
	(void) memset(&node, 0, sizeof (node));
	if (IEEE80211_IS_MULTICAST(mac)) {
		(void) memset(node.bssid, 0xff, 6);
		node.id = IWK_BROADCAST_ID;
	} else if (ic->ic_opmode == IEEE80211_M_IBSS) {
		mutex_exit(&sc->sc_glock);
		mutex_enter(&sc->sc_ibss.node_tb_lock);

		/*
		 * search for node in ibss node table
		 */
		for (index1 = IWK_STA_ID; index1 < IWK_STATION_COUNT;
		    index1++) {
			if (sc->sc_ibss.ibss_node_tb[index1].used &&
			    IEEE80211_ADDR_EQ(sc->sc_ibss.
			    ibss_node_tb[index1].node.bssid,
			    mac)) {
				break;
			}
		}
		if (index1 >= IWK_BROADCAST_ID) {
			cmn_err(CE_WARN, "iwk_key_set(): "
			    "this node is not in the hardware node table\n");
			mutex_exit(&sc->sc_ibss.node_tb_lock);
			return (0);
		} else {
			/*
			 * configure key for given node in hardware
			 */
			if (k->wk_flags & IEEE80211_KEY_XMIT) {
				sc->sc_ibss.ibss_node_tb[index1].
				    node.key_flags = 0;
				sc->sc_ibss.ibss_node_tb[index1].
				    node.keyp = k->wk_keyix;
			} else {
				sc->sc_ibss.ibss_node_tb[index1].
				    node.key_flags = (1 << 14);
				sc->sc_ibss.ibss_node_tb[index1].
				    node.keyp = k->wk_keyix + 4;
			}

			(void) memcpy(sc->sc_ibss.ibss_node_tb[index1].node.key,
			    k->wk_key, k->wk_keylen);
			sc->sc_ibss.ibss_node_tb[index1].node.key_flags |=
			    (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
			sc->sc_ibss.ibss_node_tb[index1].node.key_flags =
			    LE_16(sc->sc_ibss.ibss_node_tb[index1].
1889 node.key_flags); 1890 sc->sc_ibss.ibss_node_tb[index1].node.sta_mask = 1891 STA_MODIFY_KEY_MASK; 1892 sc->sc_ibss.ibss_node_tb[index1].node.control = 1; 1893 1894 mutex_enter(&sc->sc_glock); 1895 err = iwk_cmd(sc, REPLY_ADD_STA, 1896 &sc->sc_ibss.ibss_node_tb[index1].node, 1897 sizeof (iwk_add_sta_t), 1); 1898 if (err != IWK_SUCCESS) { 1899 cmn_err(CE_WARN, "iwk_key_set(): " 1900 "failed to update IBSS node in hardware\n"); 1901 mutex_exit(&sc->sc_glock); 1902 mutex_exit(&sc->sc_ibss.node_tb_lock); 1903 return (0); 1904 } 1905 mutex_exit(&sc->sc_glock); 1906 } 1907 mutex_exit(&sc->sc_ibss.node_tb_lock); 1908 return (1); 1909 } else { 1910 IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid); 1911 node.id = IWK_AP_ID; 1912 } 1913 if (k->wk_flags & IEEE80211_KEY_XMIT) { 1914 node.key_flags = 0; 1915 node.keyp = k->wk_keyix; 1916 } else { 1917 node.key_flags = (1 << 14); 1918 node.keyp = k->wk_keyix + 4; 1919 } 1920 (void) memcpy(node.key, k->wk_key, k->wk_keylen); 1921 node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8)); 1922 node.key_flags = LE_16(node.key_flags); 1923 node.sta_mask = STA_MODIFY_KEY_MASK; 1924 node.control = 1; 1925 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1); 1926 if (err != IWK_SUCCESS) { 1927 cmn_err(CE_WARN, "iwk_key_set():" 1928 "failed to update ap node\n"); 1929 mutex_exit(&sc->sc_glock); 1930 return (0); 1931 } 1932 mutex_exit(&sc->sc_glock); 1933 return (1); 1934 } 1935 1936 /* 1937 * exclusive access to mac begin. 1938 */ 1939 static void 1940 iwk_mac_access_enter(iwk_sc_t *sc) 1941 { 1942 uint32_t tmp; 1943 int n; 1944 1945 tmp = IWK_READ(sc, CSR_GP_CNTRL); 1946 IWK_WRITE(sc, CSR_GP_CNTRL, 1947 tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1948 1949 /* wait until we succeed */ 1950 for (n = 0; n < 1000; n++) { 1951 if ((IWK_READ(sc, CSR_GP_CNTRL) & 1952 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 1953 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) == 1954 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) 1955 break; 1956 DELAY(10); 1957 } 1958 if (n == 1000) 1959 IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n")); 1960 } 1961 1962 /* 1963 * exclusive access to mac end. 1964 */ 1965 static void 1966 iwk_mac_access_exit(iwk_sc_t *sc) 1967 { 1968 uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL); 1969 IWK_WRITE(sc, CSR_GP_CNTRL, 1970 tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1971 } 1972 1973 static uint32_t 1974 iwk_mem_read(iwk_sc_t *sc, uint32_t addr) 1975 { 1976 IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr); 1977 return (IWK_READ(sc, HBUS_TARG_MEM_RDAT)); 1978 } 1979 1980 static void 1981 iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data) 1982 { 1983 IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr); 1984 IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data); 1985 } 1986 1987 static uint32_t 1988 iwk_reg_read(iwk_sc_t *sc, uint32_t addr) 1989 { 1990 IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24)); 1991 return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT)); 1992 } 1993 1994 static void 1995 iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data) 1996 { 1997 IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24)); 1998 IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data); 1999 } 2000 2001 static void 2002 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr, 2003 uint32_t *data, int wlen) 2004 { 2005 for (; wlen > 0; wlen--, data++, addr += 4) 2006 iwk_reg_write(sc, addr, LE_32(*data)); 2007 } 2008 2009 2010 /* 2011 * ucode load/initialization steps: 2012 * 1) load Bootstrap State Machine (BSM) with "bootstrap" uCode image. 
2013 * BSM contains a small memory that *always* stays powered up, so it can 2014 * retain the bootstrap program even when the card is in a power-saving 2015 * power-down state. The BSM loads the small program into ARC processor's 2016 * instruction memory when triggered by power-up. 2017 * 2) load Initialize image via bootstrap program. 2018 * The Initialize image sets up regulatory and calibration data for the 2019 * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed. 2020 * The 4965 reply contains calibration data for temperature, voltage and tx gain 2021 * correction. 2022 */ 2023 static int 2024 iwk_load_firmware(iwk_sc_t *sc) 2025 { 2026 uint32_t *boot_fw = (uint32_t *)sc->sc_boot; 2027 uint32_t size = LE_32(sc->sc_hdr->bootsz); 2028 int n, err = IWK_SUCCESS; 2029 2030 /* 2031 * The physical address bit [4-35] of the initialize uCode. 2032 * In the initialize alive notify interrupt the physical address of 2033 * the runtime ucode will be set for loading. 2034 */ 2035 iwk_mac_access_enter(sc); 2036 2037 iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG, 2038 sc->sc_dma_fw_init_text.cookie.dmac_address >> 4); 2039 iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG, 2040 sc->sc_dma_fw_init_data.cookie.dmac_address >> 4); 2041 iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG, 2042 sc->sc_dma_fw_init_text.cookie.dmac_size); 2043 iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG, 2044 sc->sc_dma_fw_init_data.cookie.dmac_size); 2045 2046 /* load bootstrap code into BSM memory */ 2047 iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw, 2048 size / sizeof (uint32_t)); 2049 2050 iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0); 2051 iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND); 2052 iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t)); 2053 2054 /* 2055 * prepare to load initialize uCode 2056 */ 2057 iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); 2058 2059 /* wait while the adapter is busy loading the firmware */ 2060 for (n = 0; n < 1000; n++) { 2061 if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) & 2062 BSM_WR_CTRL_REG_BIT_START)) 2063 break; 2064 DELAY(10); 2065 } 2066 if (n == 1000) { 2067 cmn_err(CE_WARN, "timeout transferring firmware\n"); 2068 err = ETIMEDOUT; 2069 return (err); 2070 } 2071 2072 /* for future power-save mode use */ 2073 iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); 2074 2075 iwk_mac_access_exit(sc); 2076 2077 return (err); 2078 } 2079 2080 /*ARGSUSED*/ 2081 static void 2082 iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data) 2083 { 2084 ieee80211com_t *ic = &sc->sc_ic; 2085 iwk_rx_ring_t *ring = &sc->sc_rxq; 2086 iwk_rx_phy_res_t *stat; 2087 ieee80211_node_t *in; 2088 uint32_t *tail; 2089 struct ieee80211_frame *wh; 2090 mblk_t *mp; 2091 uint16_t len, rssi, mrssi, agc; 2092 int16_t t; 2093 uint32_t ants, i; 2094 struct iwk_rx_non_cfg_phy *phyinfo; 2095 uint32_t crc; 2096 2097 /* assuming not 11n here. 
cope with 11n in phase-II */ 2098 stat = (iwk_rx_phy_res_t *)(desc + 1); 2099 if (stat->cfg_phy_cnt > 20) { 2100 return; 2101 } 2102 2103 for (i = 0; i < RX_RES_PHY_CNT; i++) 2104 stat->non_cfg_phy[i] = LE_16(stat->non_cfg_phy[i]); 2105 2106 phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy; 2107 agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS; 2108 mrssi = 0; 2109 ants = (LE_16(stat->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK) >> 2110 RX_PHY_FLAGS_ANTENNAE_OFFSET; 2111 for (i = 0; i < 3; i++) { 2112 if (ants & (1 << i)) 2113 mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]); 2114 } 2115 t = mrssi - agc - 44; /* t is the dBM value */ 2116 /* 2117 * convert dBm to percentage ??? 2118 */ 2119 rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t))) / 2120 (75 * 75); 2121 if (rssi > 100) 2122 rssi = 100; 2123 if (rssi < 1) 2124 rssi = 1; 2125 len = LE_16(stat->byte_count); 2126 tail = (uint32_t *)((caddr_t)(stat + 1) + stat->cfg_phy_cnt + len); 2127 bcopy(tail, &crc, 4); 2128 2129 IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d " 2130 "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x " 2131 "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat), 2132 len, stat->rate.r.s.rate, LE_16(stat->channel), 2133 LE_32(stat->timestampl), stat->non_cfg_phy_cnt, 2134 stat->cfg_phy_cnt, LE_32(crc))); 2135 2136 if ((len < 16) || (len > sc->sc_dmabuf_sz)) { 2137 IWK_DBG((IWK_DEBUG_RX, "rx frame oversize\n")); 2138 return; 2139 } 2140 2141 /* 2142 * discard Rx frames with bad CRC 2143 */ 2144 if ((LE_32(crc) & 2145 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) != 2146 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) { 2147 IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n", 2148 LE_32(crc))); 2149 sc->sc_rx_err++; 2150 return; 2151 } 2152 2153 wh = (struct ieee80211_frame *) 2154 ((uint8_t *)(stat + 1)+ stat->cfg_phy_cnt); 2155 if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) { 2156 sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2); 2157 IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n", 2158 sc->sc_assoc_id)); 2159 } 2160 #ifdef DEBUG 2161 if (iwk_dbg_flags & IWK_DEBUG_RX) 2162 ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0); 2163 #endif 2164 in = ieee80211_find_rxnode(ic, wh); 2165 mp = allocb(len, BPRI_MED); 2166 if (mp) { 2167 (void) memcpy(mp->b_wptr, wh, len); 2168 mp->b_wptr += len; 2169 2170 /* send the frame to the 802.11 layer */ 2171 (void) ieee80211_input(ic, mp, in, rssi, 0); 2172 } else { 2173 sc->sc_rx_nobuf++; 2174 IWK_DBG((IWK_DEBUG_RX, 2175 "iwk_rx_intr(): alloc rx buf failed\n")); 2176 } 2177 /* release node reference */ 2178 ieee80211_free_node(in); 2179 } 2180 2181 /*ARGSUSED*/ 2182 static void 2183 iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data) 2184 { 2185 ieee80211com_t *ic = &sc->sc_ic; 2186 iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3]; 2187 iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1); 2188 iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss; 2189 2190 IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d" 2191 " retries=%d frame_count=%x nkill=%d " 2192 "rate=%x duration=%d status=%x\n", 2193 desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count, 2194 stat->bt_kill_count, stat->rate.r.s.rate, 2195 LE_16(stat->duration), LE_32(stat->status))); 2196 2197 amrr->txcnt++; 2198 IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt)); 2199 if (stat->ntries > 0) { 2200 amrr->retrycnt++; 2201 sc->sc_tx_retries++; 2202 IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n", 2203 sc->sc_tx_retries)); 2204 } 2205 
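/* the firmware reported a tx completion, so reset the driver's tx watchdog counter */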
2206 sc->sc_tx_timer = 0; 2207 2208 mutex_enter(&sc->sc_tx_lock); 2209 ring->queued--; 2210 if (ring->queued < 0) 2211 ring->queued = 0; 2212 if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) { 2213 sc->sc_need_reschedule = 0; 2214 mutex_exit(&sc->sc_tx_lock); 2215 mac_tx_update(ic->ic_mach); 2216 mutex_enter(&sc->sc_tx_lock); 2217 } 2218 mutex_exit(&sc->sc_tx_lock); 2219 } 2220 2221 static void 2222 iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc) 2223 { 2224 if ((desc->hdr.qid & 7) != 4) { 2225 return; 2226 } 2227 mutex_enter(&sc->sc_glock); 2228 sc->sc_flags |= IWK_F_CMD_DONE; 2229 cv_signal(&sc->sc_cmd_cv); 2230 mutex_exit(&sc->sc_glock); 2231 IWK_DBG((IWK_DEBUG_CMD, "rx cmd: " 2232 "qid=%x idx=%d flags=%x type=0x%x\n", 2233 desc->hdr.qid, desc->hdr.idx, desc->hdr.flags, 2234 desc->hdr.type)); 2235 } 2236 2237 static void 2238 iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc) 2239 { 2240 uint32_t base, i; 2241 struct iwk_alive_resp *ar = 2242 (struct iwk_alive_resp *)(desc + 1); 2243 2244 /* the microcontroller is ready */ 2245 IWK_DBG((IWK_DEBUG_FW, 2246 "microcode alive notification minor: %x major: %x type:" 2247 " %x subtype: %x\n", 2248 ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype)); 2249 2250 if (LE_32(ar->is_valid) != UCODE_VALID_OK) { 2251 IWK_DBG((IWK_DEBUG_FW, 2252 "microcontroller initialization failed\n")); 2253 } 2254 if (ar->ver_subtype == INITIALIZE_SUBTYPE) { 2255 IWK_DBG((IWK_DEBUG_FW, 2256 "initialization alive received.\n")); 2257 (void) memcpy(&sc->sc_card_alive_init, ar, 2258 sizeof (struct iwk_init_alive_resp)); 2259 /* XXX get temperature */ 2260 iwk_mac_access_enter(sc); 2261 iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG, 2262 sc->sc_dma_fw_text.cookie.dmac_address >> 4); 2263 iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG, 2264 sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4); 2265 iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG, 2266 sc->sc_dma_fw_data.cookie.dmac_size); 2267 iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG, 2268 sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000); 2269 iwk_mac_access_exit(sc); 2270 } else { 2271 IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n")); 2272 (void) memcpy(&sc->sc_card_alive_run, ar, 2273 sizeof (struct iwk_alive_resp)); 2274 2275 /* 2276 * Init SCD related registers to make Tx work. XXX 2277 */ 2278 iwk_mac_access_enter(sc); 2279 2280 /* read sram address of data base */ 2281 sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR); 2282 2283 /* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */ 2284 for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0; 2285 i < 128; i += 4) 2286 iwk_mem_write(sc, base + i, 0); 2287 2288 /* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */ 2289 for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET; 2290 i < 256; i += 4) 2291 iwk_mem_write(sc, base + i, 0); 2292 2293 /* clear and init SCD_TRANSLATE_TBL_OFFSET area. 
32 bytes */ 2294 for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET; 2295 i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4) 2296 iwk_mem_write(sc, base + i, 0); 2297 2298 iwk_reg_write(sc, SCD_DRAM_BASE_ADDR, 2299 sc->sc_dma_sh.cookie.dmac_address >> 10); 2300 iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0); 2301 2302 /* initiate the tx queues */ 2303 for (i = 0; i < IWK_NUM_QUEUES; i++) { 2304 iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0); 2305 IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8)); 2306 iwk_mem_write(sc, sc->sc_scd_base + 2307 SCD_CONTEXT_QUEUE_OFFSET(i), 2308 (SCD_WIN_SIZE & 0x7f)); 2309 iwk_mem_write(sc, sc->sc_scd_base + 2310 SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t), 2311 (SCD_FRAME_LIMIT & 0x7f) << 16); 2312 } 2313 /* interrupt enable on each queue0-7 */ 2314 iwk_reg_write(sc, SCD_INTERRUPT_MASK, 2315 (1 << IWK_NUM_QUEUES) - 1); 2316 /* enable each channel 0-7 */ 2317 iwk_reg_write(sc, SCD_TXFACT, 2318 SCD_TXFACT_REG_TXFIFO_MASK(0, 7)); 2319 /* 2320 * queue 0-7 maps to FIFO 0-7 and 2321 * all queues work under FIFO mode (none-scheduler-ack) 2322 */ 2323 for (i = 0; i < 7; i++) { 2324 iwk_reg_write(sc, 2325 SCD_QUEUE_STATUS_BITS(i), 2326 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| 2327 (i << SCD_QUEUE_STTS_REG_POS_TXF)| 2328 SCD_QUEUE_STTS_REG_MSK); 2329 } 2330 iwk_mac_access_exit(sc); 2331 2332 sc->sc_flags |= IWK_F_FW_INIT; 2333 cv_signal(&sc->sc_fw_cv); 2334 } 2335 2336 } 2337 2338 static uint_t 2339 /* LINTED: argument unused in function: unused */ 2340 iwk_rx_softintr(caddr_t arg, caddr_t unused) 2341 { 2342 iwk_sc_t *sc = (iwk_sc_t *)arg; 2343 ieee80211com_t *ic = &sc->sc_ic; 2344 iwk_rx_desc_t *desc; 2345 iwk_rx_data_t *data; 2346 uint32_t index; 2347 2348 mutex_enter(&sc->sc_glock); 2349 if (sc->sc_rx_softint_pending != 1) { 2350 mutex_exit(&sc->sc_glock); 2351 return (DDI_INTR_UNCLAIMED); 2352 } 2353 /* disable interrupts */ 2354 IWK_WRITE(sc, CSR_INT_MASK, 0); 2355 mutex_exit(&sc->sc_glock); 2356 2357 /* 2358 * firmware has moved the index of the rx queue, driver get it, 2359 * and deal with it. 2360 */ 2361 index = sc->sc_shared->val0 & 0xfff; 2362 2363 while (sc->sc_rxq.cur != index) { 2364 data = &sc->sc_rxq.data[sc->sc_rxq.cur]; 2365 desc = (iwk_rx_desc_t *)data->dma_data.mem_va; 2366 2367 IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d" 2368 " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n", 2369 index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx, 2370 desc->hdr.flags, desc->hdr.type, LE_32(desc->len))); 2371 2372 /* a command other than a tx need to be replied */ 2373 if (!(desc->hdr.qid & 0x80) && 2374 (desc->hdr.type != REPLY_RX_PHY_CMD) && 2375 (desc->hdr.type != REPLY_TX) && 2376 (desc->hdr.type != REPLY_TX_PWR_TABLE_CMD) && 2377 (desc->hdr.type != REPLY_PHY_CALIBRATION_CMD) && 2378 (desc->hdr.type != SENSITIVITY_CMD)) 2379 iwk_cmd_intr(sc, desc); 2380 2381 switch (desc->hdr.type) { 2382 case REPLY_4965_RX: 2383 iwk_rx_intr(sc, desc, data); 2384 break; 2385 2386 case REPLY_TX: 2387 iwk_tx_intr(sc, desc, data); 2388 break; 2389 2390 case REPLY_ALIVE: 2391 iwk_ucode_alive(sc, desc); 2392 break; 2393 2394 case CARD_STATE_NOTIFICATION: 2395 { 2396 uint32_t *status = (uint32_t *)(desc + 1); 2397 2398 IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n", 2399 LE_32(*status))); 2400 2401 if (LE_32(*status) & 1) { 2402 /* 2403 * the radio button has to be pushed(OFF). 
It 2404 * is treated as a hw error; the 2405 * iwk_thread() tries to recover it after the 2406 * button is pushed again (ON). 2407 */ 2408 cmn_err(CE_NOTE, 2409 "iwk_rx_softintr(): " 2410 "Radio transmitter is off\n"); 2411 sc->sc_ostate = sc->sc_ic.ic_state; 2412 ieee80211_new_state(&sc->sc_ic, 2413 IEEE80211_S_INIT, -1); 2414 sc->sc_flags |= 2415 (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF); 2416 } 2417 break; 2418 } 2419 case SCAN_START_NOTIFICATION: 2420 { 2421 iwk_start_scan_t *scan = 2422 (iwk_start_scan_t *)(desc + 1); 2423 2424 IWK_DBG((IWK_DEBUG_SCAN, 2425 "scanning channel %d status %x\n", 2426 scan->chan, LE_32(scan->status))); 2427 2428 ic->ic_curchan = &ic->ic_sup_channels[scan->chan]; 2429 break; 2430 } 2431 case SCAN_COMPLETE_NOTIFICATION: 2432 { 2433 iwk_stop_scan_t *scan = 2434 (iwk_stop_scan_t *)(desc + 1); 2435 2436 IWK_DBG((IWK_DEBUG_SCAN, 2437 "completed channel %d (burst of %d) status %02x\n", 2438 scan->chan, scan->nchan, scan->status)); 2439 2440 sc->sc_scan_pending++; 2441 break; 2442 } 2443 case STATISTICS_NOTIFICATION: 2444 /* handle statistics notification */ 2445 iwk_statistics_notify(sc, desc); 2446 break; 2447 } 2448 2449 sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE; 2450 } 2451 2452 /* 2453 * the driver has dealt with what was received in the rx queue and passes 2454 * this information on to the firmware. 2455 */ 2456 index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1; 2457 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7)); 2458 2459 mutex_enter(&sc->sc_glock); 2460 /* re-enable interrupts */ 2461 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK); 2462 sc->sc_rx_softint_pending = 0; 2463 mutex_exit(&sc->sc_glock); 2464 2465 return (DDI_INTR_CLAIMED); 2466 } 2467 2468 static uint_t 2469 /* LINTED: argument unused in function: unused */ 2470 iwk_intr(caddr_t arg, caddr_t unused) 2471 { 2472 iwk_sc_t *sc = (iwk_sc_t *)arg; 2473 uint32_t r, rfh; 2474 2475 mutex_enter(&sc->sc_glock); 2476 2477 if (sc->sc_flags & IWK_F_SUSPEND) { 2478 mutex_exit(&sc->sc_glock); 2479 return (DDI_INTR_UNCLAIMED); 2480 } 2481 2482 r = IWK_READ(sc, CSR_INT); 2483 if (r == 0 || r == 0xffffffff) { 2484 mutex_exit(&sc->sc_glock); 2485 return (DDI_INTR_UNCLAIMED); 2486 } 2487 2488 IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r)); 2489 2490 rfh = IWK_READ(sc, CSR_FH_INT_STATUS); 2491 IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh)); 2492 /* disable interrupts */ 2493 IWK_WRITE(sc, CSR_INT_MASK, 0); 2494 /* ack interrupts */ 2495 IWK_WRITE(sc, CSR_INT, r); 2496 IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh); 2497 2498 if (sc->sc_soft_hdl == NULL) { 2499 mutex_exit(&sc->sc_glock); 2500 return (DDI_INTR_CLAIMED); 2501 } 2502 if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) { 2503 cmn_err(CE_WARN, "fatal firmware error\n"); 2504 mutex_exit(&sc->sc_glock); 2505 #ifdef DEBUG 2506 /* dump event and error logs to dmesg */ 2507 iwk_write_error_log(sc); 2508 iwk_write_event_log(sc); 2509 #endif /* DEBUG */ 2510 iwk_stop(sc); 2511 sc->sc_ostate = sc->sc_ic.ic_state; 2512 2513 /* not capable of fast recovery */ 2514 if (!IWK_CHK_FAST_RECOVER(sc)) 2515 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1); 2516 2517 sc->sc_flags |= IWK_F_HW_ERR_RECOVER; 2518 return (DDI_INTR_CLAIMED); 2519 } 2520 2521 if (r & BIT_INT_RF_KILL) { 2522 uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL); 2523 if (tmp & (1 << 27)) 2524 cmn_err(CE_NOTE, "RF switch: radio on\n"); 2525 } 2526 2527 if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) || 2528 (rfh & FH_INT_RX_MASK)) { 2529 sc->sc_rx_softint_pending = 1; 2530 (void)
ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL); 2531 } 2532 2533 if (r & BIT_INT_ALIVE) { 2534 IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n")); 2535 } 2536 2537 /* re-enable interrupts */ 2538 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK); 2539 mutex_exit(&sc->sc_glock); 2540 2541 return (DDI_INTR_CLAIMED); 2542 } 2543 2544 static uint8_t 2545 iwk_rate_to_plcp(int rate) 2546 { 2547 uint8_t ret; 2548 2549 switch (rate) { 2550 /* CCK rates */ 2551 case 2: 2552 ret = 0xa; 2553 break; 2554 case 4: 2555 ret = 0x14; 2556 break; 2557 case 11: 2558 ret = 0x37; 2559 break; 2560 case 22: 2561 ret = 0x6e; 2562 break; 2563 /* OFDM rates */ 2564 case 12: 2565 ret = 0xd; 2566 break; 2567 case 18: 2568 ret = 0xf; 2569 break; 2570 case 24: 2571 ret = 0x5; 2572 break; 2573 case 36: 2574 ret = 0x7; 2575 break; 2576 case 48: 2577 ret = 0x9; 2578 break; 2579 case 72: 2580 ret = 0xb; 2581 break; 2582 case 96: 2583 ret = 0x1; 2584 break; 2585 case 108: 2586 ret = 0x3; 2587 break; 2588 default: 2589 ret = 0; 2590 break; 2591 } 2592 return (ret); 2593 } 2594 2595 static mblk_t * 2596 iwk_m_tx(void *arg, mblk_t *mp) 2597 { 2598 iwk_sc_t *sc = (iwk_sc_t *)arg; 2599 ieee80211com_t *ic = &sc->sc_ic; 2600 mblk_t *next; 2601 2602 if (sc->sc_flags & IWK_F_SUSPEND) { 2603 freemsgchain(mp); 2604 return (NULL); 2605 } 2606 2607 if (ic->ic_state != IEEE80211_S_RUN) { 2608 freemsgchain(mp); 2609 return (NULL); 2610 } 2611 2612 if ((sc->sc_flags & IWK_F_HW_ERR_RECOVER) && 2613 IWK_CHK_FAST_RECOVER(sc)) { 2614 IWK_DBG((IWK_DEBUG_FW, "iwk_m_tx(): hold queue\n")); 2615 return (mp); 2616 } 2617 2618 while (mp != NULL) { 2619 next = mp->b_next; 2620 mp->b_next = NULL; 2621 if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) { 2622 mp->b_next = next; 2623 break; 2624 } 2625 mp = next; 2626 } 2627 return (mp); 2628 } 2629 2630 /* ARGSUSED */ 2631 static int 2632 iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type) 2633 { 2634 iwk_sc_t *sc = (iwk_sc_t *)ic; 2635 iwk_tx_ring_t *ring; 2636 iwk_tx_desc_t *desc; 2637 iwk_tx_data_t *data; 2638 iwk_cmd_t *cmd; 2639 iwk_tx_cmd_t *tx; 2640 ieee80211_node_t *in; 2641 struct ieee80211_frame *wh; 2642 struct ieee80211_key *k = NULL; 2643 mblk_t *m, *m0; 2644 int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS; 2645 uint16_t masks = 0; 2646 uint8_t index, index1, index2; 2647 2648 ring = &sc->sc_txq[0]; 2649 data = &ring->data[ring->cur]; 2650 desc = data->desc; 2651 cmd = data->cmd; 2652 bzero(desc, sizeof (*desc)); 2653 bzero(cmd, sizeof (*cmd)); 2654 2655 mutex_enter(&sc->sc_tx_lock); 2656 if (sc->sc_flags & IWK_F_SUSPEND) { 2657 mutex_exit(&sc->sc_tx_lock); 2658 if ((type & IEEE80211_FC0_TYPE_MASK) != 2659 IEEE80211_FC0_TYPE_DATA) { 2660 freemsg(mp); 2661 } 2662 err = IWK_FAIL; 2663 goto exit; 2664 } 2665 2666 if (ring->queued > ring->count - 64) { 2667 IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n")); 2668 sc->sc_need_reschedule = 1; 2669 mutex_exit(&sc->sc_tx_lock); 2670 if ((type & IEEE80211_FC0_TYPE_MASK) != 2671 IEEE80211_FC0_TYPE_DATA) { 2672 freemsg(mp); 2673 } 2674 sc->sc_tx_nobuf++; 2675 err = IWK_FAIL; 2676 goto exit; 2677 } 2678 mutex_exit(&sc->sc_tx_lock); 2679 2680 hdrlen = sizeof (struct ieee80211_frame); 2681 2682 m = allocb(msgdsize(mp) + 32, BPRI_MED); 2683 if (m == NULL) { /* can not alloc buf, drop this package */ 2684 cmn_err(CE_WARN, 2685 "iwk_send(): failed to allocate msgbuf\n"); 2686 freemsg(mp); 2687 err = IWK_SUCCESS; 2688 goto exit; 2689 } 2690 for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) { 2691 mblen = MBLKL(m0); 2692 (void) memcpy(m->b_rptr + 
off, m0->b_rptr, mblen); 2693 off += mblen; 2694 } 2695 m->b_wptr += off; 2696 freemsg(mp); 2697 2698 wh = (struct ieee80211_frame *)m->b_rptr; 2699 2700 if (ic->ic_opmode == IEEE80211_M_IBSS && 2701 (!(IEEE80211_IS_MULTICAST(wh->i_addr1)))) { 2702 mutex_enter(&sc->sc_glock); 2703 mutex_enter(&sc->sc_ibss.node_tb_lock); 2704 2705 /* 2706 * search for node in ibss node table 2707 */ 2708 for (index1 = IWK_STA_ID; 2709 index1 < IWK_STATION_COUNT; index1++) { 2710 if (sc->sc_ibss.ibss_node_tb[index1].used && 2711 IEEE80211_ADDR_EQ(sc->sc_ibss. 2712 ibss_node_tb[index1].node.bssid, 2713 wh->i_addr1)) { 2714 break; 2715 } 2716 } 2717 2718 /* 2719 * if don't find in ibss node table 2720 */ 2721 if (index1 >= IWK_BROADCAST_ID) { 2722 err = iwk_clean_add_node_ibss(ic, 2723 wh->i_addr1, &index2); 2724 if (err != IWK_SUCCESS) { 2725 cmn_err(CE_WARN, "iwk_send(): " 2726 "failed to clean all nodes " 2727 "and add one node\n"); 2728 mutex_exit(&sc->sc_ibss.node_tb_lock); 2729 mutex_exit(&sc->sc_glock); 2730 freemsg(m); 2731 sc->sc_tx_err++; 2732 err = IWK_SUCCESS; 2733 goto exit; 2734 } 2735 index = index2; 2736 } else { 2737 index = index1; 2738 } 2739 mutex_exit(&sc->sc_ibss.node_tb_lock); 2740 mutex_exit(&sc->sc_glock); 2741 } 2742 2743 in = ieee80211_find_txnode(ic, wh->i_addr1); 2744 if (in == NULL) { 2745 cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n"); 2746 freemsg(m); 2747 sc->sc_tx_err++; 2748 err = IWK_SUCCESS; 2749 goto exit; 2750 } 2751 (void) ieee80211_encap(ic, m, in); 2752 2753 cmd->hdr.type = REPLY_TX; 2754 cmd->hdr.flags = 0; 2755 cmd->hdr.qid = ring->qid; 2756 cmd->hdr.idx = ring->cur; 2757 2758 tx = (iwk_tx_cmd_t *)cmd->data; 2759 tx->tx_flags = 0; 2760 2761 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2762 tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK)); 2763 } else { 2764 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK); 2765 } 2766 2767 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 2768 k = ieee80211_crypto_encap(ic, m); 2769 if (k == NULL) { 2770 freemsg(m); 2771 sc->sc_tx_err++; 2772 err = IWK_SUCCESS; 2773 goto exit; 2774 } 2775 2776 if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) { 2777 tx->sec_ctl = 2; /* for CCMP */ 2778 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK); 2779 (void) memcpy(&tx->key, k->wk_key, k->wk_keylen); 2780 } 2781 2782 /* packet header may have moved, reset our local pointer */ 2783 wh = (struct ieee80211_frame *)m->b_rptr; 2784 } 2785 2786 len = msgdsize(m); 2787 2788 #ifdef DEBUG 2789 if (iwk_dbg_flags & IWK_DEBUG_TX) 2790 ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0); 2791 #endif 2792 2793 /* pickup a rate */ 2794 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == 2795 IEEE80211_FC0_TYPE_MGT) { 2796 /* mgmt frames are sent at 1M */ 2797 rate = in->in_rates.ir_rates[0]; 2798 } else { 2799 /* 2800 * do it here for the software way rate control. 2801 * later for rate scaling in hardware. 2802 * maybe like the following, for management frame: 2803 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1; 2804 * for data frame: 2805 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK)); 2806 * rate = in->in_rates.ir_rates[in->in_txrate]; 2807 * tx->initial_rate_index = 1; 2808 * 2809 * now the txrate is determined in tx cmd flags, set to the 2810 * max value 54M for 11g and 11M for 11b. 
2811 */ 2812 2813 if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) { 2814 rate = ic->ic_fixed_rate; 2815 } else { 2816 rate = in->in_rates.ir_rates[in->in_txrate]; 2817 } 2818 } 2819 rate &= IEEE80211_RATE_VAL; 2820 IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x", 2821 in->in_txrate, in->in_rates.ir_nrates, rate)); 2822 2823 tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK)); 2824 2825 len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4); 2826 if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen)) 2827 tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK); 2828 2829 /* retrieve destination node's id */ 2830 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2831 tx->sta_id = IWK_BROADCAST_ID; 2832 } else { 2833 if (ic->ic_opmode == IEEE80211_M_IBSS) 2834 tx->sta_id = index; 2835 else 2836 tx->sta_id = IWK_AP_ID; 2837 } 2838 2839 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == 2840 IEEE80211_FC0_TYPE_MGT) { 2841 /* tell h/w to set timestamp in probe responses */ 2842 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 2843 IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2844 tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK); 2845 2846 if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 2847 IEEE80211_FC0_SUBTYPE_ASSOC_REQ) || 2848 ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 2849 IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) 2850 tx->timeout.pm_frame_timeout = LE_16(3); 2851 else 2852 tx->timeout.pm_frame_timeout = LE_16(2); 2853 } else 2854 tx->timeout.pm_frame_timeout = 0; 2855 if (rate == 2 || rate == 4 || rate == 11 || rate == 22) 2856 masks |= RATE_MCS_CCK_MSK; 2857 2858 masks |= RATE_MCS_ANT_B_MSK; 2859 tx->rate.r.rate_n_flags = LE_32(iwk_rate_to_plcp(rate) | masks); 2860 2861 IWK_DBG((IWK_DEBUG_TX, "tx flag = %x", 2862 LE_32(tx->tx_flags))); 2863 2864 tx->rts_retry_limit = 60; 2865 tx->data_retry_limit = 15; 2866 2867 tx->stop_time.life_time = LE_32(0xffffffff); 2868 2869 tx->len = LE_16(len); 2870 2871 tx->dram_lsb_ptr = 2872 LE_32(data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch)); 2873 tx->dram_msb_ptr = 0; 2874 tx->driver_txop = 0; 2875 tx->next_frame_len = 0; 2876 2877 (void) memcpy(tx + 1, m->b_rptr, hdrlen); 2878 m->b_rptr += hdrlen; 2879 (void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen); 2880 2881 IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d", 2882 ring->qid, ring->cur, len)); 2883 2884 /* 2885 * first segment includes the tx cmd plus the 802.11 header, 2886 * the second includes the remaining of the 802.11 frame. 2887 */ 2888 desc->val0 = 2 << 24; 2889 desc->pa[0].tb1_addr = data->paddr_cmd; 2890 desc->pa[0].val1 = ((len0 << 4) & 0xfff0) | 2891 ((data->dma_data.cookie.dmac_address & 0xffff) << 16); 2892 desc->pa[0].val2 = 2893 ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) | 2894 ((len - hdrlen) << 20); 2895 IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x " 2896 "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x", 2897 data->paddr_cmd, data->dma_data.cookie.dmac_address, 2898 len0, len - hdrlen, LE_32(desc->pa[0].val1), 2899 LE_32(desc->pa[0].val2))); 2900 2901 mutex_enter(&sc->sc_tx_lock); 2902 ring->queued++; 2903 mutex_exit(&sc->sc_tx_lock); 2904 2905 /* kick ring */ 2906 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 2907 tfd_offset[ring->cur].val = 8 + len; 2908 if (ring->cur < IWK_MAX_WIN_SIZE) { 2909 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 
2910 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len; 2911 } 2912 2913 IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV); 2914 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV); 2915 2916 ring->cur = (ring->cur + 1) % ring->count; 2917 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 2918 freemsg(m); 2919 /* release node reference */ 2920 ieee80211_free_node(in); 2921 2922 ic->ic_stats.is_tx_bytes += len; 2923 ic->ic_stats.is_tx_frags++; 2924 2925 if (sc->sc_tx_timer == 0) 2926 sc->sc_tx_timer = 4; 2927 2928 exit: 2929 return (err); 2930 } 2931 2932 static void 2933 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp) 2934 { 2935 iwk_sc_t *sc = (iwk_sc_t *)arg; 2936 ieee80211com_t *ic = &sc->sc_ic; 2937 2938 enum ieee80211_opmode oldmod; 2939 iwk_tx_power_table_cmd_t txpower; 2940 iwk_add_sta_t node; 2941 iwk_link_quality_cmd_t link_quality; 2942 uint16_t masks = 0; 2943 int i, err, err1; 2944 2945 oldmod = ic->ic_opmode; 2946 2947 err = ieee80211_ioctl(ic, wq, mp); 2948 2949 /* 2950 * return to STA mode 2951 */ 2952 if ((0 == err || ENETRESET == err) && (oldmod != ic->ic_opmode) && 2953 (ic->ic_opmode == IEEE80211_M_STA)) { 2954 /* configure rxon */ 2955 (void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t)); 2956 IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr); 2957 IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr); 2958 sc->sc_config.chan = 2959 LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan)); 2960 sc->sc_config.flags = LE_32(RXON_FLG_TSF2HOST_MSK | 2961 RXON_FLG_AUTO_DETECT_MSK | 2962 RXON_FLG_BAND_24G_MSK); 2963 sc->sc_config.flags &= LE_32(~RXON_FLG_CCK_MSK); 2964 switch (ic->ic_opmode) { 2965 case IEEE80211_M_STA: 2966 sc->sc_config.dev_type = RXON_DEV_TYPE_ESS; 2967 sc->sc_config.filter_flags |= 2968 LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 2969 RXON_FILTER_DIS_DECRYPT_MSK | 2970 RXON_FILTER_DIS_GRP_DECRYPT_MSK); 2971 break; 2972 case IEEE80211_M_IBSS: 2973 case IEEE80211_M_AHDEMO: 2974 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS; 2975 sc->sc_config.flags |= 2976 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK); 2977 sc->sc_config.filter_flags = 2978 LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 2979 RXON_FILTER_DIS_DECRYPT_MSK | 2980 RXON_FILTER_DIS_GRP_DECRYPT_MSK); 2981 break; 2982 case IEEE80211_M_HOSTAP: 2983 sc->sc_config.dev_type = RXON_DEV_TYPE_AP; 2984 break; 2985 case IEEE80211_M_MONITOR: 2986 sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER; 2987 sc->sc_config.filter_flags |= 2988 LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 2989 RXON_FILTER_CTL2HOST_MSK | 2990 RXON_FILTER_PROMISC_MSK); 2991 break; 2992 } 2993 sc->sc_config.cck_basic_rates = 0x0f; 2994 sc->sc_config.ofdm_basic_rates = 0xff; 2995 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff; 2996 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff; 2997 /* set antenna */ 2998 mutex_enter(&sc->sc_glock); 2999 sc->sc_config.rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK | 3000 (0x7 << RXON_RX_CHAIN_VALID_POS) | 3001 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) | 3002 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS)); 3003 err1 = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 3004 sizeof (iwk_rxon_cmd_t), 1); 3005 if (err1 != IWK_SUCCESS) { 3006 cmn_err(CE_WARN, "iwk_m_ioctl(): " 3007 "failed to set configure command" 3008 " please run (ifconfig unplumb and" 3009 " ifconfig plumb)\n"); 3010 } 3011 /* 3012 * set Tx power for 2.4GHz channels 3013 * (need further investigation. 
fix tx power at present) 3014 */ 3015 (void) memset(&txpower, 0, sizeof (txpower)); 3016 txpower.band = 1; /* for 2.4G */ 3017 txpower.channel = sc->sc_config.chan; 3018 txpower.channel_normal_width = 0; 3019 for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) { 3020 txpower.tx_power.ht_ofdm_power[i]. 3021 s.ramon_tx_gain = LE_16(0x3f3f); 3022 txpower.tx_power.ht_ofdm_power[i]. 3023 s.dsp_predis_atten = LE_16(110 | (110 << 8)); 3024 } 3025 txpower.tx_power.legacy_cck_power.s. 3026 ramon_tx_gain = LE_16(0x3f3f); 3027 txpower.tx_power.legacy_cck_power.s. 3028 dsp_predis_atten = LE_16(110 | (110 << 8)); 3029 err1 = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower, 3030 sizeof (txpower), 1); 3031 if (err1 != IWK_SUCCESS) { 3032 cmn_err(CE_WARN, "iwk_m_ioctl(): failed to set txpower" 3033 " please run (ifconfig unplumb " 3034 "and ifconfig plumb)\n"); 3035 } 3036 /* add broadcast node so that we can send broadcast frame */ 3037 (void) memset(&node, 0, sizeof (node)); 3038 (void) memset(node.bssid, 0xff, 6); 3039 node.id = IWK_BROADCAST_ID; 3040 err1 = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1); 3041 if (err1 != IWK_SUCCESS) { 3042 cmn_err(CE_WARN, "iwk_m_ioctl(): " 3043 "failed to add broadcast node\n"); 3044 } 3045 3046 /* TX_LINK_QUALITY cmd */ 3047 (void) memset(&link_quality, 0, sizeof (link_quality)); 3048 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 3049 masks |= RATE_MCS_CCK_MSK; 3050 masks |= RATE_MCS_ANT_B_MSK; 3051 masks &= ~RATE_MCS_ANT_A_MSK; 3052 link_quality.rate_n_flags[i] = 3053 LE_32(iwk_rate_to_plcp(2) | masks); 3054 } 3055 link_quality.general_params.single_stream_ant_msk = 2; 3056 link_quality.general_params.dual_stream_ant_msk = 3; 3057 link_quality.agg_params.agg_dis_start_th = 3; 3058 link_quality.agg_params.agg_time_limit = LE_16(4000); 3059 link_quality.sta_id = IWK_BROADCAST_ID; 3060 err1 = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality, 3061 sizeof (link_quality), 1); 3062 if (err1 != IWK_SUCCESS) { 3063 cmn_err(CE_WARN, "iwk_m_ioctl(): " 3064 "failed to config link quality table\n"); 3065 } 3066 mutex_exit(&sc->sc_glock); 3067 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 3068 } 3069 3070 if (err == ENETRESET) { 3071 /* 3072 * This is special for the hidden AP connection. 3073 * In any case, we should make sure only one 'scan' 3074 * in the driver for a 'connect' CLI command. So 3075 * when connecting to a hidden AP, the scan is just 3076 * sent out to the air when we know the desired 3077 * essid of the AP we want to connect. 
3078 */ 3079 if (ic->ic_des_esslen) { 3080 if (sc->sc_flags & IWK_F_RUNNING) { 3081 iwk_m_stop(sc); 3082 (void) iwk_m_start(sc); 3083 (void) ieee80211_new_state(ic, 3084 IEEE80211_S_SCAN, -1); 3085 } 3086 } 3087 } 3088 } 3089 3090 /* 3091 * callback functions for set/get properties 3092 */ 3093 /* ARGSUSED */ 3094 static int 3095 iwk_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num, 3096 uint_t pr_flags, uint_t wldp_length, void *wldp_buf, uint_t *perm) 3097 { 3098 int err = 0; 3099 iwk_sc_t *sc = (iwk_sc_t *)arg; 3100 3101 err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num, 3102 pr_flags, wldp_length, wldp_buf, perm); 3103 3104 return (err); 3105 } 3106 static int 3107 iwk_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num, 3108 uint_t wldp_length, const void *wldp_buf) 3109 { 3110 int err; 3111 iwk_sc_t *sc = (iwk_sc_t *)arg; 3112 ieee80211com_t *ic = &sc->sc_ic; 3113 3114 err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length, 3115 wldp_buf); 3116 3117 if (err == ENETRESET) { 3118 if (ic->ic_des_esslen) { 3119 if (sc->sc_flags & IWK_F_RUNNING) { 3120 iwk_m_stop(sc); 3121 (void) iwk_m_start(sc); 3122 (void) ieee80211_new_state(ic, 3123 IEEE80211_S_SCAN, -1); 3124 } 3125 } 3126 err = 0; 3127 } 3128 3129 return (err); 3130 } 3131 3132 /*ARGSUSED*/ 3133 static int 3134 iwk_m_stat(void *arg, uint_t stat, uint64_t *val) 3135 { 3136 iwk_sc_t *sc = (iwk_sc_t *)arg; 3137 ieee80211com_t *ic = &sc->sc_ic; 3138 ieee80211_node_t *in; 3139 3140 mutex_enter(&sc->sc_glock); 3141 switch (stat) { 3142 case MAC_STAT_IFSPEED: 3143 in = ic->ic_bss; 3144 *val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ? 3145 IEEE80211_RATE(in->in_txrate) : 3146 ic->ic_fixed_rate) / 2 * 1000000; 3147 break; 3148 case MAC_STAT_NOXMTBUF: 3149 *val = sc->sc_tx_nobuf; 3150 break; 3151 case MAC_STAT_NORCVBUF: 3152 *val = sc->sc_rx_nobuf; 3153 break; 3154 case MAC_STAT_IERRORS: 3155 *val = sc->sc_rx_err; 3156 break; 3157 case MAC_STAT_RBYTES: 3158 *val = ic->ic_stats.is_rx_bytes; 3159 break; 3160 case MAC_STAT_IPACKETS: 3161 *val = ic->ic_stats.is_rx_frags; 3162 break; 3163 case MAC_STAT_OBYTES: 3164 *val = ic->ic_stats.is_tx_bytes; 3165 break; 3166 case MAC_STAT_OPACKETS: 3167 *val = ic->ic_stats.is_tx_frags; 3168 break; 3169 case MAC_STAT_OERRORS: 3170 case WIFI_STAT_TX_FAILED: 3171 *val = sc->sc_tx_err; 3172 break; 3173 case WIFI_STAT_TX_RETRANS: 3174 *val = sc->sc_tx_retries; 3175 break; 3176 case WIFI_STAT_FCS_ERRORS: 3177 case WIFI_STAT_WEP_ERRORS: 3178 case WIFI_STAT_TX_FRAGS: 3179 case WIFI_STAT_MCAST_TX: 3180 case WIFI_STAT_RTS_SUCCESS: 3181 case WIFI_STAT_RTS_FAILURE: 3182 case WIFI_STAT_ACK_FAILURE: 3183 case WIFI_STAT_RX_FRAGS: 3184 case WIFI_STAT_MCAST_RX: 3185 case WIFI_STAT_RX_DUPS: 3186 mutex_exit(&sc->sc_glock); 3187 return (ieee80211_stat(ic, stat, val)); 3188 default: 3189 mutex_exit(&sc->sc_glock); 3190 return (ENOTSUP); 3191 } 3192 mutex_exit(&sc->sc_glock); 3193 3194 return (IWK_SUCCESS); 3195 3196 } 3197 3198 static int 3199 iwk_m_start(void *arg) 3200 { 3201 iwk_sc_t *sc = (iwk_sc_t *)arg; 3202 ieee80211com_t *ic = &sc->sc_ic; 3203 int err; 3204 3205 err = iwk_init(sc); 3206 3207 if (err != IWK_SUCCESS) { 3208 /* 3209 * The hw init err(eg. RF is OFF). Return Success to make 3210 * the 'plumb' succeed. The iwk_thread() tries to re-init 3211 * background. 
3212 */ 3213 mutex_enter(&sc->sc_glock); 3214 sc->sc_flags |= IWK_F_HW_ERR_RECOVER; 3215 mutex_exit(&sc->sc_glock); 3216 return (IWK_SUCCESS); 3217 } 3218 3219 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 3220 3221 mutex_enter(&sc->sc_glock); 3222 sc->sc_flags |= IWK_F_RUNNING; 3223 mutex_exit(&sc->sc_glock); 3224 3225 return (IWK_SUCCESS); 3226 } 3227 3228 static void 3229 iwk_m_stop(void *arg) 3230 { 3231 iwk_sc_t *sc = (iwk_sc_t *)arg; 3232 ieee80211com_t *ic = &sc->sc_ic; 3233 3234 iwk_stop(sc); 3235 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 3236 ieee80211_stop_watchdog(ic); 3237 mutex_enter(&sc->sc_mt_lock); 3238 sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER; 3239 sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL; 3240 mutex_exit(&sc->sc_mt_lock); 3241 mutex_enter(&sc->sc_glock); 3242 sc->sc_flags &= ~IWK_F_RUNNING; 3243 mutex_exit(&sc->sc_glock); 3244 } 3245 3246 /*ARGSUSED*/ 3247 static int 3248 iwk_m_unicst(void *arg, const uint8_t *macaddr) 3249 { 3250 iwk_sc_t *sc = (iwk_sc_t *)arg; 3251 ieee80211com_t *ic = &sc->sc_ic; 3252 int err; 3253 3254 if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) { 3255 IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr); 3256 mutex_enter(&sc->sc_glock); 3257 err = iwk_config(sc); 3258 mutex_exit(&sc->sc_glock); 3259 if (err != IWK_SUCCESS) { 3260 cmn_err(CE_WARN, 3261 "iwk_m_unicst(): " 3262 "failed to configure device\n"); 3263 goto fail; 3264 } 3265 } 3266 return (IWK_SUCCESS); 3267 fail: 3268 return (err); 3269 } 3270 3271 /*ARGSUSED*/ 3272 static int 3273 iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m) 3274 { 3275 return (IWK_SUCCESS); 3276 } 3277 3278 /*ARGSUSED*/ 3279 static int 3280 iwk_m_promisc(void *arg, boolean_t on) 3281 { 3282 return (IWK_SUCCESS); 3283 } 3284 3285 static void 3286 iwk_thread(iwk_sc_t *sc) 3287 { 3288 ieee80211com_t *ic = &sc->sc_ic; 3289 clock_t clk; 3290 int times = 0, err, n = 0, timeout = 0; 3291 uint32_t tmp; 3292 3293 mutex_enter(&sc->sc_mt_lock); 3294 while (sc->sc_mf_thread_switch) { 3295 tmp = IWK_READ(sc, CSR_GP_CNTRL); 3296 if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) { 3297 sc->sc_flags &= ~IWK_F_RADIO_OFF; 3298 } else { 3299 sc->sc_flags |= IWK_F_RADIO_OFF; 3300 } 3301 /* 3302 * If in SUSPEND or the RF is OFF, do nothing 3303 */ 3304 if ((sc->sc_flags & IWK_F_SUSPEND) || 3305 (sc->sc_flags & IWK_F_RADIO_OFF)) { 3306 mutex_exit(&sc->sc_mt_lock); 3307 delay(drv_usectohz(100000)); 3308 mutex_enter(&sc->sc_mt_lock); 3309 continue; 3310 } 3311 3312 /* 3313 * recovery fatal error 3314 */ 3315 if (ic->ic_mach && 3316 (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) { 3317 3318 IWK_DBG((IWK_DEBUG_FW, 3319 "iwk_thread(): " 3320 "try to recover fatal hw error: %d\n", times++)); 3321 3322 iwk_stop(sc); 3323 3324 if (IWK_CHK_FAST_RECOVER(sc)) { 3325 /* save runtime configuration */ 3326 bcopy(&sc->sc_config, &sc->sc_config_save, 3327 sizeof (sc->sc_config)); 3328 } else { 3329 mutex_exit(&sc->sc_mt_lock); 3330 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 3331 delay(drv_usectohz(2000000 + n*500000)); 3332 mutex_enter(&sc->sc_mt_lock); 3333 } 3334 3335 err = iwk_init(sc); 3336 if (err != IWK_SUCCESS) { 3337 n++; 3338 if (n < 20) 3339 continue; 3340 } 3341 n = 0; 3342 if (!err) 3343 sc->sc_flags |= IWK_F_RUNNING; 3344 3345 if (!IWK_CHK_FAST_RECOVER(sc) || 3346 iwk_fast_recover(sc) != IWK_SUCCESS) { 3347 sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER; 3348 3349 mutex_exit(&sc->sc_mt_lock); 3350 delay(drv_usectohz(2000000)); 3351 if (sc->sc_ostate != IEEE80211_S_INIT) 3352 ieee80211_new_state(ic, 3353 IEEE80211_S_SCAN, 0); 3354 
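/* re-acquire sc_mt_lock (released above) before continuing the thread loop */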
mutex_enter(&sc->sc_mt_lock); 3355 } 3356 } 3357 3358 if (ic->ic_mach && (sc->sc_flags & IWK_F_LAZY_RESUME)) { 3359 IWK_DBG((IWK_DEBUG_RESUME, 3360 "iwk_thread(): lazy resume\n")); 3361 3362 sc->sc_flags &= ~IWK_F_LAZY_RESUME; 3363 mutex_exit(&sc->sc_mt_lock); 3364 /* 3365 * NB: under WPA mode, this call hangs (door problem?) 3366 * when called in iwk_attach() and iwk_detach() while 3367 * system is in the procedure of CPR. To be safe, let 3368 * the thread do this. 3369 */ 3370 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1); 3371 mutex_enter(&sc->sc_mt_lock); 3372 } 3373 3374 if (ic->ic_mach && 3375 (sc->sc_flags & IWK_F_SCANNING) && sc->sc_scan_pending) { 3376 IWK_DBG((IWK_DEBUG_SCAN, 3377 "iwk_thread(): " 3378 "wait for probe response\n")); 3379 sc->sc_scan_pending--; 3380 mutex_exit(&sc->sc_mt_lock); 3381 delay(drv_usectohz(200000)); 3382 if (sc->sc_flags & IWK_F_SCANNING) 3383 ieee80211_next_scan(ic); 3384 mutex_enter(&sc->sc_mt_lock); 3385 } 3386 3387 /* 3388 * rate ctl 3389 */ 3390 if (ic->ic_mach && 3391 (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) { 3392 clk = ddi_get_lbolt(); 3393 if (clk > sc->sc_clk + drv_usectohz(500000)) { 3394 iwk_amrr_timeout(sc); 3395 } 3396 } 3397 3398 mutex_exit(&sc->sc_mt_lock); 3399 delay(drv_usectohz(100000)); 3400 mutex_enter(&sc->sc_mt_lock); 3401 3402 if (sc->sc_tx_timer) { 3403 timeout++; 3404 if (timeout == 10) { 3405 sc->sc_tx_timer--; 3406 if (sc->sc_tx_timer == 0) { 3407 sc->sc_flags |= IWK_F_HW_ERR_RECOVER; 3408 sc->sc_ostate = IEEE80211_S_RUN; 3409 IWK_DBG((IWK_DEBUG_FW, 3410 "iwk_thread(): try to recover from" 3411 " 'send fail\n")); 3412 } 3413 timeout = 0; 3414 } 3415 } 3416 3417 } 3418 sc->sc_mf_thread = NULL; 3419 cv_signal(&sc->sc_mt_cv); 3420 mutex_exit(&sc->sc_mt_lock); 3421 } 3422 3423 3424 /* 3425 * Send a command to the firmware. 3426 */ 3427 static int 3428 iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async) 3429 { 3430 iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM]; 3431 iwk_tx_desc_t *desc; 3432 iwk_cmd_t *cmd; 3433 clock_t clk; 3434 3435 ASSERT(size <= sizeof (cmd->data)); 3436 ASSERT(mutex_owned(&sc->sc_glock)); 3437 3438 IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code)); 3439 desc = ring->data[ring->cur].desc; 3440 cmd = ring->data[ring->cur].cmd; 3441 3442 cmd->hdr.type = (uint8_t)code; 3443 cmd->hdr.flags = 0; 3444 cmd->hdr.qid = ring->qid; 3445 cmd->hdr.idx = ring->cur; 3446 (void) memcpy(cmd->data, buf, size); 3447 (void) memset(desc, 0, sizeof (*desc)); 3448 3449 desc->val0 = 1 << 24; 3450 desc->pa[0].tb1_addr = 3451 (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff); 3452 desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0; 3453 3454 /* kick cmd ring XXX */ 3455 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 3456 tfd_offset[ring->cur].val = 8; 3457 if (ring->cur < IWK_MAX_WIN_SIZE) { 3458 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 
3459 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8; 3460 } 3461 ring->cur = (ring->cur + 1) % ring->count; 3462 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3463 3464 if (async) 3465 return (IWK_SUCCESS); 3466 else { 3467 sc->sc_flags &= ~IWK_F_CMD_DONE; 3468 clk = ddi_get_lbolt() + drv_usectohz(2000000); 3469 while (!(sc->sc_flags & IWK_F_CMD_DONE)) { 3470 if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk) < 3471 0) 3472 break; 3473 } 3474 if (sc->sc_flags & IWK_F_CMD_DONE) 3475 return (IWK_SUCCESS); 3476 else 3477 return (IWK_FAIL); 3478 } 3479 } 3480 3481 static void 3482 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on) 3483 { 3484 iwk_led_cmd_t led; 3485 3486 led.interval = LE_32(100000); /* unit: 100ms */ 3487 led.id = id; 3488 led.off = off; 3489 led.on = on; 3490 3491 (void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1); 3492 } 3493 3494 static int 3495 iwk_hw_set_before_auth(iwk_sc_t *sc) 3496 { 3497 ieee80211com_t *ic = &sc->sc_ic; 3498 ieee80211_node_t *in = ic->ic_bss; 3499 iwk_add_sta_t node; 3500 iwk_link_quality_cmd_t link_quality; 3501 struct ieee80211_rateset rs; 3502 uint16_t masks = 0, rate; 3503 int i, err; 3504 3505 if (in->in_chan == IEEE80211_CHAN_ANYC) { 3506 cmn_err(CE_WARN, "iwk_hw_set_before_auth():" 3507 "channel (%d) isn't in proper range\n", 3508 LE_16(ieee80211_chan2ieee(ic, in->in_chan))); 3509 return (IWK_FAIL); 3510 } 3511 3512 /* update adapter's configuration according the info of target AP */ 3513 IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid); 3514 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan)); 3515 if (ic->ic_curmode == IEEE80211_MODE_11B) { 3516 sc->sc_config.cck_basic_rates = 0x03; 3517 sc->sc_config.ofdm_basic_rates = 0; 3518 } else if ((in->in_chan != IEEE80211_CHAN_ANYC) && 3519 (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) { 3520 sc->sc_config.cck_basic_rates = 0; 3521 sc->sc_config.ofdm_basic_rates = 0x15; 3522 } else { /* assume 802.11b/g */ 3523 sc->sc_config.cck_basic_rates = 0x0f; 3524 sc->sc_config.ofdm_basic_rates = 0xff; 3525 } 3526 3527 sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK | 3528 RXON_FLG_SHORT_SLOT_MSK); 3529 3530 if (ic->ic_flags & IEEE80211_F_SHSLOT) 3531 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK); 3532 else 3533 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK); 3534 3535 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 3536 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK); 3537 else 3538 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK); 3539 3540 IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x " 3541 "filter_flags %x cck %x ofdm %x" 3542 " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n", 3543 LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags), 3544 LE_32(sc->sc_config.filter_flags), 3545 sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates, 3546 sc->sc_config.bssid[0], sc->sc_config.bssid[1], 3547 sc->sc_config.bssid[2], sc->sc_config.bssid[3], 3548 sc->sc_config.bssid[4], sc->sc_config.bssid[5])); 3549 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 3550 sizeof (iwk_rxon_cmd_t), 1); 3551 if (err != IWK_SUCCESS) { 3552 cmn_err(CE_WARN, "iwk_hw_set_before_auth():" 3553 " failed to config chan%d\n", 3554 sc->sc_config.chan); 3555 return (err); 3556 } 3557 3558 /* obtain current temperature of chipset */ 3559 sc->sc_tempera = iwk_curr_tempera(sc); 3560 3561 /* make Tx power calibration to determine the gains of DSP and radio */ 3562 err = iwk_tx_power_calibration(sc); 3563 if (err) { 3564 cmn_err(CE_WARN, 
"iwk_hw_set_before_auth():" 3565 "failed to set tx power table\n"); 3566 return (err); 3567 } 3568 3569 /* add default AP node */ 3570 (void) memset(&node, 0, sizeof (node)); 3571 IEEE80211_ADDR_COPY(node.bssid, in->in_bssid); 3572 node.id = IWK_AP_ID; 3573 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1); 3574 if (err != IWK_SUCCESS) { 3575 cmn_err(CE_WARN, "iwk_hw_set_before_auth(): " 3576 "failed to add BSS node\n"); 3577 return (err); 3578 } 3579 3580 /* TX_LINK_QUALITY cmd */ 3581 (void) memset(&link_quality, 0, sizeof (link_quality)); 3582 rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)]; 3583 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 3584 if (i < rs.ir_nrates) 3585 rate = rs.ir_rates[rs.ir_nrates - i]; 3586 else 3587 rate = 2; 3588 if (rate == 2 || rate == 4 || rate == 11 || rate == 22) 3589 masks |= RATE_MCS_CCK_MSK; 3590 masks |= RATE_MCS_ANT_B_MSK; 3591 masks &= ~RATE_MCS_ANT_A_MSK; 3592 link_quality.rate_n_flags[i] = 3593 LE_32(iwk_rate_to_plcp(rate) | masks); 3594 } 3595 3596 link_quality.general_params.single_stream_ant_msk = 2; 3597 link_quality.general_params.dual_stream_ant_msk = 3; 3598 link_quality.agg_params.agg_dis_start_th = 3; 3599 link_quality.agg_params.agg_time_limit = LE_16(4000); 3600 link_quality.sta_id = IWK_AP_ID; 3601 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality, 3602 sizeof (link_quality), 1); 3603 if (err != IWK_SUCCESS) { 3604 cmn_err(CE_WARN, "iwk_hw_set_before_auth(): " 3605 "failed to config link quality table\n"); 3606 return (err); 3607 } 3608 3609 return (IWK_SUCCESS); 3610 } 3611 3612 /* 3613 * Send a scan request(assembly scan cmd) to the firmware. 3614 */ 3615 static int 3616 iwk_scan(iwk_sc_t *sc) 3617 { 3618 ieee80211com_t *ic = &sc->sc_ic; 3619 iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM]; 3620 iwk_tx_desc_t *desc; 3621 iwk_tx_data_t *data; 3622 iwk_cmd_t *cmd; 3623 iwk_scan_hdr_t *hdr; 3624 iwk_scan_chan_t *chan; 3625 struct ieee80211_frame *wh; 3626 ieee80211_node_t *in = ic->ic_bss; 3627 uint8_t essid[IEEE80211_NWID_LEN+1]; 3628 struct ieee80211_rateset *rs; 3629 enum ieee80211_phymode mode; 3630 uint8_t *frm; 3631 int i, pktlen, nrates; 3632 3633 data = &ring->data[ring->cur]; 3634 desc = data->desc; 3635 cmd = (iwk_cmd_t *)data->dma_data.mem_va; 3636 3637 cmd->hdr.type = REPLY_SCAN_CMD; 3638 cmd->hdr.flags = 0; 3639 cmd->hdr.qid = ring->qid; 3640 cmd->hdr.idx = ring->cur | 0x40; 3641 3642 hdr = (iwk_scan_hdr_t *)cmd->data; 3643 (void) memset(hdr, 0, sizeof (iwk_scan_hdr_t)); 3644 hdr->nchan = 1; 3645 hdr->quiet_time = LE_16(50); 3646 hdr->quiet_plcp_th = LE_16(1); 3647 3648 hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK); 3649 hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK | 3650 (0x7 << RXON_RX_CHAIN_VALID_POS) | 3651 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) | 3652 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS)); 3653 3654 hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK); 3655 hdr->tx_cmd.sta_id = IWK_BROADCAST_ID; 3656 hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff); 3657 hdr->tx_cmd.tx_flags |= LE_32(0x200); 3658 hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwk_rate_to_plcp(2)); 3659 hdr->tx_cmd.rate.r.rate_n_flags |= 3660 LE_32(RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK); 3661 hdr->direct_scan[0].len = ic->ic_des_esslen; 3662 hdr->direct_scan[0].id = IEEE80211_ELEMID_SSID; 3663 3664 if (ic->ic_des_esslen) { 3665 bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen); 3666 essid[ic->ic_des_esslen] = '\0'; 3667 IWK_DBG((IWK_DEBUG_SCAN, "directed scan %s\n", essid)); 3668 3669 
bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid, 3670 ic->ic_des_esslen); 3671 } else { 3672 bzero(hdr->direct_scan[0].ssid, 3673 sizeof (hdr->direct_scan[0].ssid)); 3674 } 3675 /* 3676 * a probe request frame is required after the REPLY_SCAN_CMD 3677 */ 3678 wh = (struct ieee80211_frame *)(hdr + 1); 3679 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 3680 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 3681 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 3682 (void) memset(wh->i_addr1, 0xff, 6); 3683 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr); 3684 (void) memset(wh->i_addr3, 0xff, 6); 3685 *(uint16_t *)&wh->i_dur[0] = 0; 3686 *(uint16_t *)&wh->i_seq[0] = 0; 3687 3688 frm = (uint8_t *)(wh + 1); 3689 3690 /* essid IE */ 3691 if (in->in_esslen) { 3692 bcopy(in->in_essid, essid, in->in_esslen); 3693 essid[in->in_esslen] = '\0'; 3694 IWK_DBG((IWK_DEBUG_SCAN, "probe with ESSID %s\n", 3695 essid)); 3696 } 3697 *frm++ = IEEE80211_ELEMID_SSID; 3698 *frm++ = in->in_esslen; 3699 (void) memcpy(frm, in->in_essid, in->in_esslen); 3700 frm += in->in_esslen; 3701 3702 mode = ieee80211_chan2mode(ic, ic->ic_curchan); 3703 rs = &ic->ic_sup_rates[mode]; 3704 3705 /* supported rates IE */ 3706 *frm++ = IEEE80211_ELEMID_RATES; 3707 nrates = rs->ir_nrates; 3708 if (nrates > IEEE80211_RATE_SIZE) 3709 nrates = IEEE80211_RATE_SIZE; 3710 *frm++ = (uint8_t)nrates; 3711 (void) memcpy(frm, rs->ir_rates, nrates); 3712 frm += nrates; 3713 3714 /* supported xrates IE */ 3715 if (rs->ir_nrates > IEEE80211_RATE_SIZE) { 3716 nrates = rs->ir_nrates - IEEE80211_RATE_SIZE; 3717 *frm++ = IEEE80211_ELEMID_XRATES; 3718 *frm++ = (uint8_t)nrates; 3719 (void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates); 3720 frm += nrates; 3721 } 3722 3723 /* optionnal IE (usually for wpa) */ 3724 if (ic->ic_opt_ie != NULL) { 3725 (void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len); 3726 frm += ic->ic_opt_ie_len; 3727 } 3728 3729 /* setup length of probe request */ 3730 hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh)); 3731 hdr->len = LE_16(hdr->nchan * sizeof (iwk_scan_chan_t) + 3732 LE_16(hdr->tx_cmd.len) + sizeof (iwk_scan_hdr_t)); 3733 3734 /* 3735 * the attribute of the scan channels are required after the probe 3736 * request frame. 3737 */ 3738 chan = (iwk_scan_chan_t *)frm; 3739 for (i = 1; i <= hdr->nchan; i++, chan++) { 3740 if (ic->ic_des_esslen) { 3741 chan->type = 3; 3742 } else { 3743 chan->type = 1; 3744 } 3745 3746 chan->chan = ieee80211_chan2ieee(ic, ic->ic_curchan); 3747 chan->tpc.tx_gain = 0x3f; 3748 chan->tpc.dsp_atten = 110; 3749 chan->active_dwell = LE_16(50); 3750 chan->passive_dwell = LE_16(120); 3751 3752 frm += sizeof (iwk_scan_chan_t); 3753 } 3754 3755 pktlen = _PTRDIFF(frm, cmd); 3756 3757 (void) memset(desc, 0, sizeof (*desc)); 3758 desc->val0 = 1 << 24; 3759 desc->pa[0].tb1_addr = 3760 (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff); 3761 desc->pa[0].val1 = (pktlen << 4) & 0xfff0; 3762 3763 /* 3764 * maybe for cmd, filling the byte cnt table is not necessary. 3765 * anyway, we fill it here. 3766 */ 3767 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 3768 tfd_offset[ring->cur].val = 8; 3769 if (ring->cur < IWK_MAX_WIN_SIZE) { 3770 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]. 
3771 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8; 3772 } 3773 3774 /* kick cmd ring */ 3775 ring->cur = (ring->cur + 1) % ring->count; 3776 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3777 3778 return (IWK_SUCCESS); 3779 } 3780 3781 static int 3782 iwk_config(iwk_sc_t *sc) 3783 { 3784 ieee80211com_t *ic = &sc->sc_ic; 3785 iwk_powertable_cmd_t powertable; 3786 iwk_bt_cmd_t bt; 3787 iwk_add_sta_t node; 3788 iwk_link_quality_cmd_t link_quality; 3789 int i, err; 3790 uint16_t masks = 0; 3791 3792 /* 3793 * set power mode. Disable power management at present, do it later 3794 */ 3795 (void) memset(&powertable, 0, sizeof (powertable)); 3796 powertable.flags = LE_16(0x8); 3797 err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable, 3798 sizeof (powertable), 0); 3799 if (err != IWK_SUCCESS) { 3800 cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n"); 3801 return (err); 3802 } 3803 3804 /* configure bt coexistence */ 3805 (void) memset(&bt, 0, sizeof (bt)); 3806 bt.flags = 3; 3807 bt.lead_time = 0xaa; 3808 bt.max_kill = 1; 3809 err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt, 3810 sizeof (bt), 0); 3811 if (err != IWK_SUCCESS) { 3812 cmn_err(CE_WARN, 3813 "iwk_config(): " 3814 "failed to configurate bt coexistence\n"); 3815 return (err); 3816 } 3817 3818 /* configure rxon */ 3819 (void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t)); 3820 IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr); 3821 IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr); 3822 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan)); 3823 sc->sc_config.flags = LE_32(RXON_FLG_TSF2HOST_MSK | 3824 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_BAND_24G_MSK); 3825 sc->sc_config.flags &= LE_32(~RXON_FLG_CCK_MSK); 3826 switch (ic->ic_opmode) { 3827 case IEEE80211_M_STA: 3828 sc->sc_config.dev_type = RXON_DEV_TYPE_ESS; 3829 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 3830 RXON_FILTER_DIS_DECRYPT_MSK | 3831 RXON_FILTER_DIS_GRP_DECRYPT_MSK); 3832 break; 3833 case IEEE80211_M_IBSS: 3834 case IEEE80211_M_AHDEMO: 3835 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS; 3836 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK); 3837 sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 3838 RXON_FILTER_DIS_DECRYPT_MSK | 3839 RXON_FILTER_DIS_GRP_DECRYPT_MSK); 3840 break; 3841 case IEEE80211_M_HOSTAP: 3842 sc->sc_config.dev_type = RXON_DEV_TYPE_AP; 3843 break; 3844 case IEEE80211_M_MONITOR: 3845 sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER; 3846 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 3847 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); 3848 break; 3849 } 3850 sc->sc_config.cck_basic_rates = 0x0f; 3851 sc->sc_config.ofdm_basic_rates = 0xff; 3852 3853 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff; 3854 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff; 3855 3856 /* set antenna */ 3857 3858 sc->sc_config.rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK | 3859 (0x7 << RXON_RX_CHAIN_VALID_POS) | 3860 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) | 3861 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS)); 3862 3863 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 3864 sizeof (iwk_rxon_cmd_t), 0); 3865 if (err != IWK_SUCCESS) { 3866 cmn_err(CE_WARN, "iwk_config(): " 3867 "failed to set configure command\n"); 3868 return (err); 3869 } 3870 /* obtain current temperature of chipset */ 3871 sc->sc_tempera = iwk_curr_tempera(sc); 3872 3873 /* make Tx power calibration to determine the gains of DSP and radio */ 3874 err = iwk_tx_power_calibration(sc); 3875 if (err) { 
3876 cmn_err(CE_WARN, "iwk_config(): " 3877 "failed to set tx power table\n"); 3878 return (err); 3879 } 3880 3881 /* add broadcast node so that we can send broadcast frame */ 3882 (void) memset(&node, 0, sizeof (node)); 3883 (void) memset(node.bssid, 0xff, 6); 3884 node.id = IWK_BROADCAST_ID; 3885 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0); 3886 if (err != IWK_SUCCESS) { 3887 cmn_err(CE_WARN, "iwk_config(): " 3888 "failed to add broadcast node\n"); 3889 return (err); 3890 } 3891 3892 /* TX_LINK_QUALITY cmd ? */ 3893 (void) memset(&link_quality, 0, sizeof (link_quality)); 3894 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 3895 masks |= RATE_MCS_CCK_MSK; 3896 masks |= RATE_MCS_ANT_B_MSK; 3897 masks &= ~RATE_MCS_ANT_A_MSK; 3898 link_quality.rate_n_flags[i] = 3899 LE_32(iwk_rate_to_plcp(2) | masks); 3900 } 3901 3902 link_quality.general_params.single_stream_ant_msk = 2; 3903 link_quality.general_params.dual_stream_ant_msk = 3; 3904 link_quality.agg_params.agg_dis_start_th = 3; 3905 link_quality.agg_params.agg_time_limit = LE_16(4000); 3906 link_quality.sta_id = IWK_BROADCAST_ID; 3907 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality, 3908 sizeof (link_quality), 0); 3909 if (err != IWK_SUCCESS) { 3910 cmn_err(CE_WARN, "iwk_config(): " 3911 "failed to config link quality table\n"); 3912 return (err); 3913 } 3914 3915 return (IWK_SUCCESS); 3916 } 3917 3918 static void 3919 iwk_stop_master(iwk_sc_t *sc) 3920 { 3921 uint32_t tmp; 3922 int n; 3923 3924 tmp = IWK_READ(sc, CSR_RESET); 3925 IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER); 3926 3927 tmp = IWK_READ(sc, CSR_GP_CNTRL); 3928 if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) == 3929 CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) 3930 return; 3931 3932 for (n = 0; n < 2000; n++) { 3933 if (IWK_READ(sc, CSR_RESET) & 3934 CSR_RESET_REG_FLAG_MASTER_DISABLED) 3935 break; 3936 DELAY(1000); 3937 } 3938 if (n == 2000) 3939 IWK_DBG((IWK_DEBUG_HW, 3940 "timeout waiting for master stop\n")); 3941 } 3942 3943 static int 3944 iwk_power_up(iwk_sc_t *sc) 3945 { 3946 uint32_t tmp; 3947 3948 iwk_mac_access_enter(sc); 3949 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL); 3950 tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC; 3951 tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN; 3952 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp); 3953 iwk_mac_access_exit(sc); 3954 3955 DELAY(5000); 3956 return (IWK_SUCCESS); 3957 } 3958 3959 static int 3960 iwk_preinit(iwk_sc_t *sc) 3961 { 3962 uint32_t tmp; 3963 int n; 3964 uint8_t vlink; 3965 3966 /* clear any pending interrupts */ 3967 IWK_WRITE(sc, CSR_INT, 0xffffffff); 3968 3969 tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS); 3970 IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS, 3971 tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 3972 3973 tmp = IWK_READ(sc, CSR_GP_CNTRL); 3974 IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 3975 3976 /* wait for clock ready */ 3977 for (n = 0; n < 1000; n++) { 3978 if (IWK_READ(sc, CSR_GP_CNTRL) & 3979 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) 3980 break; 3981 DELAY(10); 3982 } 3983 if (n == 1000) { 3984 cmn_err(CE_WARN, 3985 "iwk_preinit(): timeout waiting for clock ready\n"); 3986 return (ETIMEDOUT); 3987 } 3988 iwk_mac_access_enter(sc); 3989 tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG); 3990 iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp | 3991 APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT); 3992 3993 DELAY(20); 3994 tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT); 3995 iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp | 3996 APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE); 3997 
iwk_mac_access_exit(sc); 3998 3999 IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? */ 4000 4001 (void) iwk_power_up(sc); 4002 4003 if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) { 4004 tmp = ddi_get32(sc->sc_cfg_handle, 4005 (uint32_t *)(sc->sc_cfg_base + 0xe8)); 4006 ddi_put32(sc->sc_cfg_handle, 4007 (uint32_t *)(sc->sc_cfg_base + 0xe8), 4008 tmp & ~(1 << 11)); 4009 } 4010 4011 4012 vlink = ddi_get8(sc->sc_cfg_handle, 4013 (uint8_t *)(sc->sc_cfg_base + 0xf0)); 4014 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0), 4015 vlink & ~2); 4016 4017 tmp = IWK_READ(sc, CSR_SW_VER); 4018 tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | 4019 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI | 4020 CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R; 4021 IWK_WRITE(sc, CSR_SW_VER, tmp); 4022 4023 /* make sure power supply on each part of the hardware */ 4024 iwk_mac_access_enter(sc); 4025 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL); 4026 tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ; 4027 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp); 4028 DELAY(5); 4029 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL); 4030 tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ; 4031 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp); 4032 iwk_mac_access_exit(sc); 4033 return (IWK_SUCCESS); 4034 } 4035 4036 /* 4037 * set up semphore flag to own EEPROM 4038 */ 4039 static int iwk_eep_sem_down(iwk_sc_t *sc) 4040 { 4041 int count1, count2; 4042 uint32_t tmp; 4043 4044 for (count1 = 0; count1 < 1000; count1++) { 4045 tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG); 4046 IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG, 4047 tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM); 4048 4049 for (count2 = 0; count2 < 2; count2++) { 4050 if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) & 4051 CSR_HW_IF_CONFIG_REG_EEP_SEM) 4052 return (IWK_SUCCESS); 4053 DELAY(10000); 4054 } 4055 } 4056 return (IWK_FAIL); 4057 } 4058 4059 /* 4060 * reset semphore flag to release EEPROM 4061 */ 4062 static void iwk_eep_sem_up(iwk_sc_t *sc) 4063 { 4064 uint32_t tmp; 4065 4066 tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG); 4067 IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG, 4068 tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM)); 4069 } 4070 4071 /* 4072 * This function load all infomation in eeprom into iwk_eep 4073 * structure in iwk_sc_t structure 4074 */ 4075 static int iwk_eep_load(iwk_sc_t *sc) 4076 { 4077 int i, rr; 4078 uint32_t rv, tmp, eep_gp; 4079 uint16_t addr, eep_sz = sizeof (sc->sc_eep_map); 4080 uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map; 4081 4082 /* read eeprom gp register in CSR */ 4083 eep_gp = IWK_READ(sc, CSR_EEPROM_GP); 4084 if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) == 4085 CSR_EEPROM_GP_BAD_SIGNATURE) { 4086 cmn_err(CE_WARN, "EEPROM not found\n"); 4087 return (IWK_FAIL); 4088 } 4089 4090 rr = iwk_eep_sem_down(sc); 4091 if (rr != 0) { 4092 cmn_err(CE_WARN, "failed to own EEPROM\n"); 4093 return (IWK_FAIL); 4094 } 4095 4096 for (addr = 0; addr < eep_sz; addr += 2) { 4097 IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1); 4098 tmp = IWK_READ(sc, CSR_EEPROM_REG); 4099 IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2)); 4100 4101 for (i = 0; i < 10; i++) { 4102 rv = IWK_READ(sc, CSR_EEPROM_REG); 4103 if (rv & 1) 4104 break; 4105 DELAY(10); 4106 } 4107 4108 if (!(rv & 1)) { 4109 cmn_err(CE_WARN, "time out when read EEPROM\n"); 4110 iwk_eep_sem_up(sc); 4111 return (IWK_FAIL); 4112 } 4113 4114 eep_p[addr/2] = LE_16(rv >> 16); 4115 } 4116 4117 iwk_eep_sem_up(sc); 4118 return (IWK_SUCCESS); 4119 } 4120 4121 /* 4122 * init mac address in ieee80211com_t struct 4123 */ 4124 static void iwk_get_mac_from_eep(iwk_sc_t *sc) 4125 { 4126 ieee80211com_t *ic = &sc->sc_ic; 4127 struct 
iwk_eep *ep = &sc->sc_eep_map; 4128 4129 IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address); 4130 4131 IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n", 4132 ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2], 4133 ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5])); 4134 } 4135 4136 static int 4137 iwk_init(iwk_sc_t *sc) 4138 { 4139 int qid, n, err; 4140 clock_t clk; 4141 uint32_t tmp; 4142 4143 mutex_enter(&sc->sc_glock); 4144 sc->sc_flags &= ~IWK_F_FW_INIT; 4145 4146 (void) iwk_preinit(sc); 4147 4148 tmp = IWK_READ(sc, CSR_GP_CNTRL); 4149 if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) { 4150 cmn_err(CE_NOTE, "iwk_init(): Radio transmitter is off\n"); 4151 goto fail1; 4152 } 4153 4154 /* init Rx ring */ 4155 iwk_mac_access_enter(sc); 4156 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 4157 4158 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); 4159 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG, 4160 sc->sc_rxq.dma_desc.cookie.dmac_address >> 8); 4161 4162 IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG, 4163 ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address + 4164 offsetof(struct iwk_shared, val0)) >> 4)); 4165 4166 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 4167 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 4168 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 4169 IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K | 4170 (RX_QUEUE_SIZE_LOG << 4171 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT)); 4172 iwk_mac_access_exit(sc); 4173 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 4174 (RX_QUEUE_SIZE - 1) & ~0x7); 4175 4176 /* init Tx rings */ 4177 iwk_mac_access_enter(sc); 4178 iwk_reg_write(sc, SCD_TXFACT, 0); 4179 4180 /* keep warm page */ 4181 iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG, 4182 sc->sc_dma_kw.cookie.dmac_address >> 4); 4183 4184 for (qid = 0; qid < IWK_NUM_QUEUES; qid++) { 4185 IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid), 4186 sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8); 4187 IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid), 4188 IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 4189 IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL); 4190 } 4191 iwk_mac_access_exit(sc); 4192 4193 /* clear "radio off" and "disable command" bits */ 4194 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 4195 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, 4196 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 4197 4198 /* clear any pending interrupts */ 4199 IWK_WRITE(sc, CSR_INT, 0xffffffff); 4200 4201 /* enable interrupts */ 4202 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK); 4203 4204 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 4205 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 4206 4207 /* 4208 * backup ucode data part for future use. 
4209 */ 4210 (void) memcpy(sc->sc_dma_fw_data_bak.mem_va, 4211 sc->sc_dma_fw_data.mem_va, 4212 sc->sc_dma_fw_data.alength); 4213 4214 for (n = 0; n < 2; n++) { 4215 /* load firmware init segment into NIC */ 4216 err = iwk_load_firmware(sc); 4217 if (err != IWK_SUCCESS) { 4218 cmn_err(CE_WARN, "iwk_init(): " 4219 "failed to setup boot firmware\n"); 4220 continue; 4221 } 4222 4223 /* now press "execute" start running */ 4224 IWK_WRITE(sc, CSR_RESET, 0); 4225 break; 4226 } 4227 if (n == 2) { 4228 cmn_err(CE_WARN, "iwk_init(): failed to load firmware\n"); 4229 goto fail1; 4230 } 4231 /* ..and wait at most one second for adapter to initialize */ 4232 clk = ddi_get_lbolt() + drv_usectohz(2000000); 4233 while (!(sc->sc_flags & IWK_F_FW_INIT)) { 4234 if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0) 4235 break; 4236 } 4237 if (!(sc->sc_flags & IWK_F_FW_INIT)) { 4238 cmn_err(CE_WARN, 4239 "iwk_init(): timeout waiting for firmware init\n"); 4240 goto fail1; 4241 } 4242 4243 /* 4244 * at this point, the firmware is loaded OK, then config the hardware 4245 * with the ucode API, including rxon, txpower, etc. 4246 */ 4247 err = iwk_config(sc); 4248 if (err) { 4249 cmn_err(CE_WARN, "iwk_init(): failed to configure device\n"); 4250 goto fail1; 4251 } 4252 4253 /* at this point, hardware may receive beacons :) */ 4254 mutex_exit(&sc->sc_glock); 4255 return (IWK_SUCCESS); 4256 4257 fail1: 4258 err = IWK_FAIL; 4259 mutex_exit(&sc->sc_glock); 4260 return (err); 4261 } 4262 4263 static void 4264 iwk_stop(iwk_sc_t *sc) 4265 { 4266 uint32_t tmp; 4267 int i; 4268 4269 if (!(sc->sc_flags & IWK_F_QUIESCED)) 4270 mutex_enter(&sc->sc_glock); 4271 4272 IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 4273 /* disable interrupts */ 4274 IWK_WRITE(sc, CSR_INT_MASK, 0); 4275 IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK); 4276 IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff); 4277 4278 /* reset all Tx rings */ 4279 for (i = 0; i < IWK_NUM_QUEUES; i++) 4280 iwk_reset_tx_ring(sc, &sc->sc_txq[i]); 4281 4282 /* reset Rx ring */ 4283 iwk_reset_rx_ring(sc); 4284 4285 iwk_mac_access_enter(sc); 4286 iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT); 4287 iwk_mac_access_exit(sc); 4288 4289 DELAY(5); 4290 4291 iwk_stop_master(sc); 4292 4293 sc->sc_tx_timer = 0; 4294 sc->sc_flags &= ~IWK_F_SCANNING; 4295 sc->sc_scan_pending = 0; 4296 4297 tmp = IWK_READ(sc, CSR_RESET); 4298 IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET); 4299 4300 if (!(sc->sc_flags & IWK_F_QUIESCED)) 4301 mutex_exit(&sc->sc_glock); 4302 } 4303 4304 /* 4305 * Naive implementation of the Adaptive Multi Rate Retry algorithm: 4306 * "IEEE 802.11 Rate Adaptation: A Practical Approach" 4307 * Mathieu Lacage, Hossein Manshaei, Thierry Turletti 4308 * INRIA Sophia - Projet Planete 4309 * http://www-sop.inria.fr/rapports/sophia/RR-5208.html 4310 */ 4311 #define is_success(amrr) \ 4312 ((amrr)->retrycnt < (amrr)->txcnt / 10) 4313 #define is_failure(amrr) \ 4314 ((amrr)->retrycnt > (amrr)->txcnt / 3) 4315 #define is_enough(amrr) \ 4316 ((amrr)->txcnt > 100) 4317 #define is_min_rate(in) \ 4318 ((in)->in_txrate == 0) 4319 #define is_max_rate(in) \ 4320 ((in)->in_txrate == (in)->in_rates.ir_nrates - 1) 4321 #define increase_rate(in) \ 4322 ((in)->in_txrate++) 4323 #define decrease_rate(in) \ 4324 ((in)->in_txrate--) 4325 #define reset_cnt(amrr) \ 4326 { (amrr)->txcnt = (amrr)->retrycnt = 0; } 4327 4328 #define IWK_AMRR_MIN_SUCCESS_THRESHOLD 1 4329 #define IWK_AMRR_MAX_SUCCESS_THRESHOLD 15 4330 4331 static void 4332 iwk_amrr_init(iwk_amrr_t 
*amrr) 4333 { 4334 amrr->success = 0; 4335 amrr->recovery = 0; 4336 amrr->txcnt = amrr->retrycnt = 0; 4337 amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD; 4338 } 4339 4340 static void 4341 iwk_amrr_timeout(iwk_sc_t *sc) 4342 { 4343 ieee80211com_t *ic = &sc->sc_ic; 4344 4345 IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n")); 4346 if (ic->ic_opmode == IEEE80211_M_STA) 4347 iwk_amrr_ratectl(NULL, ic->ic_bss); 4348 else 4349 ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL); 4350 sc->sc_clk = ddi_get_lbolt(); 4351 } 4352 4353 /* ARGSUSED */ 4354 static void 4355 iwk_amrr_ratectl(void *arg, ieee80211_node_t *in) 4356 { 4357 iwk_amrr_t *amrr = (iwk_amrr_t *)in; 4358 int need_change = 0; 4359 4360 if (is_success(amrr) && is_enough(amrr)) { 4361 amrr->success++; 4362 if (amrr->success >= amrr->success_threshold && 4363 !is_max_rate(in)) { 4364 amrr->recovery = 1; 4365 amrr->success = 0; 4366 increase_rate(in); 4367 IWK_DBG((IWK_DEBUG_RATECTL, 4368 "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n", 4369 in->in_txrate, amrr->txcnt, amrr->retrycnt)); 4370 need_change = 1; 4371 } else { 4372 amrr->recovery = 0; 4373 } 4374 } else if (is_failure(amrr)) { 4375 amrr->success = 0; 4376 if (!is_min_rate(in)) { 4377 if (amrr->recovery) { 4378 amrr->success_threshold++; 4379 if (amrr->success_threshold > 4380 IWK_AMRR_MAX_SUCCESS_THRESHOLD) 4381 amrr->success_threshold = 4382 IWK_AMRR_MAX_SUCCESS_THRESHOLD; 4383 } else { 4384 amrr->success_threshold = 4385 IWK_AMRR_MIN_SUCCESS_THRESHOLD; 4386 } 4387 decrease_rate(in); 4388 IWK_DBG((IWK_DEBUG_RATECTL, 4389 "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n", 4390 in->in_txrate, amrr->txcnt, amrr->retrycnt)); 4391 need_change = 1; 4392 } 4393 amrr->recovery = 0; /* paper is incorrect */ 4394 } 4395 4396 if (is_enough(amrr) || need_change) 4397 reset_cnt(amrr); 4398 } 4399 4400 /* 4401 * calculate 4965 chipset's kelvin temperature according to 4402 * the data of init alive and satistics notification. 
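 * In short, the code below computes
 *     tempera = TEMPERATURE_CALIB_A_VAL * (R4 - R2) / (R3 - R1),
 * then scales the result by 97/100 and adds TEMPERATURE_CALIB_KELVIN_OFFSET
 * to obtain kelvins; R4 comes from the latest statistics notification when
 * one is available, otherwise from the init alive data.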
4403 * The details are described in iwk_calibration.h file 4404 */ 4405 static int32_t iwk_curr_tempera(iwk_sc_t *sc) 4406 { 4407 int32_t tempera; 4408 int32_t r1, r2, r3; 4409 uint32_t r4_u; 4410 int32_t r4_s; 4411
4412 if (iwk_is_fat_channel(sc)) { 4413 r1 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r1[1]); 4414 r2 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r2[1]); 4415 r3 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r3[1]); 4416 r4_u = LE_32(sc->sc_card_alive_init.therm_r4[1]); 4417 } else { 4418 r1 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r1[0]); 4419 r2 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r2[0]); 4420 r3 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r3[0]); 4421 r4_u = LE_32(sc->sc_card_alive_init.therm_r4[0]); 4422 } 4423
4424 if (sc->sc_flags & IWK_F_STATISTICS) { 4425 r4_s = (int32_t)(LE_32(sc->sc_statistics.general.temperature) << 4426 (31-23)) >> (31-23); 4427 } else { 4428 r4_s = (int32_t)(r4_u << (31-23)) >> (31-23); 4429 } 4430
4431 IWK_DBG((IWK_DEBUG_CALIBRATION, "temperature R[1-4]: %d %d %d %d\n", 4432 r1, r2, r3, r4_s)); 4433
4434 if (r3 == r1) { 4435 cmn_err(CE_WARN, "iwk_curr_tempera(): " 4436 "failed to calculate temperature " 4437 "because r3 = r1\n"); 4438 return (DDI_FAILURE); 4439 } 4440
4441 tempera = TEMPERATURE_CALIB_A_VAL * (r4_s - r2); 4442 tempera /= (r3 - r1); 4443 tempera = (tempera*97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET; 4444
4445 IWK_DBG((IWK_DEBUG_CALIBRATION, "calculated temperature: %dK, %dC\n", 4446 tempera, KELVIN_TO_CELSIUS(tempera))); 4447
4448 return (tempera); 4449 } 4450
4451 /* Determine whether 4965 is using 2.4 GHz band */ 4452 static inline int iwk_is_24G_band(iwk_sc_t *sc) 4453 { 4454 return (LE_32(sc->sc_config.flags) & RXON_FLG_BAND_24G_MSK); 4455 } 4456
4457 /* Determine whether 4965 is using fat channel */ 4458 static inline int iwk_is_fat_channel(iwk_sc_t *sc) 4459 { 4460 return ((LE_32(sc->sc_config.flags) & 4461 RXON_FLG_CHANNEL_MODE_PURE_40_MSK) || 4462 (LE_32(sc->sc_config.flags) & RXON_FLG_CHANNEL_MODE_MIXED_MSK)); 4463 } 4464
4465 /* 4466 * In MIMO mode, determine which group 4965's current channel belongs to.
4467 * For more infomation about "channel group", 4468 * please refer to iwk_calibration.h file 4469 */ 4470 static int iwk_txpower_grp(uint16_t channel) 4471 { 4472 if (channel >= CALIB_IWK_TX_ATTEN_GR5_FCH && 4473 channel <= CALIB_IWK_TX_ATTEN_GR5_LCH) { 4474 return (CALIB_CH_GROUP_5); 4475 } 4476 4477 if (channel >= CALIB_IWK_TX_ATTEN_GR1_FCH && 4478 channel <= CALIB_IWK_TX_ATTEN_GR1_LCH) { 4479 return (CALIB_CH_GROUP_1); 4480 } 4481 4482 if (channel >= CALIB_IWK_TX_ATTEN_GR2_FCH && 4483 channel <= CALIB_IWK_TX_ATTEN_GR2_LCH) { 4484 return (CALIB_CH_GROUP_2); 4485 } 4486 4487 if (channel >= CALIB_IWK_TX_ATTEN_GR3_FCH && 4488 channel <= CALIB_IWK_TX_ATTEN_GR3_LCH) { 4489 return (CALIB_CH_GROUP_3); 4490 } 4491 4492 if (channel >= CALIB_IWK_TX_ATTEN_GR4_FCH && 4493 channel <= CALIB_IWK_TX_ATTEN_GR4_LCH) { 4494 return (CALIB_CH_GROUP_4); 4495 } 4496 4497 cmn_err(CE_WARN, "iwk_txpower_grp(): " 4498 "can't find txpower group for channel %d.\n", channel); 4499 4500 return (DDI_FAILURE); 4501 } 4502 4503 /* 2.4 GHz */ 4504 static uint16_t iwk_eep_band_1[14] = { 4505 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 4506 }; 4507 4508 /* 5.2 GHz bands */ 4509 static uint16_t iwk_eep_band_2[13] = { 4510 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 4511 }; 4512 4513 static uint16_t iwk_eep_band_3[12] = { 4514 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 4515 }; 4516 4517 static uint16_t iwk_eep_band_4[11] = { 4518 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 4519 }; 4520 4521 static uint16_t iwk_eep_band_5[6] = { 4522 145, 149, 153, 157, 161, 165 4523 }; 4524 4525 static uint16_t iwk_eep_band_6[7] = { 4526 1, 2, 3, 4, 5, 6, 7 4527 }; 4528 4529 static uint16_t iwk_eep_band_7[11] = { 4530 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 4531 }; 4532 4533 /* Get regulatory data from eeprom for a given channel */ 4534 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc, 4535 uint16_t channel, 4536 int is_24G, int is_fat, int is_hi_chan) 4537 { 4538 int32_t i; 4539 uint16_t chan; 4540 4541 if (is_fat) { /* 11n mode */ 4542 4543 if (is_hi_chan) { 4544 chan = channel - 4; 4545 } else { 4546 chan = channel; 4547 } 4548 4549 for (i = 0; i < 7; i++) { 4550 if (iwk_eep_band_6[i] == chan) { 4551 return (&sc->sc_eep_map.band_24_channels[i]); 4552 } 4553 } 4554 for (i = 0; i < 11; i++) { 4555 if (iwk_eep_band_7[i] == chan) { 4556 return (&sc->sc_eep_map.band_52_channels[i]); 4557 } 4558 } 4559 } else if (is_24G) { /* 2.4 GHz band */ 4560 for (i = 0; i < 14; i++) { 4561 if (iwk_eep_band_1[i] == channel) { 4562 return (&sc->sc_eep_map.band_1_channels[i]); 4563 } 4564 } 4565 } else { /* 5 GHz band */ 4566 for (i = 0; i < 13; i++) { 4567 if (iwk_eep_band_2[i] == channel) { 4568 return (&sc->sc_eep_map.band_2_channels[i]); 4569 } 4570 } 4571 for (i = 0; i < 12; i++) { 4572 if (iwk_eep_band_3[i] == channel) { 4573 return (&sc->sc_eep_map.band_3_channels[i]); 4574 } 4575 } 4576 for (i = 0; i < 11; i++) { 4577 if (iwk_eep_band_4[i] == channel) { 4578 return (&sc->sc_eep_map.band_4_channels[i]); 4579 } 4580 } 4581 for (i = 0; i < 6; i++) { 4582 if (iwk_eep_band_5[i] == channel) { 4583 return (&sc->sc_eep_map.band_5_channels[i]); 4584 } 4585 } 4586 } 4587 4588 return (NULL); 4589 } 4590 4591 /* 4592 * Determine which subband a given channel belongs 4593 * to in 2.4 GHz or 5 GHz band 4594 */ 4595 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel) 4596 { 4597 int32_t b_n = -1; 4598 4599 for (b_n = 0; b_n < EEP_TX_POWER_BANDS; b_n++) { 4600 if (0 == 
sc->sc_eep_map.calib_info.band_info_tbl[b_n].ch_from) { 4601 continue; 4602 } 4603 4604 if ((channel >= 4605 (uint16_t)sc->sc_eep_map.calib_info. 4606 band_info_tbl[b_n].ch_from) && 4607 (channel <= 4608 (uint16_t)sc->sc_eep_map.calib_info. 4609 band_info_tbl[b_n].ch_to)) { 4610 break; 4611 } 4612 } 4613 4614 return (b_n); 4615 } 4616 4617 /* Make a special division for interpolation operation */ 4618 static int iwk_division(int32_t num, int32_t denom, int32_t *res) 4619 { 4620 int32_t sign = 1; 4621 4622 if (num < 0) { 4623 sign = -sign; 4624 num = -num; 4625 } 4626 4627 if (denom < 0) { 4628 sign = -sign; 4629 denom = -denom; 4630 } 4631 4632 *res = ((num*2 + denom) / (denom*2)) * sign; 4633 4634 return (IWK_SUCCESS); 4635 } 4636 4637 /* Make interpolation operation */ 4638 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1, 4639 int32_t x2, int32_t y2) 4640 { 4641 int32_t val; 4642 4643 if (x2 == x1) { 4644 return (y1); 4645 } else { 4646 (void) iwk_division((x2-x)*(y1-y2), (x2-x1), &val); 4647 return (val + y2); 4648 } 4649 } 4650 4651 /* Get interpolation measurement data of a given channel for all chains. */ 4652 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel, 4653 struct iwk_eep_calib_channel_info *chan_info) 4654 { 4655 int32_t ban_n; 4656 uint32_t ch1_n, ch2_n; 4657 int32_t c, m; 4658 struct iwk_eep_calib_measure *m1_p, *m2_p, *m_p; 4659 4660 /* determine subband number */ 4661 ban_n = iwk_band_number(sc, channel); 4662 if (ban_n >= EEP_TX_POWER_BANDS) { 4663 return (DDI_FAILURE); 4664 } 4665 4666 ch1_n = 4667 (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch1.ch_num; 4668 ch2_n = 4669 (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch2.ch_num; 4670 4671 chan_info->ch_num = (uint8_t)channel; /* given channel number */ 4672 4673 /* 4674 * go through all chains on chipset 4675 */ 4676 for (c = 0; c < EEP_TX_POWER_TX_CHAINS; c++) { 4677 /* 4678 * go through all factory measurements 4679 */ 4680 for (m = 0; m < EEP_TX_POWER_MEASUREMENTS; m++) { 4681 m1_p = 4682 &(sc->sc_eep_map.calib_info. 4683 band_info_tbl[ban_n].ch1.measure[c][m]); 4684 m2_p = 4685 &(sc->sc_eep_map.calib_info.band_info_tbl[ban_n]. 4686 ch2.measure[c][m]); 4687 m_p = &(chan_info->measure[c][m]); 4688 4689 /* 4690 * make interpolation to get actual 4691 * Tx power for given channel 4692 */ 4693 m_p->actual_pow = iwk_interpolate_value(channel, 4694 ch1_n, m1_p->actual_pow, 4695 ch2_n, m2_p->actual_pow); 4696 4697 /* make interpolation to get index into gain table */ 4698 m_p->gain_idx = iwk_interpolate_value(channel, 4699 ch1_n, m1_p->gain_idx, 4700 ch2_n, m2_p->gain_idx); 4701 4702 /* make interpolation to get chipset temperature */ 4703 m_p->temperature = iwk_interpolate_value(channel, 4704 ch1_n, m1_p->temperature, 4705 ch2_n, m2_p->temperature); 4706 4707 /* 4708 * make interpolation to get power 4709 * amp detector level 4710 */ 4711 m_p->pa_det = iwk_interpolate_value(channel, ch1_n, 4712 m1_p->pa_det, 4713 ch2_n, m2_p->pa_det); 4714 } 4715 } 4716 4717 return (IWK_SUCCESS); 4718 } 4719 4720 /* 4721 * Calculate voltage compensation for Tx power. 
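 * Roughly, the routine below computes
 *     vol_comp = round((curr_voltage - eep_voltage) /
 *         TX_POWER_IWK_VOLTAGE_CODES_PER_03V)
 * with the round-to-nearest division helper above, doubles the result when
 * the current voltage is higher than the EEPROM value, and falls back to 0
 * whenever the result lands outside [-2, 2].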
For more infomation, 4722 * please refer to iwk_calibration.h file 4723 */ 4724 static int32_t iwk_voltage_compensation(int32_t eep_voltage, 4725 int32_t curr_voltage) 4726 { 4727 int32_t vol_comp = 0; 4728 4729 if ((TX_POWER_IWK_ILLEGAL_VOLTAGE == eep_voltage) || 4730 (TX_POWER_IWK_ILLEGAL_VOLTAGE == curr_voltage)) { 4731 return (vol_comp); 4732 } 4733 4734 (void) iwk_division(curr_voltage-eep_voltage, 4735 TX_POWER_IWK_VOLTAGE_CODES_PER_03V, &vol_comp); 4736 4737 if (curr_voltage > eep_voltage) { 4738 vol_comp *= 2; 4739 } 4740 if ((vol_comp < -2) || (vol_comp > 2)) { 4741 vol_comp = 0; 4742 } 4743 4744 return (vol_comp); 4745 } 4746 4747 /* 4748 * Thermal compensation values for txpower for various frequency ranges ... 4749 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust 4750 */ 4751 static struct iwk_txpower_tempera_comp { 4752 int32_t degrees_per_05db_a; 4753 int32_t degrees_per_05db_a_denom; 4754 } txpower_tempera_comp_table[CALIB_CH_GROUP_MAX] = { 4755 {9, 2}, /* group 0 5.2, ch 34-43 */ 4756 {4, 1}, /* group 1 5.2, ch 44-70 */ 4757 {4, 1}, /* group 2 5.2, ch 71-124 */ 4758 {4, 1}, /* group 3 5.2, ch 125-200 */ 4759 {3, 1} /* group 4 2.4, ch all */ 4760 }; 4761 4762 /* 4763 * bit-rate-dependent table to prevent Tx distortion, in half-dB units, 4764 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. 4765 */ 4766 static int32_t back_off_table[] = { 4767 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */ 4768 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */ 4769 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */ 4770 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */ 4771 10 /* CCK */ 4772 }; 4773 4774 /* determine minimum Tx power index in gain table */ 4775 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G) 4776 { 4777 if ((!is_24G) && ((rate_pow_idx & 7) <= 4)) { 4778 return (MIN_TX_GAIN_INDEX_52GHZ_EXT); 4779 } 4780 4781 return (MIN_TX_GAIN_INDEX); 4782 } 4783 4784 /* 4785 * Determine DSP and radio gain according to temperature and other factors. 
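 * The heart of it is the per-chain index into the gain table computed
 * further down, essentially
 *     gain_idx = interpolated_gain_idx - (target_power - interpolated_pow) -
 *         temperature_comp - voltage_comp + mimo_attenuation,
 * which is then adjusted per band/modulation and clamped to the table
 * range (0 .. 107).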
4786 * This function does the bulk of the Tx power calibration work. 4787 */ 4788 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc, 4789 struct iwk_tx_power_db *tp_db) 4790 { 4791 int is_24G, is_fat, is_high_chan = 0, is_mimo; 4792 int c, r; 4793 int32_t target_power; 4794 int32_t tx_grp = CALIB_CH_GROUP_MAX; 4795 uint16_t channel; 4796 uint8_t saturation_power; 4797 int32_t regu_power; 4798 int32_t curr_regu_power; 4799 struct iwk_eep_channel *eep_chan_p; 4800 struct iwk_eep_calib_channel_info eep_chan_calib; 4801 int32_t eep_voltage, init_voltage; 4802 int32_t voltage_compensation; 4803 int32_t temperature; 4804 int32_t degrees_per_05db_num; 4805 int32_t degrees_per_05db_denom; 4806 struct iwk_eep_calib_measure *measure_p; 4807 int32_t interpo_temp; 4808 int32_t power_limit; 4809 int32_t atten_value; 4810 int32_t tempera_comp[2]; 4811 int32_t interpo_gain_idx[2]; 4812 int32_t interpo_actual_pow[2]; 4813 union iwk_tx_power_dual_stream txpower_gains; 4814 int32_t txpower_gains_idx; 4815
4816 channel = LE_16(sc->sc_config.chan); 4817
4818 /* 2.4 GHz or 5 GHz band */ 4819 is_24G = iwk_is_24G_band(sc); 4820
4821 /* fat channel or not */ 4822 is_fat = iwk_is_fat_channel(sc); 4823
4824 /* 4825 * a fat channel is addressed by either its low or its high half 4826 * channel number; record which half is in use 4827 */ 4828 if (is_fat && (LE_32(sc->sc_config.flags) & 4829 RXON_FLG_CONTROL_CHANNEL_LOC_HIGH_MSK)) { 4830 is_high_chan = 1; 4831 } 4832
4833 if ((channel > 0) && (channel < 200)) { 4834 /* get regulatory channel data from eeprom */ 4835 eep_chan_p = iwk_get_eep_channel(sc, channel, is_24G, 4836 is_fat, is_high_chan); 4837 if (NULL == eep_chan_p) { 4838 cmn_err(CE_WARN, 4839 "iwk_txpower_table_cmd_init(): " 4840 "can't get channel information\n"); 4841 return (DDI_FAILURE); 4842 } 4843 } else { 4844 cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): " 4845 "channel(%d) isn't in proper range\n", 4846 channel); 4847 return (DDI_FAILURE); 4848 } 4849
4850 /* initial value of Tx power */ 4851 sc->sc_user_txpower = (int32_t)eep_chan_p->max_power_avg; 4852 if (sc->sc_user_txpower < IWK_TX_POWER_TARGET_POWER_MIN) { 4853 cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): " 4854 "user TX power is too weak\n"); 4855 return (DDI_FAILURE); 4856 } else if (sc->sc_user_txpower > IWK_TX_POWER_TARGET_POWER_MAX) { 4857 cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): " 4858 "user TX power is too strong\n"); 4859 return (DDI_FAILURE); 4860 } 4861
4862 target_power = 2 * sc->sc_user_txpower; 4863
4864 /* determine which group current channel belongs to */ 4865 tx_grp = iwk_txpower_grp(channel); 4866 if (tx_grp < 0) { 4867 return (tx_grp); 4868 } 4869
4870
4871 if (is_fat) { 4872 if (is_high_chan) { 4873 channel -= 2; 4874 } else { 4875 channel += 2; 4876 } 4877 } 4878
4879 /* determine saturation power */ 4880 if (is_24G) { 4881 saturation_power = 4882 sc->sc_eep_map.calib_info.saturation_power24; 4883 } else { 4884 saturation_power = 4885 sc->sc_eep_map.calib_info.saturation_power52; 4886 } 4887
4888 if (saturation_power < IWK_TX_POWER_SATURATION_MIN || 4889 saturation_power > IWK_TX_POWER_SATURATION_MAX) { 4890 if (is_24G) { 4891 saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_24; 4892 } else { 4893 saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_52; 4894 } 4895 } 4896
4897 /* determine regulatory power */ 4898 regu_power = (int32_t)eep_chan_p->max_power_avg * 2; 4899 if ((regu_power < IWK_TX_POWER_REGULATORY_MIN) || 4900 (regu_power > IWK_TX_POWER_REGULATORY_MAX)) { 4901 if (is_24G) { 4902 regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_24; 4903 }
else { 4904 regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_52; 4905 } 4906 } 4907 4908 /* 4909 * get measurement data for current channel 4910 * suach as temperature,index to gain table,actual Tx power 4911 */ 4912 (void) iwk_channel_interpolate(sc, channel, &eep_chan_calib); 4913 4914 eep_voltage = (int32_t)LE_16(sc->sc_eep_map.calib_info.voltage); 4915 init_voltage = (int32_t)LE_32(sc->sc_card_alive_init.voltage); 4916 4917 /* calculate voltage compensation to Tx power */ 4918 voltage_compensation = 4919 iwk_voltage_compensation(eep_voltage, init_voltage); 4920 4921 if (sc->sc_tempera >= IWK_TX_POWER_TEMPERATURE_MIN) { 4922 temperature = sc->sc_tempera; 4923 } else { 4924 temperature = IWK_TX_POWER_TEMPERATURE_MIN; 4925 } 4926 if (sc->sc_tempera <= IWK_TX_POWER_TEMPERATURE_MAX) { 4927 temperature = sc->sc_tempera; 4928 } else { 4929 temperature = IWK_TX_POWER_TEMPERATURE_MAX; 4930 } 4931 temperature = KELVIN_TO_CELSIUS(temperature); 4932 4933 degrees_per_05db_num = 4934 txpower_tempera_comp_table[tx_grp].degrees_per_05db_a; 4935 degrees_per_05db_denom = 4936 txpower_tempera_comp_table[tx_grp].degrees_per_05db_a_denom; 4937 4938 for (c = 0; c < 2; c++) { /* go through all chains */ 4939 measure_p = &eep_chan_calib.measure[c][1]; 4940 interpo_temp = measure_p->temperature; 4941 4942 /* determine temperature compensation to Tx power */ 4943 (void) iwk_division( 4944 (temperature-interpo_temp)*degrees_per_05db_denom, 4945 degrees_per_05db_num, &tempera_comp[c]); 4946 4947 interpo_gain_idx[c] = measure_p->gain_idx; 4948 interpo_actual_pow[c] = measure_p->actual_pow; 4949 } 4950 4951 /* 4952 * go through all rate entries in Tx power table 4953 */ 4954 for (r = 0; r < POWER_TABLE_NUM_ENTRIES; r++) { 4955 if (r & 0x8) { 4956 /* need to lower regulatory power for MIMO mode */ 4957 curr_regu_power = regu_power - 4958 IWK_TX_POWER_MIMO_REGULATORY_COMPENSATION; 4959 is_mimo = 1; 4960 } else { 4961 curr_regu_power = regu_power; 4962 is_mimo = 0; 4963 } 4964 4965 power_limit = saturation_power - back_off_table[r]; 4966 if (power_limit > curr_regu_power) { 4967 /* final Tx power limit */ 4968 power_limit = curr_regu_power; 4969 } 4970 4971 if (target_power > power_limit) { 4972 target_power = power_limit; /* final target Tx power */ 4973 } 4974 4975 for (c = 0; c < 2; c++) { /* go through all Tx chains */ 4976 if (is_mimo) { 4977 atten_value = 4978 LE_32(sc->sc_card_alive_init. 
4979 tx_atten[tx_grp][c]); 4980 } else { 4981 atten_value = 0; 4982 } 4983 4984 /* 4985 * calculate index in gain table 4986 * this step is very important 4987 */ 4988 txpower_gains_idx = interpo_gain_idx[c] - 4989 (target_power - interpo_actual_pow[c]) - 4990 tempera_comp[c] - voltage_compensation + 4991 atten_value; 4992 4993 if (txpower_gains_idx < 4994 iwk_min_power_index(r, is_24G)) { 4995 txpower_gains_idx = 4996 iwk_min_power_index(r, is_24G); 4997 } 4998 4999 if (!is_24G) { 5000 /* 5001 * support negative index for 5 GHz 5002 * band 5003 */ 5004 txpower_gains_idx += 9; 5005 } 5006 5007 if (POWER_TABLE_CCK_ENTRY == r) { 5008 /* for CCK mode, make necessary attenuaton */ 5009 txpower_gains_idx += 5010 IWK_TX_POWER_CCK_COMPENSATION_C_STEP; 5011 } 5012 5013 if (txpower_gains_idx > 107) { 5014 txpower_gains_idx = 107; 5015 } else if (txpower_gains_idx < 0) { 5016 txpower_gains_idx = 0; 5017 } 5018 5019 /* search DSP and radio gains in gain table */ 5020 txpower_gains.s.radio_tx_gain[c] = 5021 gains_table[is_24G][txpower_gains_idx].radio; 5022 txpower_gains.s.dsp_predis_atten[c] = 5023 gains_table[is_24G][txpower_gains_idx].dsp; 5024 5025 IWK_DBG((IWK_DEBUG_CALIBRATION, 5026 "rate_index: %d, " 5027 "gain_index %d, c: %d,is_mimo: %d\n", 5028 r, txpower_gains_idx, c, is_mimo)); 5029 } 5030 5031 /* initialize Tx power table */ 5032 if (r < POWER_TABLE_NUM_HT_OFDM_ENTRIES) { 5033 tp_db->ht_ofdm_power[r].dw = LE_32(txpower_gains.dw); 5034 } else { 5035 tp_db->legacy_cck_power.dw = LE_32(txpower_gains.dw); 5036 } 5037 } 5038 5039 return (IWK_SUCCESS); 5040 } 5041 5042 /* 5043 * make Tx power calibration to adjust Tx power. 5044 * This is completed by sending out Tx power table command. 5045 */ 5046 static int iwk_tx_power_calibration(iwk_sc_t *sc) 5047 { 5048 iwk_tx_power_table_cmd_t cmd; 5049 int rv; 5050 5051 if (sc->sc_flags & IWK_F_SCANNING) { 5052 return (IWK_SUCCESS); 5053 } 5054 5055 /* necessary initialization to Tx power table command */ 5056 cmd.band = (uint8_t)iwk_is_24G_band(sc); 5057 cmd.channel = sc->sc_config.chan; 5058 cmd.channel_normal_width = 0; 5059 5060 /* initialize Tx power table */ 5061 rv = iwk_txpower_table_cmd_init(sc, &cmd.tx_power); 5062 if (rv) { 5063 cmn_err(CE_NOTE, "rv= %d\n", rv); 5064 return (rv); 5065 } 5066 5067 /* send out Tx power table command */ 5068 rv = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &cmd, sizeof (cmd), 1); 5069 if (rv) { 5070 return (rv); 5071 } 5072 5073 /* record current temperature */ 5074 sc->sc_last_tempera = sc->sc_tempera; 5075 5076 return (IWK_SUCCESS); 5077 } 5078 5079 /* This function is the handler of statistics notification from uCode */ 5080 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc) 5081 { 5082 int is_diff; 5083 struct iwk_notif_statistics *statistics_p = 5084 (struct iwk_notif_statistics *)(desc + 1); 5085 5086 mutex_enter(&sc->sc_glock); 5087 5088 is_diff = (sc->sc_statistics.general.temperature != 5089 statistics_p->general.temperature) || 5090 (LE_32(sc->sc_statistics.flag) & 5091 STATISTICS_REPLY_FLG_FAT_MODE_MSK) != 5092 (LE_32(statistics_p->flag) & STATISTICS_REPLY_FLG_FAT_MODE_MSK); 5093 5094 /* update statistics data */ 5095 (void) memcpy(&sc->sc_statistics, statistics_p, 5096 sizeof (struct iwk_notif_statistics)); 5097 5098 sc->sc_flags |= IWK_F_STATISTICS; 5099 5100 if (!(sc->sc_flags & IWK_F_SCANNING)) { 5101 /* make Receiver gain balance calibration */ 5102 (void) iwk_rxgain_diff(sc); 5103 5104 /* make Receiver sensitivity calibration */ 5105 (void) iwk_rx_sens(sc); 5106 } 5107 5108 5109 if 
(!is_diff) { 5110 mutex_exit(&sc->sc_glock); 5111 return; 5112 } 5113 5114 /* calibration current temperature of 4965 chipset */ 5115 sc->sc_tempera = iwk_curr_tempera(sc); 5116 5117 /* distinct temperature change will trigger Tx power calibration */ 5118 if (((sc->sc_tempera - sc->sc_last_tempera) >= 3) || 5119 ((sc->sc_last_tempera - sc->sc_tempera) >= 3)) { 5120 /* make Tx power calibration */ 5121 (void) iwk_tx_power_calibration(sc); 5122 } 5123 5124 mutex_exit(&sc->sc_glock); 5125 } 5126 5127 /* Determine this station is in associated state or not */ 5128 static int iwk_is_associated(iwk_sc_t *sc) 5129 { 5130 return (LE_32(sc->sc_config.filter_flags) & RXON_FILTER_ASSOC_MSK); 5131 } 5132 5133 /* Make necessary preparation for Receiver gain balance calibration */ 5134 static int iwk_rxgain_diff_init(iwk_sc_t *sc) 5135 { 5136 int i, rv; 5137 struct iwk_calibration_cmd cmd; 5138 struct iwk_rx_gain_diff *gain_diff_p; 5139 5140 gain_diff_p = &sc->sc_rxgain_diff; 5141 5142 (void) memset(gain_diff_p, 0, sizeof (struct iwk_rx_gain_diff)); 5143 (void) memset(&cmd, 0, sizeof (struct iwk_calibration_cmd)); 5144 5145 for (i = 0; i < RX_CHAINS_NUM; i++) { 5146 gain_diff_p->gain_diff_chain[i] = CHAIN_GAIN_DIFF_INIT_VAL; 5147 } 5148 5149 if (iwk_is_associated(sc)) { 5150 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD; 5151 cmd.diff_gain_a = 0; 5152 cmd.diff_gain_b = 0; 5153 cmd.diff_gain_c = 0; 5154 5155 /* assume the gains of every Rx chains is balanceable */ 5156 rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &cmd, 5157 sizeof (cmd), 1); 5158 if (rv) { 5159 return (rv); 5160 } 5161 5162 gain_diff_p->state = IWK_GAIN_DIFF_ACCUMULATE; 5163 } 5164 5165 return (IWK_SUCCESS); 5166 } 5167 5168 /* 5169 * make Receiver gain balance to balance Rx gain between Rx chains 5170 * and determine which chain is disconnected 5171 */ 5172 static int iwk_rxgain_diff(iwk_sc_t *sc) 5173 { 5174 int i, is_24G, rv; 5175 int max_beacon_chain_n; 5176 int min_noise_chain_n; 5177 uint16_t channel_n; 5178 int32_t beacon_diff; 5179 int32_t noise_diff; 5180 uint32_t noise_chain_a, noise_chain_b, noise_chain_c; 5181 uint32_t beacon_chain_a, beacon_chain_b, beacon_chain_c; 5182 struct iwk_calibration_cmd cmd; 5183 uint32_t beacon_aver[RX_CHAINS_NUM] = {0xFFFFFFFF}; 5184 uint32_t noise_aver[RX_CHAINS_NUM] = {0xFFFFFFFF}; 5185 struct statistics_rx_non_phy *rx_general_p = 5186 &sc->sc_statistics.rx.general; 5187 struct iwk_rx_gain_diff *gain_diff_p = &sc->sc_rxgain_diff; 5188 5189 if (INTERFERENCE_DATA_AVAILABLE != 5190 LE_32(rx_general_p->interference_data_flag)) { 5191 return (IWK_SUCCESS); 5192 } 5193 5194 if (IWK_GAIN_DIFF_ACCUMULATE != gain_diff_p->state) { 5195 return (IWK_SUCCESS); 5196 } 5197 5198 is_24G = iwk_is_24G_band(sc); 5199 channel_n = sc->sc_config.chan; /* channel number */ 5200 5201 if ((channel_n != (LE_32(sc->sc_statistics.flag) >> 16)) || 5202 ((STATISTICS_REPLY_FLG_BAND_24G_MSK == 5203 (LE_32(sc->sc_statistics.flag) & 5204 STATISTICS_REPLY_FLG_BAND_24G_MSK)) && 5205 !is_24G)) { 5206 return (IWK_SUCCESS); 5207 } 5208 5209 /* Rx chain's noise strength from statistics notification */ 5210 noise_chain_a = LE_32(rx_general_p->beacon_silence_rssi_a) & 0xFF; 5211 noise_chain_b = LE_32(rx_general_p->beacon_silence_rssi_b) & 0xFF; 5212 noise_chain_c = LE_32(rx_general_p->beacon_silence_rssi_c) & 0xFF; 5213 5214 /* Rx chain's beacon strength from statistics notification */ 5215 beacon_chain_a = LE_32(rx_general_p->beacon_rssi_a) & 0xFF; 5216 beacon_chain_b = LE_32(rx_general_p->beacon_rssi_b) & 0xFF; 5217 beacon_chain_c = 
LE_32(rx_general_p->beacon_rssi_c) & 0xFF; 5218
5219 gain_diff_p->beacon_count++; 5220
5221 /* accumulate chain's noise strength */ 5222 gain_diff_p->noise_stren_a += noise_chain_a; 5223 gain_diff_p->noise_stren_b += noise_chain_b; 5224 gain_diff_p->noise_stren_c += noise_chain_c; 5225
5226 /* accumulate chain's beacon strength */ 5227 gain_diff_p->beacon_stren_a += beacon_chain_a; 5228 gain_diff_p->beacon_stren_b += beacon_chain_b; 5229 gain_diff_p->beacon_stren_c += beacon_chain_c; 5230
5231 if (BEACON_NUM_20 == gain_diff_p->beacon_count) { 5232 /* calculate average beacon strength */ 5233 beacon_aver[0] = (gain_diff_p->beacon_stren_a) / BEACON_NUM_20; 5234 beacon_aver[1] = (gain_diff_p->beacon_stren_b) / BEACON_NUM_20; 5235 beacon_aver[2] = (gain_diff_p->beacon_stren_c) / BEACON_NUM_20; 5236
5237 /* calculate average noise strength */ 5238 noise_aver[0] = (gain_diff_p->noise_stren_a) / BEACON_NUM_20; 5239 noise_aver[1] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20; 5240 noise_aver[2] = (gain_diff_p->noise_stren_c) / BEACON_NUM_20; 5241
5242 /* determine maximum beacon strength among 3 chains */ 5243 if ((beacon_aver[0] >= beacon_aver[1]) && 5244 (beacon_aver[0] >= beacon_aver[2])) { 5245 max_beacon_chain_n = 0; 5246 gain_diff_p->connected_chains = 1 << 0; 5247 } else if (beacon_aver[1] >= beacon_aver[2]) { 5248 max_beacon_chain_n = 1; 5249 gain_diff_p->connected_chains = 1 << 1; 5250 } else { 5251 max_beacon_chain_n = 2; 5252 gain_diff_p->connected_chains = 1 << 2; 5253 } 5254
5255 /* determine which chain is disconnected */ 5256 for (i = 0; i < RX_CHAINS_NUM; i++) { 5257 if (i != max_beacon_chain_n) { 5258 beacon_diff = beacon_aver[max_beacon_chain_n] - 5259 beacon_aver[i]; 5260 if (beacon_diff > MAX_ALLOWED_DIFF) { 5261 gain_diff_p->disconnect_chain[i] = 1; 5262 } else { 5263 gain_diff_p->connected_chains |= 5264 (1 << i); 5265 } 5266 } 5267 } 5268
5269 /* 5270 * if chain A and B are both disconnected, 5271 * assume the one with the stronger beacon strength is connected 5272 */ 5273 if (gain_diff_p->disconnect_chain[0] && 5274 gain_diff_p->disconnect_chain[1]) { 5275 if (beacon_aver[0] >= beacon_aver[1]) { 5276 gain_diff_p->disconnect_chain[0] = 0; 5277 gain_diff_p->connected_chains |= (1 << 0); 5278 } else { 5279 gain_diff_p->disconnect_chain[1] = 0; 5280 gain_diff_p->connected_chains |= (1 << 1); 5281 } 5282 } 5283
5284 /* determine minimum noise strength among 3 chains */ 5285 if (!gain_diff_p->disconnect_chain[0]) { 5286 min_noise_chain_n = 0; 5287
5288 for (i = 0; i < RX_CHAINS_NUM; i++) { 5289 if (!gain_diff_p->disconnect_chain[i] && 5290 (noise_aver[i] <= 5291 noise_aver[min_noise_chain_n])) { 5292 min_noise_chain_n = i; 5293 } 5294
5295 } 5296 } else { 5297 min_noise_chain_n = 1; 5298
5299 for (i = 0; i < RX_CHAINS_NUM; i++) { 5300 if (!gain_diff_p->disconnect_chain[i] && 5301 (noise_aver[i] <= 5302 noise_aver[min_noise_chain_n])) { 5303 min_noise_chain_n = i; 5304 } 5305 } 5306 } 5307
5308 gain_diff_p->gain_diff_chain[min_noise_chain_n] = 0; 5309
5310 /* determine gain difference between chains */ 5311 for (i = 0; i < RX_CHAINS_NUM; i++) { 5312 if (!gain_diff_p->disconnect_chain[i] && 5313 (CHAIN_GAIN_DIFF_INIT_VAL == 5314 gain_diff_p->gain_diff_chain[i])) { 5315
5316 noise_diff = noise_aver[i] - 5317 noise_aver[min_noise_chain_n]; 5318 gain_diff_p->gain_diff_chain[i] = 5319 (uint8_t)((noise_diff * 10) / 15); 5320
5321 if (gain_diff_p->gain_diff_chain[i] > 3) { 5322 gain_diff_p->gain_diff_chain[i] = 3; 5323 } 5324
5325 gain_diff_p->gain_diff_chain[i] |= (1 << 2); 5326 } else { 5327
gain_diff_p->gain_diff_chain[i] = 0; 5328 } 5329 } 5330 5331 if (!gain_diff_p->gain_diff_send) { 5332 gain_diff_p->gain_diff_send = 1; 5333 5334 (void) memset(&cmd, 0, sizeof (cmd)); 5335 5336 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD; 5337 cmd.diff_gain_a = gain_diff_p->gain_diff_chain[0]; 5338 cmd.diff_gain_b = gain_diff_p->gain_diff_chain[1]; 5339 cmd.diff_gain_c = gain_diff_p->gain_diff_chain[2]; 5340 5341 /* 5342 * send out PHY calibration command to 5343 * adjust every chain's Rx gain 5344 */ 5345 rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, 5346 &cmd, sizeof (cmd), 1); 5347 if (rv) { 5348 return (rv); 5349 } 5350 5351 gain_diff_p->state = IWK_GAIN_DIFF_CALIBRATED; 5352 } 5353 5354 gain_diff_p->beacon_stren_a = 0; 5355 gain_diff_p->beacon_stren_b = 0; 5356 gain_diff_p->beacon_stren_c = 0; 5357 5358 gain_diff_p->noise_stren_a = 0; 5359 gain_diff_p->noise_stren_b = 0; 5360 gain_diff_p->noise_stren_c = 0; 5361 } 5362 5363 return (IWK_SUCCESS); 5364 } 5365 5366 /* Make necessary preparation for Receiver sensitivity calibration */ 5367 static int iwk_rx_sens_init(iwk_sc_t *sc) 5368 { 5369 int i, rv; 5370 struct iwk_rx_sensitivity_cmd cmd; 5371 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens; 5372 5373 (void) memset(&cmd, 0, sizeof (struct iwk_rx_sensitivity_cmd)); 5374 (void) memset(rx_sens_p, 0, sizeof (struct iwk_rx_sensitivity)); 5375 5376 rx_sens_p->auto_corr_ofdm_x4 = 90; 5377 rx_sens_p->auto_corr_mrc_ofdm_x4 = 170; 5378 rx_sens_p->auto_corr_ofdm_x1 = 105; 5379 rx_sens_p->auto_corr_mrc_ofdm_x1 = 220; 5380 5381 rx_sens_p->auto_corr_cck_x4 = 125; 5382 rx_sens_p->auto_corr_mrc_cck_x4 = 200; 5383 rx_sens_p->min_energy_det_cck = 100; 5384 5385 rx_sens_p->flags &= (~IWK_SENSITIVITY_CALIB_ALLOW_MSK); 5386 rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK); 5387 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK); 5388 5389 rx_sens_p->last_bad_plcp_cnt_ofdm = 0; 5390 rx_sens_p->last_false_alarm_cnt_ofdm = 0; 5391 rx_sens_p->last_bad_plcp_cnt_cck = 0; 5392 rx_sens_p->last_false_alarm_cnt_cck = 0; 5393 5394 rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM; 5395 rx_sens_p->cck_prev_state = IWK_TOO_MANY_FALSE_ALARM; 5396 rx_sens_p->cck_no_false_alarm_num = 0; 5397 rx_sens_p->cck_beacon_idx = 0; 5398 5399 for (i = 0; i < 10; i++) { 5400 rx_sens_p->cck_beacon_min[i] = 0; 5401 } 5402 5403 rx_sens_p->cck_noise_idx = 0; 5404 rx_sens_p->cck_noise_ref = 0; 5405 5406 for (i = 0; i < 20; i++) { 5407 rx_sens_p->cck_noise_max[i] = 0; 5408 } 5409 5410 rx_sens_p->cck_noise_diff = 0; 5411 rx_sens_p->cck_no_false_alarm_num = 0; 5412 5413 cmd.control = LE_16(IWK_SENSITIVITY_CONTROL_WORK_TABLE); 5414 5415 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] = 5416 LE_16(rx_sens_p->auto_corr_ofdm_x4); 5417 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] = 5418 LE_16(rx_sens_p->auto_corr_mrc_ofdm_x4); 5419 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] = 5420 LE_16(rx_sens_p->auto_corr_ofdm_x1); 5421 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] = 5422 LE_16(rx_sens_p->auto_corr_mrc_ofdm_x1); 5423 5424 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] = 5425 LE_16(rx_sens_p->auto_corr_cck_x4); 5426 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] = 5427 LE_16(rx_sens_p->auto_corr_mrc_cck_x4); 5428 cmd.table[MIN_ENERGY_CCK_DET_IDX] = 5429 LE_16(rx_sens_p->min_energy_det_cck); 5430 5431 cmd.table[MIN_ENERGY_OFDM_DET_IDX] = LE_16(100); 5432 cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = LE_16(190); 5433 cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = LE_16(390); 5434 cmd.table[PTAM_ENERGY_TH_IDX] = LE_16(62); 5435 5436 /* at first, set up Rx to 
maximum sensitivity */ 5437 rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1); 5438 if (rv) { 5439 cmn_err(CE_WARN, "iwk_rx_sens_init(): " 5440 "in the process of initialization, " 5441 "failed to send rx sensitivity command\n"); 5442 return (rv); 5443 } 5444 5445 rx_sens_p->flags |= IWK_SENSITIVITY_CALIB_ALLOW_MSK; 5446 5447 return (IWK_SUCCESS); 5448 } 5449 5450 /* 5451 * make Receiver sensitivity calibration to adjust every chain's Rx sensitivity. 5452 * for more infomation, please refer to iwk_calibration.h file 5453 */ 5454 static int iwk_rx_sens(iwk_sc_t *sc) 5455 { 5456 int rv; 5457 uint32_t actual_rx_time; 5458 struct statistics_rx_non_phy *rx_general_p = 5459 &sc->sc_statistics.rx.general; 5460 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens; 5461 struct iwk_rx_sensitivity_cmd cmd; 5462 5463 if (!(rx_sens_p->flags & IWK_SENSITIVITY_CALIB_ALLOW_MSK)) { 5464 cmn_err(CE_WARN, "iwk_rx_sens(): " 5465 "sensitivity initialization has not finished.\n"); 5466 return (DDI_FAILURE); 5467 } 5468 5469 if (INTERFERENCE_DATA_AVAILABLE != 5470 LE_32(rx_general_p->interference_data_flag)) { 5471 cmn_err(CE_WARN, "iwk_rx_sens(): " 5472 "can't make rx sensitivity calibration," 5473 "because of invalid statistics\n"); 5474 return (DDI_FAILURE); 5475 } 5476 5477 actual_rx_time = LE_32(rx_general_p->channel_load); 5478 if (!actual_rx_time) { 5479 IWK_DBG((IWK_DEBUG_CALIBRATION, "iwk_rx_sens(): " 5480 "can't make rx sensitivity calibration," 5481 "because has not enough rx time\n")); 5482 return (DDI_FAILURE); 5483 } 5484 5485 /* make Rx sensitivity calibration for OFDM mode */ 5486 rv = iwk_ofdm_sens(sc, actual_rx_time); 5487 if (rv) { 5488 return (rv); 5489 } 5490 5491 /* make Rx sensitivity calibration for CCK mode */ 5492 rv = iwk_cck_sens(sc, actual_rx_time); 5493 if (rv) { 5494 return (rv); 5495 } 5496 5497 /* 5498 * if the sum of false alarm had not changed, nothing will be done 5499 */ 5500 if ((!(rx_sens_p->flags & IWK_SENSITIVITY_OFDM_UPDATE_MSK)) && 5501 (!(rx_sens_p->flags & IWK_SENSITIVITY_CCK_UPDATE_MSK))) { 5502 return (IWK_SUCCESS); 5503 } 5504 5505 cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE; 5506 5507 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] = 5508 rx_sens_p->auto_corr_ofdm_x4; 5509 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] = 5510 rx_sens_p->auto_corr_mrc_ofdm_x4; 5511 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] = 5512 rx_sens_p->auto_corr_ofdm_x1; 5513 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] = 5514 rx_sens_p->auto_corr_mrc_ofdm_x1; 5515 5516 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] = 5517 rx_sens_p->auto_corr_cck_x4; 5518 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] = 5519 rx_sens_p->auto_corr_mrc_cck_x4; 5520 cmd.table[MIN_ENERGY_CCK_DET_IDX] = 5521 rx_sens_p->min_energy_det_cck; 5522 5523 cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100; 5524 cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190; 5525 cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390; 5526 cmd.table[PTAM_ENERGY_TH_IDX] = 62; 5527 5528 /* 5529 * send sensitivity command to complete actual sensitivity calibration 5530 */ 5531 rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1); 5532 if (rv) { 5533 cmn_err(CE_WARN, "iwk_rx_sens(): " 5534 "fail to send rx sensitivity command\n"); 5535 return (rv); 5536 } 5537 5538 return (IWK_SUCCESS); 5539 5540 } 5541 5542 /* 5543 * make Rx sensitivity calibration for CCK mode. 
5544 * This is preparing parameters for Sensitivity command 5545 */ 5546 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time) 5547 { 5548 int i; 5549 uint8_t noise_a, noise_b, noise_c; 5550 uint8_t max_noise_abc, max_noise_20; 5551 uint32_t beacon_a, beacon_b, beacon_c; 5552 uint32_t min_beacon_abc, max_beacon_10; 5553 uint32_t cck_fa, cck_bp; 5554 uint32_t cck_sum_fa_bp; 5555 uint32_t temp; 5556 struct statistics_rx_non_phy *rx_general_p = 5557 &sc->sc_statistics.rx.general; 5558 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens; 5559 5560 cck_fa = LE_32(sc->sc_statistics.rx.cck.false_alarm_cnt); 5561 cck_bp = LE_32(sc->sc_statistics.rx.cck.plcp_err); 5562 5563 /* accumulate false alarm */ 5564 if (rx_sens_p->last_false_alarm_cnt_cck > cck_fa) { 5565 temp = rx_sens_p->last_false_alarm_cnt_cck; 5566 rx_sens_p->last_false_alarm_cnt_cck = cck_fa; 5567 cck_fa += (0xFFFFFFFF - temp); 5568 } else { 5569 cck_fa -= rx_sens_p->last_false_alarm_cnt_cck; 5570 rx_sens_p->last_false_alarm_cnt_cck += cck_fa; 5571 } 5572 5573 /* accumulate bad plcp */ 5574 if (rx_sens_p->last_bad_plcp_cnt_cck > cck_bp) { 5575 temp = rx_sens_p->last_bad_plcp_cnt_cck; 5576 rx_sens_p->last_bad_plcp_cnt_cck = cck_bp; 5577 cck_bp += (0xFFFFFFFF - temp); 5578 } else { 5579 cck_bp -= rx_sens_p->last_bad_plcp_cnt_cck; 5580 rx_sens_p->last_bad_plcp_cnt_cck += cck_bp; 5581 } 5582 5583 /* 5584 * calculate relative value 5585 */ 5586 cck_sum_fa_bp = (cck_fa + cck_bp) * 200 * 1024; 5587 rx_sens_p->cck_noise_diff = 0; 5588 5589 noise_a = 5590 (uint8_t)((LE_32(rx_general_p->beacon_silence_rssi_a) & 0xFF00) >> 5591 8); 5592 noise_b = 5593 (uint8_t)((LE_32(rx_general_p->beacon_silence_rssi_b) & 0xFF00) >> 5594 8); 5595 noise_c = 5596 (uint8_t)((LE_32(rx_general_p->beacon_silence_rssi_c) & 0xFF00) >> 5597 8); 5598 5599 beacon_a = LE_32(rx_general_p->beacon_energy_a); 5600 beacon_b = LE_32(rx_general_p->beacon_energy_b); 5601 beacon_c = LE_32(rx_general_p->beacon_energy_c); 5602 5603 /* determine maximum noise among 3 chains */ 5604 if ((noise_a >= noise_b) && (noise_a >= noise_c)) { 5605 max_noise_abc = noise_a; 5606 } else if (noise_b >= noise_c) { 5607 max_noise_abc = noise_b; 5608 } else { 5609 max_noise_abc = noise_c; 5610 } 5611 5612 /* record maximum noise among 3 chains */ 5613 rx_sens_p->cck_noise_max[rx_sens_p->cck_noise_idx] = max_noise_abc; 5614 rx_sens_p->cck_noise_idx++; 5615 if (rx_sens_p->cck_noise_idx >= 20) { 5616 rx_sens_p->cck_noise_idx = 0; 5617 } 5618 5619 /* determine maximum noise among 20 max noise */ 5620 max_noise_20 = rx_sens_p->cck_noise_max[0]; 5621 for (i = 0; i < 20; i++) { 5622 if (rx_sens_p->cck_noise_max[i] >= max_noise_20) { 5623 max_noise_20 = rx_sens_p->cck_noise_max[i]; 5624 } 5625 } 5626 5627 /* determine minimum beacon among 3 chains */ 5628 if ((beacon_a <= beacon_b) && (beacon_a <= beacon_c)) { 5629 min_beacon_abc = beacon_a; 5630 } else if (beacon_b <= beacon_c) { 5631 min_beacon_abc = beacon_b; 5632 } else { 5633 min_beacon_abc = beacon_c; 5634 } 5635 5636 /* record miminum beacon among 3 chains */ 5637 rx_sens_p->cck_beacon_min[rx_sens_p->cck_beacon_idx] = min_beacon_abc; 5638 rx_sens_p->cck_beacon_idx++; 5639 if (rx_sens_p->cck_beacon_idx >= 10) { 5640 rx_sens_p->cck_beacon_idx = 0; 5641 } 5642 5643 /* determine maximum beacon among 10 miminum beacon among 3 chains */ 5644 max_beacon_10 = rx_sens_p->cck_beacon_min[0]; 5645 for (i = 0; i < 10; i++) { 5646 if (rx_sens_p->cck_beacon_min[i] >= max_beacon_10) { 5647 max_beacon_10 = rx_sens_p->cck_beacon_min[i]; 5648 } 5649 } 5650 
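/*
 * A rough feel for the thresholds used below (figures for illustration
 * only): cck_sum_fa_bp is (false alarms + bad plcp) scaled by 200 * 1024,
 * so the "too many false alarms" branch (cck_sum_fa_bp > 50 * actual_rx_time)
 * triggers once the raw event count exceeds actual_rx_time / 4096, and the
 * "too few" branch (cck_sum_fa_bp < 5 * actual_rx_time) once it drops below
 * actual_rx_time / 40960.
 */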
5651 /* add a little margin */ 5652 max_beacon_10 += 6; 5653 5654 /* record the count of having no false alarms */ 5655 if (cck_sum_fa_bp < (5 * actual_rx_time)) { 5656 rx_sens_p->cck_no_false_alarm_num++; 5657 } else { 5658 rx_sens_p->cck_no_false_alarm_num = 0; 5659 } 5660 5661 /* 5662 * adjust parameters in sensitivity command 5663 * according to different status. 5664 * for more infomation, please refer to iwk_calibration.h file 5665 */ 5666 if (cck_sum_fa_bp > (50 * actual_rx_time)) { 5667 rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM; 5668 5669 if (rx_sens_p->auto_corr_cck_x4 > 160) { 5670 rx_sens_p->cck_noise_ref = max_noise_20; 5671 5672 if (rx_sens_p->min_energy_det_cck > 2) { 5673 rx_sens_p->min_energy_det_cck -= 2; 5674 } 5675 } 5676 5677 if (rx_sens_p->auto_corr_cck_x4 < 160) { 5678 rx_sens_p->auto_corr_cck_x4 = 160 + 1; 5679 } else { 5680 if ((rx_sens_p->auto_corr_cck_x4 + 3) < 200) { 5681 rx_sens_p->auto_corr_cck_x4 += 3; 5682 } else { 5683 rx_sens_p->auto_corr_cck_x4 = 200; 5684 } 5685 } 5686 5687 if ((rx_sens_p->auto_corr_mrc_cck_x4 + 3) < 400) { 5688 rx_sens_p->auto_corr_mrc_cck_x4 += 3; 5689 } else { 5690 rx_sens_p->auto_corr_mrc_cck_x4 = 400; 5691 } 5692 5693 rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK; 5694 5695 } else if (cck_sum_fa_bp < (5 * actual_rx_time)) { 5696 rx_sens_p->cck_curr_state = IWK_TOO_FEW_FALSE_ALARM; 5697 5698 rx_sens_p->cck_noise_diff = (int32_t)rx_sens_p->cck_noise_ref - 5699 (int32_t)max_noise_20; 5700 5701 if ((rx_sens_p->cck_prev_state != IWK_TOO_MANY_FALSE_ALARM) && 5702 ((rx_sens_p->cck_noise_diff > 2) || 5703 (rx_sens_p->cck_no_false_alarm_num > 100))) { 5704 if ((rx_sens_p->min_energy_det_cck + 2) < 97) { 5705 rx_sens_p->min_energy_det_cck += 2; 5706 } else { 5707 rx_sens_p->min_energy_det_cck = 97; 5708 } 5709 5710 if ((rx_sens_p->auto_corr_cck_x4 - 3) > 125) { 5711 rx_sens_p->auto_corr_cck_x4 -= 3; 5712 } else { 5713 rx_sens_p->auto_corr_cck_x4 = 125; 5714 } 5715 5716 if ((rx_sens_p->auto_corr_mrc_cck_x4 -3) > 200) { 5717 rx_sens_p->auto_corr_mrc_cck_x4 -= 3; 5718 } else { 5719 rx_sens_p->auto_corr_mrc_cck_x4 = 200; 5720 } 5721 5722 rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK; 5723 } else { 5724 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK); 5725 } 5726 } else { 5727 rx_sens_p->cck_curr_state = IWK_GOOD_RANGE_FALSE_ALARM; 5728 5729 rx_sens_p->cck_noise_ref = max_noise_20; 5730 5731 if (IWK_TOO_MANY_FALSE_ALARM == rx_sens_p->cck_prev_state) { 5732 rx_sens_p->min_energy_det_cck -= 8; 5733 } 5734 5735 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK); 5736 } 5737 5738 if (rx_sens_p->min_energy_det_cck < max_beacon_10) { 5739 rx_sens_p->min_energy_det_cck = (uint16_t)max_beacon_10; 5740 } 5741 5742 rx_sens_p->cck_prev_state = rx_sens_p->cck_curr_state; 5743 5744 return (IWK_SUCCESS); 5745 } 5746 5747 /* 5748 * make Rx sensitivity calibration for OFDM mode. 
5749 * This is preparing parameters for Sensitivity command 5750 */ 5751 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time) 5752 { 5753 uint32_t temp; 5754 uint16_t temp1; 5755 uint32_t ofdm_fa, ofdm_bp; 5756 uint32_t ofdm_sum_fa_bp; 5757 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens; 5758 5759 ofdm_fa = LE_32(sc->sc_statistics.rx.ofdm.false_alarm_cnt); 5760 ofdm_bp = LE_32(sc->sc_statistics.rx.ofdm.plcp_err); 5761 5762 /* accumulate false alarm */ 5763 if (rx_sens_p->last_false_alarm_cnt_ofdm > ofdm_fa) { 5764 temp = rx_sens_p->last_false_alarm_cnt_ofdm; 5765 rx_sens_p->last_false_alarm_cnt_ofdm = ofdm_fa; 5766 ofdm_fa += (0xFFFFFFFF - temp); 5767 } else { 5768 ofdm_fa -= rx_sens_p->last_false_alarm_cnt_ofdm; 5769 rx_sens_p->last_false_alarm_cnt_ofdm += ofdm_fa; 5770 } 5771 5772 /* accumulate bad plcp */ 5773 if (rx_sens_p->last_bad_plcp_cnt_ofdm > ofdm_bp) { 5774 temp = rx_sens_p->last_bad_plcp_cnt_ofdm; 5775 rx_sens_p->last_bad_plcp_cnt_ofdm = ofdm_bp; 5776 ofdm_bp += (0xFFFFFFFF - temp); 5777 } else { 5778 ofdm_bp -= rx_sens_p->last_bad_plcp_cnt_ofdm; 5779 rx_sens_p->last_bad_plcp_cnt_ofdm += ofdm_bp; 5780 } 5781 5782 ofdm_sum_fa_bp = (ofdm_fa + ofdm_bp) * 200 * 1024; /* relative value */ 5783 5784 /* 5785 * adjust parameter in sensitivity command according to different status 5786 */ 5787 if (ofdm_sum_fa_bp > (50 * actual_rx_time)) { 5788 temp1 = rx_sens_p->auto_corr_ofdm_x4 + 1; 5789 rx_sens_p->auto_corr_ofdm_x4 = (temp1 <= 120) ? temp1 : 120; 5790 5791 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 + 1; 5792 rx_sens_p->auto_corr_mrc_ofdm_x4 = 5793 (temp1 <= 210) ? temp1 : 210; 5794 5795 temp1 = rx_sens_p->auto_corr_ofdm_x1 + 1; 5796 rx_sens_p->auto_corr_ofdm_x1 = (temp1 <= 140) ? temp1 : 140; 5797 5798 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 + 1; 5799 rx_sens_p->auto_corr_mrc_ofdm_x1 = 5800 (temp1 <= 270) ? temp1 : 270; 5801 5802 rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK; 5803 5804 } else if (ofdm_sum_fa_bp < (5 * actual_rx_time)) { 5805 temp1 = rx_sens_p->auto_corr_ofdm_x4 - 1; 5806 rx_sens_p->auto_corr_ofdm_x4 = (temp1 >= 85) ? temp1 : 85; 5807 5808 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 - 1; 5809 rx_sens_p->auto_corr_mrc_ofdm_x4 = 5810 (temp1 >= 170) ? temp1 : 170; 5811 5812 temp1 = rx_sens_p->auto_corr_ofdm_x1 - 1; 5813 rx_sens_p->auto_corr_ofdm_x1 = (temp1 >= 105) ? temp1 : 105; 5814 5815 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 - 1; 5816 rx_sens_p->auto_corr_mrc_ofdm_x1 = 5817 (temp1 >= 220) ? temp1 : 220; 5818 5819 rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK; 5820 5821 } else { 5822 rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK); 5823 } 5824 5825 return (IWK_SUCCESS); 5826 } 5827 5828 /* 5829 * additional process to management frames 5830 */ 5831 static void iwk_recv_mgmt(struct ieee80211com *ic, mblk_t *mp, 5832 struct ieee80211_node *in, 5833 int subtype, int rssi, uint32_t rstamp) 5834 { 5835 iwk_sc_t *sc = (iwk_sc_t *)ic; 5836 struct ieee80211_frame *wh; 5837 uint8_t index1, index2; 5838 int err; 5839 5840 sc->sc_recv_mgmt(ic, mp, in, subtype, rssi, rstamp); 5841 5842 mutex_enter(&sc->sc_glock); 5843 switch (subtype) { 5844 case IEEE80211_FC0_SUBTYPE_BEACON: 5845 if (sc->sc_ibss.ibss_beacon.syncbeacon && in == ic->ic_bss && 5846 ic->ic_state == IEEE80211_S_RUN) { 5847 if (ieee80211_beacon_update(ic, in, 5848 &sc->sc_ibss.ibss_beacon.iwk_boff, 5849 sc->sc_ibss.ibss_beacon.mp, 0)) { 5850 bcopy(sc->sc_ibss.ibss_beacon.mp->b_rptr, 5851 sc->sc_ibss.ibss_beacon.beacon_cmd. 
5852 bcon_frame, 5853 MBLKL(sc->sc_ibss.ibss_beacon.mp)); 5854 } 5855 err = iwk_cmd(sc, REPLY_TX_BEACON, 5856 &sc->sc_ibss.ibss_beacon.beacon_cmd, 5857 sc->sc_ibss.ibss_beacon.beacon_cmd_len, 1); 5858 if (err != IWK_SUCCESS) { 5859 cmn_err(CE_WARN, "iwk_recv_mgmt(): " 5860 "failed to TX beacon.\n"); 5861 } 5862 sc->sc_ibss.ibss_beacon.syncbeacon = 0; 5863 } 5864 if (ic->ic_opmode == IEEE80211_M_IBSS && 5865 ic->ic_state == IEEE80211_S_RUN) { 5866 wh = (struct ieee80211_frame *)mp->b_rptr; 5867 mutex_enter(&sc->sc_ibss.node_tb_lock); 5868 /* 5869 * search for node in ibss node table 5870 */ 5871 for (index1 = IWK_STA_ID; index1 < IWK_STATION_COUNT; 5872 index1++) { 5873 if (sc->sc_ibss.ibss_node_tb[index1].used && 5874 IEEE80211_ADDR_EQ(sc->sc_ibss. 5875 ibss_node_tb[index1].node.bssid, 5876 wh->i_addr2)) { 5877 break; 5878 } 5879 } 5880 /* 5881 * if don't find in ibss node table 5882 */ 5883 if (index1 >= IWK_BROADCAST_ID) { 5884 err = iwk_clean_add_node_ibss(ic, 5885 wh->i_addr2, &index2); 5886 if (err != IWK_SUCCESS) { 5887 cmn_err(CE_WARN, "iwk_recv_mgmt(): " 5888 "failed to clean all nodes " 5889 "and add one node\n"); 5890 } 5891 } 5892 mutex_exit(&sc->sc_ibss.node_tb_lock); 5893 } 5894 break; 5895 case IEEE80211_FC0_SUBTYPE_PROBE_RESP: 5896 break; 5897 } 5898 mutex_exit(&sc->sc_glock); 5899 } 5900 5901 /* 5902 * 1) log_event_table_ptr indicates base of the event log. This traces 5903 * a 256-entry history of uCode execution within a circular buffer. 5904 * Its header format is: 5905 * 5906 * uint32_t log_size; log capacity (in number of entries) 5907 * uint32_t type; (1) timestamp with each entry, (0) no timestamp 5908 * uint32_t wraps; # times uCode has wrapped to top of circular buffer 5909 * uint32_t write_index; next circular buffer entry that uCode would fill 5910 * 5911 * The header is followed by the circular buffer of log entries. Entries 5912 * with timestamps have the following format: 5913 * 5914 * uint32_t event_id; range 0 - 1500 5915 * uint32_t timestamp; low 32 bits of TSF (of network, if associated) 5916 * uint32_t data; event_id-specific data value 5917 * 5918 * Entries without timestamps contain only event_id and data. 
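 *
 * As an illustration of the layout above: with type == 1 the i-th entry
 * of the buffer starts at log_event_table_ptr + (4 + 3 * i) *
 * sizeof (uint32_t), i.e. four header words followed by three-word
 * (event_id, timestamp, data) entries; with type == 0 each entry
 * occupies only two words.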
5919 */ 5920 5921 /* 5922 * iwk_write_event_log - Write event log to dmesg 5923 */ 5924 static void iwk_write_event_log(iwk_sc_t *sc) 5925 { 5926 uint32_t log_event_table_ptr; /* Start address of event table */ 5927 uint32_t startptr; /* Start address of log data */ 5928 uint32_t logptr; /* address of log data entry */ 5929 uint32_t i, n, num_events; 5930 uint32_t event_id, data1, data2; /* log data */ 5931 5932 uint32_t log_size; /* log capacity (in number of entries) */ 5933 uint32_t type; /* (1)timestamp with each entry,(0) no timestamp */ 5934 uint32_t wraps; /* # times uCode has wrapped to */ 5935 /* the top of circular buffer */ 5936 uint32_t idx; /* index of entry to be filled in next */ 5937 5938 log_event_table_ptr = LE_32(sc->sc_card_alive_run.log_event_table_ptr); 5939 if (!(log_event_table_ptr)) { 5940 IWK_DBG((IWK_DEBUG_EEPROM, "NULL event table pointer\n")); 5941 return; 5942 } 5943 5944 iwk_mac_access_enter(sc); 5945 5946 /* Read log header */ 5947 log_size = iwk_mem_read(sc, log_event_table_ptr); 5948 log_event_table_ptr += sizeof (uint32_t); /* addr of "type" */ 5949 type = iwk_mem_read(sc, log_event_table_ptr); 5950 log_event_table_ptr += sizeof (uint32_t); /* addr of "wraps" */ 5951 wraps = iwk_mem_read(sc, log_event_table_ptr); 5952 log_event_table_ptr += sizeof (uint32_t); /* addr of "idx" */ 5953 idx = iwk_mem_read(sc, log_event_table_ptr); 5954 startptr = log_event_table_ptr + 5955 sizeof (uint32_t); /* addr of start of log data */ 5956 if (!log_size & !wraps) { 5957 IWK_DBG((IWK_DEBUG_EEPROM, "Empty log\n")); 5958 iwk_mac_access_exit(sc); 5959 return; 5960 } 5961 5962 if (!wraps) { 5963 num_events = idx; 5964 logptr = startptr; 5965 } else { 5966 num_events = log_size - idx; 5967 n = type ? 2 : 3; 5968 logptr = startptr + (idx * n * sizeof (uint32_t)); 5969 } 5970 5971 for (i = 0; i < num_events; i++) { 5972 event_id = iwk_mem_read(sc, logptr); 5973 logptr += sizeof (uint32_t); 5974 data1 = iwk_mem_read(sc, logptr); 5975 logptr += sizeof (uint32_t); 5976 if (type == 0) { /* no timestamp */ 5977 IWK_DBG((IWK_DEBUG_EEPROM, "Event ID=%d, Data=%x0x", 5978 event_id, data1)); 5979 } else { /* timestamp */ 5980 data2 = iwk_mem_read(sc, logptr); 5981 IWK_DBG((IWK_DEBUG_EEPROM, 5982 "Time=%d, Event ID=%d, Data=0x%x\n", 5983 data1, event_id, data2)); 5984 logptr += sizeof (uint32_t); 5985 } 5986 } 5987 5988 /* 5989 * Print the wrapped around entries, if any 5990 */ 5991 if (wraps) { 5992 logptr = startptr; 5993 for (i = 0; i < idx; i++) { 5994 event_id = iwk_mem_read(sc, logptr); 5995 logptr += sizeof (uint32_t); 5996 data1 = iwk_mem_read(sc, logptr); 5997 logptr += sizeof (uint32_t); 5998 if (type == 0) { /* no timestamp */ 5999 IWK_DBG((IWK_DEBUG_EEPROM, 6000 "Event ID=%d, Data=%x0x", event_id, data1)); 6001 } else { /* timestamp */ 6002 data2 = iwk_mem_read(sc, logptr); 6003 IWK_DBG((IWK_DEBUG_EEPROM, 6004 "Time = %d, Event ID=%d, Data=0x%x\n", 6005 data1, event_id, data2)); 6006 logptr += sizeof (uint32_t); 6007 } 6008 } 6009 } 6010 6011 iwk_mac_access_exit(sc); 6012 } 6013 6014 /* 6015 * error_event_table_ptr indicates base of the error log. This contains 6016 * information about any uCode error that occurs. 
For 4965, the format is: 6017 * 6018 * uint32_t valid; (nonzero) valid, (0) log is empty 6019 * uint32_t error_id; type of error 6020 * uint32_t pc; program counter 6021 * uint32_t blink1; branch link 6022 * uint32_t blink2; branch link 6023 * uint32_t ilink1; interrupt link 6024 * uint32_t ilink2; interrupt link 6025 * uint32_t data1; error-specific data 6026 * uint32_t data2; error-specific data 6027 * uint32_t line; source code line of error 6028 * uint32_t bcon_time; beacon timer 6029 * uint32_t tsf_low; network timestamp function timer 6030 * uint32_t tsf_hi; network timestamp function timer 6031 */ 6032 /* 6033 * iwk_write_error_log - Write error log to dmesg 6034 */ 6035 static void iwk_write_error_log(iwk_sc_t *sc) 6036 { 6037 uint32_t err_ptr; /* Start address of error log */ 6038 uint32_t valid; /* is error log valid */ 6039 6040 err_ptr = LE_32(sc->sc_card_alive_run.error_event_table_ptr); 6041 if (!(err_ptr)) { 6042 IWK_DBG((IWK_DEBUG_EEPROM, "NULL error table pointer\n")); 6043 return; 6044 } 6045 6046 iwk_mac_access_enter(sc); 6047 6048 valid = iwk_mem_read(sc, err_ptr); 6049 if (!(valid)) { 6050 IWK_DBG((IWK_DEBUG_EEPROM, "Error data not valid\n")); 6051 iwk_mac_access_exit(sc); 6052 return; 6053 } 6054 err_ptr += sizeof (uint32_t); 6055 IWK_DBG((IWK_DEBUG_EEPROM, "err=%d ", iwk_mem_read(sc, err_ptr))); 6056 err_ptr += sizeof (uint32_t); 6057 IWK_DBG((IWK_DEBUG_EEPROM, "pc=0x%X ", iwk_mem_read(sc, err_ptr))); 6058 err_ptr += sizeof (uint32_t); 6059 IWK_DBG((IWK_DEBUG_EEPROM, 6060 "branch link1=0x%X ", iwk_mem_read(sc, err_ptr))); 6061 err_ptr += sizeof (uint32_t); 6062 IWK_DBG((IWK_DEBUG_EEPROM, 6063 "branch link2=0x%X ", iwk_mem_read(sc, err_ptr))); 6064 err_ptr += sizeof (uint32_t); 6065 IWK_DBG((IWK_DEBUG_EEPROM, 6066 "interrupt link1=0x%X ", iwk_mem_read(sc, err_ptr))); 6067 err_ptr += sizeof (uint32_t); 6068 IWK_DBG((IWK_DEBUG_EEPROM, 6069 "interrupt link2=0x%X ", iwk_mem_read(sc, err_ptr))); 6070 err_ptr += sizeof (uint32_t); 6071 IWK_DBG((IWK_DEBUG_EEPROM, "data1=0x%X ", iwk_mem_read(sc, err_ptr))); 6072 err_ptr += sizeof (uint32_t); 6073 IWK_DBG((IWK_DEBUG_EEPROM, "data2=0x%X ", iwk_mem_read(sc, err_ptr))); 6074 err_ptr += sizeof (uint32_t); 6075 IWK_DBG((IWK_DEBUG_EEPROM, "line=%d ", iwk_mem_read(sc, err_ptr))); 6076 err_ptr += sizeof (uint32_t); 6077 IWK_DBG((IWK_DEBUG_EEPROM, "bcon_time=%d ", iwk_mem_read(sc, err_ptr))); 6078 err_ptr += sizeof (uint32_t); 6079 IWK_DBG((IWK_DEBUG_EEPROM, "tsf_low=%d ", iwk_mem_read(sc, err_ptr))); 6080 err_ptr += sizeof (uint32_t); 6081 IWK_DBG((IWK_DEBUG_EEPROM, "tsf_hi=%d\n", iwk_mem_read(sc, err_ptr))); 6082 6083 iwk_mac_access_exit(sc); 6084 } 6085 6086 static int 6087 iwk_run_state_config_ibss(ieee80211com_t *ic) 6088 { 6089 iwk_sc_t *sc = (iwk_sc_t *)ic; 6090 ieee80211_node_t *in = ic->ic_bss; 6091 int i, err = IWK_SUCCESS; 6092 6093 mutex_enter(&sc->sc_ibss.node_tb_lock); 6094 6095 /* 6096 * clean all nodes in ibss node table assure be 6097 * consistent with hardware 6098 */ 6099 for (i = IWK_STA_ID; i < IWK_STATION_COUNT; i++) { 6100 sc->sc_ibss.ibss_node_tb[i].used = 0; 6101 (void) memset(&sc->sc_ibss.ibss_node_tb[i].node, 6102 0, 6103 sizeof (iwk_add_sta_t)); 6104 } 6105 6106 sc->sc_ibss.node_number = 0; 6107 6108 mutex_exit(&sc->sc_ibss.node_tb_lock); 6109 6110 /* 6111 * configure RX and TX 6112 */ 6113 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS; 6114 6115 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK); 6116 sc->sc_config.filter_flags = 6117 LE_32(RXON_FILTER_ACCEPT_GRP_MSK | 6118 RXON_FILTER_DIS_DECRYPT_MSK 
| 6119 RXON_FILTER_DIS_GRP_DECRYPT_MSK); 6120 6121 sc->sc_config.assoc_id = 0; 6122 6123 IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid); 6124 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, 6125 in->in_chan)); 6126 6127 if (ic->ic_curmode == IEEE80211_MODE_11B) { 6128 sc->sc_config.cck_basic_rates = 0x03; 6129 sc->sc_config.ofdm_basic_rates = 0; 6130 } else if ((in->in_chan != IEEE80211_CHAN_ANYC) && 6131 (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) { 6132 sc->sc_config.cck_basic_rates = 0; 6133 sc->sc_config.ofdm_basic_rates = 0x15; 6134 6135 } else { 6136 sc->sc_config.cck_basic_rates = 0x0f; 6137 sc->sc_config.ofdm_basic_rates = 0xff; 6138 } 6139 6140 sc->sc_config.flags &= 6141 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK | 6142 RXON_FLG_SHORT_SLOT_MSK); 6143 6144 if (ic->ic_flags & IEEE80211_F_SHSLOT) { 6145 sc->sc_config.flags |= 6146 LE_32(RXON_FLG_SHORT_SLOT_MSK); 6147 } 6148 6149 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) { 6150 sc->sc_config.flags |= 6151 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK); 6152 } 6153 6154 sc->sc_config.filter_flags |= 6155 LE_32(RXON_FILTER_ASSOC_MSK); 6156 6157 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 6158 sizeof (iwk_rxon_cmd_t), 1); 6159 if (err != IWK_SUCCESS) { 6160 cmn_err(CE_WARN, "iwk_run_state_config_ibss(): " 6161 "failed to update configuration.\n"); 6162 return (err); 6163 } 6164 6165 return (err); 6166 6167 } 6168 6169 static int 6170 iwk_run_state_config_sta(ieee80211com_t *ic) 6171 { 6172 iwk_sc_t *sc = (iwk_sc_t *)ic; 6173 ieee80211_node_t *in = ic->ic_bss; 6174 int err = IWK_SUCCESS; 6175 6176 /* update adapter's configuration */ 6177 if (sc->sc_assoc_id != in->in_associd) { 6178 cmn_err(CE_WARN, "iwk_run_state_config_sta(): " 6179 "associate ID mismatch: expected %d, " 6180 "got %d\n", 6181 in->in_associd, sc->sc_assoc_id); 6182 } 6183 sc->sc_config.assoc_id = LE_16(in->in_associd & 0x3fff); 6184 6185 /* 6186 * short preamble/slot time are 6187 * negotiated when associating 6188 */ 6189 sc->sc_config.flags &= 6190 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK | 6191 RXON_FLG_SHORT_SLOT_MSK); 6192 6193 if (ic->ic_flags & IEEE80211_F_SHSLOT) 6194 sc->sc_config.flags |= 6195 LE_32(RXON_FLG_SHORT_SLOT_MSK); 6196 6197 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 6198 sc->sc_config.flags |= 6199 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK); 6200 6201 sc->sc_config.filter_flags |= 6202 LE_32(RXON_FILTER_ASSOC_MSK); 6203 6204 if (ic->ic_opmode != IEEE80211_M_STA) 6205 sc->sc_config.filter_flags |= 6206 LE_32(RXON_FILTER_BCON_AWARE_MSK); 6207 6208 IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x" 6209 " filter_flags %x\n", 6210 sc->sc_config.chan, sc->sc_config.flags, 6211 sc->sc_config.filter_flags)); 6212 6213 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config, 6214 sizeof (iwk_rxon_cmd_t), 1); 6215 if (err != IWK_SUCCESS) { 6216 cmn_err(CE_WARN, "iwk_run_state_config_sta(): " 6217 "failed to update configuration\n"); 6218 return (err); 6219 } 6220 6221 return (err); 6222 } 6223 6224 static int 6225 iwk_fast_recover(iwk_sc_t *sc) 6226 { 6227 ieee80211com_t *ic = &sc->sc_ic; 6228 int err; 6229 6230 mutex_enter(&sc->sc_glock); 6231 6232 /* restore runtime configuration */ 6233 bcopy(&sc->sc_config_save, &sc->sc_config, 6234 sizeof (sc->sc_config)); 6235 6236 /* reset state to handle reassociations correctly */ 6237 sc->sc_config.assoc_id = 0; 6238 sc->sc_config.filter_flags &= 6239 ~LE_32(RXON_FILTER_ASSOC_MSK); 6240 6241 if ((err = iwk_hw_set_before_auth(sc)) != 0) { 6242 cmn_err(CE_WARN, "iwk_fast_recover(): " 6243 "failed to setup authentication\n"); 6244 
mutex_exit(&sc->sc_glock); 6245 return (err); 6246 } 6247 6248 bcopy(&sc->sc_config_save, &sc->sc_config, 6249 sizeof (sc->sc_config)); 6250 6251 /* update adapter's configuration */ 6252 err = iwk_run_state_config_sta(ic); 6253 if (err != IWK_SUCCESS) { 6254 cmn_err(CE_WARN, "iwk_fast_recover(): " 6255 "failed to setup association\n"); 6256 mutex_exit(&sc->sc_glock); 6257 return (err); 6258 } 6259 6260 /* obtain current temperature of chipset */ 6261 sc->sc_tempera = iwk_curr_tempera(sc); 6262 6263 /* 6264 * make Tx power calibration to determine 6265 * the gains of DSP and radio 6266 */ 6267 err = iwk_tx_power_calibration(sc); 6268 if (err) { 6269 cmn_err(CE_WARN, "iwk_fast_recover(): " 6270 "failed to set tx power table\n"); 6271 mutex_exit(&sc->sc_glock); 6272 return (err); 6273 } 6274 6275 /* 6276 * make initialization for Receiver 6277 * sensitivity calibration 6278 */ 6279 err = iwk_rx_sens_init(sc); 6280 if (err) { 6281 cmn_err(CE_WARN, "iwk_fast_recover(): " 6282 "failed to init RX sensitivity\n"); 6283 mutex_exit(&sc->sc_glock); 6284 return (err); 6285 } 6286 6287 /* make initialization for Receiver gain balance */ 6288 err = iwk_rxgain_diff_init(sc); 6289 if (err) { 6290 cmn_err(CE_WARN, "iwk_fast_recover(): " 6291 "failed to init phy calibration\n"); 6292 mutex_exit(&sc->sc_glock); 6293 return (err); 6294 6295 } 6296 /* set LED on */ 6297 iwk_set_led(sc, 2, 0, 1); 6298 6299 mutex_exit(&sc->sc_glock); 6300 6301 /* update keys */ 6302 if (ic->ic_flags & IEEE80211_F_PRIVACY) { 6303 for (int i = 0; i < IEEE80211_KEY_MAX; i++) { 6304 if (ic->ic_nw_keys[i].wk_keyix == IEEE80211_KEYIX_NONE) 6305 continue; 6306 err = iwk_key_set(ic, &ic->ic_nw_keys[i], 6307 ic->ic_bss->in_macaddr); 6308 /* failure */ 6309 if (err == 0) { 6310 cmn_err(CE_WARN, "iwk_fast_recover(): " 6311 "failed to setup hardware keys\n"); 6312 return (IWK_FAIL); 6313 } 6314 } 6315 } 6316 6317 sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER; 6318 6319 /* start queue */ 6320 IWK_DBG((IWK_DEBUG_FW, "iwk_fast_recover(): resume xmit\n")); 6321 mac_tx_update(ic->ic_mach); 6322 6323 6324 return (IWK_SUCCESS); 6325 } 6326 6327 static int 6328 iwk_start_tx_beacon(ieee80211com_t *ic) 6329 { 6330 iwk_sc_t *sc = (iwk_sc_t *)ic; 6331 ieee80211_node_t *in = ic->ic_bss; 6332 int err = IWK_SUCCESS; 6333 iwk_tx_beacon_cmd_t *tx_beacon_p; 6334 uint16_t masks = 0; 6335 mblk_t *mp; 6336 int rate; 6337 6338 /* 6339 * allocate and transmit beacon frames 6340 */ 6341 tx_beacon_p = &sc->sc_ibss.ibss_beacon.beacon_cmd; 6342 6343 (void) memset(tx_beacon_p, 0, 6344 sizeof (iwk_tx_beacon_cmd_t)); 6345 rate = 0; 6346 masks = 0; 6347 6348 tx_beacon_p->config.sta_id = IWK_BROADCAST_ID; 6349 tx_beacon_p->config.stop_time.life_time = 6350 LE_32(0xffffffff); 6351 6352 if (sc->sc_ibss.ibss_beacon.mp != NULL) { 6353 freemsg(sc->sc_ibss.ibss_beacon.mp); 6354 sc->sc_ibss.ibss_beacon.mp = NULL; 6355 } 6356 6357 sc->sc_ibss.ibss_beacon.mp = 6358 ieee80211_beacon_alloc(ic, in, 6359 &sc->sc_ibss.ibss_beacon.iwk_boff); 6360 if (sc->sc_ibss.ibss_beacon.mp == NULL) { 6361 cmn_err(CE_WARN, "iwk_start_tx_beacon(): " 6362 "failed to get beacon frame.\n"); 6363 return (IWK_FAIL); 6364 } 6365 6366 mp = sc->sc_ibss.ibss_beacon.mp; 6367 6368 ASSERT(mp->b_cont == NULL); 6369 6370 bcopy(mp->b_rptr, tx_beacon_p->bcon_frame, MBLKL(mp)); 6371 6372 tx_beacon_p->config.len = LE_16((uint16_t)(MBLKL(mp))); 6373 sc->sc_ibss.ibss_beacon.beacon_cmd_len = 6374 sizeof (iwk_tx_cmd_t) + 6375 4 + LE_16(tx_beacon_p->config.len); 6376 6377 /* 6378 * beacons are sent at 1M 6379 */ 6380 rate = 
in->in_rates.ir_rates[0]; 6381 rate &= IEEE80211_RATE_VAL; 6382 6383 if (2 == rate || 4 == rate || 11 == rate || 6384 22 == rate) { 6385 masks |= RATE_MCS_CCK_MSK; 6386 } 6387 6388 masks |= RATE_MCS_ANT_B_MSK; 6389 6390 tx_beacon_p->config.rate.r.rate_n_flags = 6391 LE_32(iwk_rate_to_plcp(rate) | masks); 6392 6393 6394 tx_beacon_p->config.tx_flags = 6395 LE_32(TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK); 6396 6397 if (ic->ic_bss->in_tstamp.tsf != 0) { 6398 sc->sc_ibss.ibss_beacon.syncbeacon = 1; 6399 } else { 6400 if (ieee80211_beacon_update(ic, in, 6401 &sc->sc_ibss.ibss_beacon.iwk_boff, 6402 mp, 0)) { 6403 bcopy(mp->b_rptr, 6404 tx_beacon_p->bcon_frame, 6405 MBLKL(mp)); 6406 } 6407 6408 err = iwk_cmd(sc, REPLY_TX_BEACON, 6409 tx_beacon_p, 6410 sc->sc_ibss.ibss_beacon.beacon_cmd_len, 6411 1); 6412 if (err != IWK_SUCCESS) { 6413 cmn_err(CE_WARN, "iwk_start_tx_beacon(): " 6414 "failed to TX beacon.\n"); 6415 return (err); 6416 } 6417 6418 sc->sc_ibss.ibss_beacon.syncbeacon = 0; 6419 } 6420 6421 return (err); 6422 } 6423 6424 static int 6425 iwk_clean_add_node_ibss(struct ieee80211com *ic, 6426 uint8_t addr[IEEE80211_ADDR_LEN], uint8_t *index2) 6427 { 6428 iwk_sc_t *sc = (iwk_sc_t *)ic; 6429 uint8_t index; 6430 iwk_add_sta_t bc_node; 6431 iwk_link_quality_cmd_t bc_link_quality; 6432 iwk_link_quality_cmd_t link_quality; 6433 uint16_t bc_masks = 0; 6434 uint16_t masks = 0; 6435 int i, rate; 6436 struct ieee80211_rateset rs; 6437 iwk_ibss_node_t *ibss_node_p; 6438 int err = IWK_SUCCESS; 6439 6440 /* 6441 * find a location that is not 6442 * used in ibss node table 6443 */ 6444 for (index = IWK_STA_ID; 6445 index < IWK_STATION_COUNT; index++) { 6446 if (!sc->sc_ibss.ibss_node_tb[index].used) { 6447 break; 6448 } 6449 } 6450 6451 /* 6452 * if have too many nodes in hardware, clean up 6453 */ 6454 if (index < IWK_BROADCAST_ID && 6455 sc->sc_ibss.node_number >= 25) { 6456 if (iwk_cmd(sc, REPLY_REMOVE_ALL_STA, 6457 NULL, 0, 1) != IWK_SUCCESS) { 6458 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): " 6459 "failed to remove all nodes in hardware\n"); 6460 return (IWK_FAIL); 6461 } 6462 6463 for (i = IWK_STA_ID; i < IWK_STATION_COUNT; i++) { 6464 sc->sc_ibss.ibss_node_tb[i].used = 0; 6465 (void) memset(&sc->sc_ibss.ibss_node_tb[i].node, 6466 0, sizeof (iwk_add_sta_t)); 6467 } 6468 6469 sc->sc_ibss.node_number = 0; 6470 6471 /* 6472 * add broadcast node so that we 6473 * can send broadcast frame 6474 */ 6475 (void) memset(&bc_node, 0, sizeof (bc_node)); 6476 (void) memset(bc_node.bssid, 0xff, 6); 6477 bc_node.id = IWK_BROADCAST_ID; 6478 6479 err = iwk_cmd(sc, REPLY_ADD_STA, &bc_node, sizeof (bc_node), 1); 6480 if (err != IWK_SUCCESS) { 6481 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): " 6482 "failed to add broadcast node\n"); 6483 return (err); 6484 } 6485 6486 /* TX_LINK_QUALITY cmd */ 6487 (void) memset(&bc_link_quality, 0, sizeof (bc_link_quality)); 6488 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 6489 bc_masks |= RATE_MCS_CCK_MSK; 6490 bc_masks |= RATE_MCS_ANT_B_MSK; 6491 bc_masks &= ~RATE_MCS_ANT_A_MSK; 6492 bc_link_quality.rate_n_flags[i] = 6493 LE_32(iwk_rate_to_plcp(2) | bc_masks); 6494 } 6495 6496 bc_link_quality.general_params.single_stream_ant_msk = 2; 6497 bc_link_quality.general_params.dual_stream_ant_msk = 3; 6498 bc_link_quality.agg_params.agg_dis_start_th = 3; 6499 bc_link_quality.agg_params.agg_time_limit = LE_16(4000); 6500 bc_link_quality.sta_id = IWK_BROADCAST_ID; 6501 6502 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, 6503 &bc_link_quality, sizeof (bc_link_quality), 1); 6504 if (err 
!= IWK_SUCCESS) { 6505 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): " 6506 "failed to config link quality table\n"); 6507 return (err); 6508 } 6509 } 6510 6511 if (index >= IWK_BROADCAST_ID) { 6512 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): " 6513 "the count of node in hardware is too much\n"); 6514 return (IWK_FAIL); 6515 } 6516 6517 /* 6518 * add a node into hardware 6519 */ 6520 ibss_node_p = &sc->sc_ibss.ibss_node_tb[index]; 6521 6522 ibss_node_p->used = 1; 6523 6524 (void) memset(&ibss_node_p->node, 0, 6525 sizeof (iwk_add_sta_t)); 6526 6527 IEEE80211_ADDR_COPY(ibss_node_p->node.bssid, addr); 6528 ibss_node_p->node.id = index; 6529 ibss_node_p->node.control = 0; 6530 ibss_node_p->node.flags = 0; 6531 6532 err = iwk_cmd(sc, REPLY_ADD_STA, &ibss_node_p->node, 6533 sizeof (iwk_add_sta_t), 1); 6534 if (err != IWK_SUCCESS) { 6535 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): " 6536 "failed to add IBSS node\n"); 6537 ibss_node_p->used = 0; 6538 (void) memset(&ibss_node_p->node, 0, 6539 sizeof (iwk_add_sta_t)); 6540 return (err); 6541 } 6542 6543 sc->sc_ibss.node_number++; 6544 6545 (void) memset(&link_quality, 0, sizeof (link_quality)); 6546 6547 rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, 6548 ic->ic_curchan)]; 6549 6550 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 6551 if (i < rs.ir_nrates) { 6552 rate = rs. 6553 ir_rates[rs.ir_nrates - i]; 6554 } else { 6555 rate = 2; 6556 } 6557 6558 if (2 == rate || 4 == rate || 6559 11 == rate || 22 == rate) { 6560 masks |= RATE_MCS_CCK_MSK; 6561 } 6562 6563 masks |= RATE_MCS_ANT_B_MSK; 6564 masks &= ~RATE_MCS_ANT_A_MSK; 6565 6566 link_quality.rate_n_flags[i] = 6567 LE_32(iwk_rate_to_plcp(rate) | masks); 6568 } 6569 6570 link_quality.general_params.single_stream_ant_msk = 2; 6571 link_quality.general_params.dual_stream_ant_msk = 3; 6572 link_quality.agg_params.agg_dis_start_th = 3; 6573 link_quality.agg_params.agg_time_limit = LE_16(4000); 6574 link_quality.sta_id = ibss_node_p->node.id; 6575 6576 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, 6577 &link_quality, sizeof (link_quality), 1); 6578 if (err != IWK_SUCCESS) { 6579 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): " 6580 "failed to set up TX link quality\n"); 6581 ibss_node_p->used = 0; 6582 (void) memset(ibss_node_p->node.bssid, 0, 6); 6583 return (err); 6584 } 6585 6586 *index2 = index; 6587 6588 return (err); 6589 } 6590
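
/*
 * Illustrative sketch, not part of the original driver: iwk_ofdm_sens()
 * above (and the corresponding CCK path) treats the uCode statistics
 * counters as free-running 32-bit values, so each calibration interval it
 * derives a per-interval delta from the previous snapshot while allowing
 * for a single 32-bit wrap.  The helper below shows that accumulation step
 * in isolation; the name delta_u32() is hypothetical.  Note also that
 * multiplying the interval's count by 200 * 1024 before comparing it with
 * 50 * actual_rx_time and 5 * actual_rx_time in effect normalizes the
 * false-alarm count to a 200 TU (204800 usec) listening window: more than
 * about 50 false alarms per 200 TU raises the auto-correlation thresholds
 * (less sensitive), fewer than about 5 lowers them again.
 */
static uint32_t
delta_u32(uint32_t *last, uint32_t current)
{
	uint32_t delta;

	if (*last > current) {
		/* the hardware counter wrapped past 2^32 */
		delta = current + (0xFFFFFFFF - *last);
	} else {
		delta = current - *last;
	}

	*last = current;	/* remember the snapshot for the next interval */

	return (delta);
}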
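
/*
 * Illustrative sketch, not part of the original driver: how
 * iwk_write_event_log() above locates the oldest entry in the circular
 * event buffer.  An entry occupies three 32-bit words when the log carries
 * timestamps (type == 1) and two words otherwise.  Before the uCode has
 * wrapped, entries 0 .. write_index-1 are valid; after a wrap, the oldest
 * entry is the one write_index points at, so the dump walks
 * [write_index, log_size) first and then [0, write_index).  The structure
 * and helper name below are hypothetical.
 */
struct evt_cursor {
	uint32_t first_entry;	/* index of the oldest valid entry */
	uint32_t num_events;	/* valid entries from first_entry onwards */
	uint32_t entry_words;	/* 32-bit words per entry */
};

static void
evt_cursor_init(struct evt_cursor *cur, uint32_t log_size, uint32_t type,
    uint32_t wraps, uint32_t write_index)
{
	cur->entry_words = type ? 3 : 2;

	if (!wraps) {
		cur->first_entry = 0;
		cur->num_events = write_index;
	} else {
		cur->first_entry = write_index;
		cur->num_events = log_size - write_index;
	}
}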
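
/*
 * Illustrative sketch, not part of the original driver: how the
 * rate_n_flags words built by iwk_start_tx_beacon() and
 * iwk_clean_add_node_ibss() above are composed.  A net80211 rate code
 * (in 500 kb/s units) is translated to the firmware PLCP value by
 * iwk_rate_to_plcp(); CCK rates (1, 2, 5.5 and 11 Mb/s, i.e. codes 2, 4,
 * 11 and 22) additionally carry RATE_MCS_CCK_MSK, and these IBSS paths
 * restrict transmission to antenna B.  The helper name is hypothetical.
 */
static uint32_t
mk_rate_n_flags(int rate)
{
	uint16_t masks = RATE_MCS_ANT_B_MSK;

	if (2 == rate || 4 == rate || 11 == rate || 22 == rate) {
		masks |= RATE_MCS_CCK_MSK;
	}

	return (LE_32(iwk_rate_to_plcp(rate) | masks));
}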