
1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
19 #include <linux/dma-mapping.h>
39 return -ETIMEDOUT; in _il_poll_bit()
48 spin_lock_irqsave(&p->reg_lock, reg_flags); in il_set_bit()
50 spin_unlock_irqrestore(&p->reg_lock, reg_flags); in il_set_bit()
59 spin_lock_irqsave(&p->reg_lock, reg_flags); in il_clear_bit()
61 spin_unlock_irqrestore(&p->reg_lock, reg_flags); in il_clear_bit()
79 * to/from host DRAM when sleeping/waking for power-saving. in _il_grab_nic_access()
120 return -ETIMEDOUT; in il_poll_bit()
130 spin_lock_irqsave(&il->reg_lock, reg_flags); in il_rd_prph()
134 spin_unlock_irqrestore(&il->reg_lock, reg_flags); in il_rd_prph()
144 spin_lock_irqsave(&il->reg_lock, reg_flags); in il_wr_prph()
149 spin_unlock_irqrestore(&il->reg_lock, reg_flags); in il_wr_prph()
159 spin_lock_irqsave(&il->reg_lock, reg_flags); in il_read_targ_mem()
166 spin_unlock_irqrestore(&il->reg_lock, reg_flags); in il_read_targ_mem()
176 spin_lock_irqsave(&il->reg_lock, reg_flags); in il_write_targ_mem()
182 spin_unlock_irqrestore(&il->reg_lock, reg_flags); in il_write_targ_mem()
244 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { in il_generic_cmd_callback()
246 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); in il_generic_cmd_callback()
250 switch (cmd->hdr.cmd) { in il_generic_cmd_callback()
254 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); in il_generic_cmd_callback()
257 D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd), in il_generic_cmd_callback()
258 pkt->hdr.flags); in il_generic_cmd_callback()
268 BUG_ON(!(cmd->flags & CMD_ASYNC)); in il_send_cmd_async()
271 BUG_ON(cmd->flags & CMD_WANT_SKB); in il_send_cmd_async()
274 if (!cmd->callback) in il_send_cmd_async()
275 cmd->callback = il_generic_cmd_callback; in il_send_cmd_async()
277 if (test_bit(S_EXIT_PENDING, &il->status)) in il_send_cmd_async()
278 return -EBUSY; in il_send_cmd_async()
283 il_get_cmd_string(cmd->id), ret); in il_send_cmd_async()
295 lockdep_assert_held(&il->mutex); in il_send_cmd_sync()
297 BUG_ON(cmd->flags & CMD_ASYNC); in il_send_cmd_sync()
300 BUG_ON(cmd->callback); in il_send_cmd_sync()
303 il_get_cmd_string(cmd->id)); in il_send_cmd_sync()
305 set_bit(S_HCMD_ACTIVE, &il->status); in il_send_cmd_sync()
307 il_get_cmd_string(cmd->id)); in il_send_cmd_sync()
313 il_get_cmd_string(cmd->id), ret); in il_send_cmd_sync()
317 ret = wait_event_timeout(il->wait_command_queue, in il_send_cmd_sync()
318 !test_bit(S_HCMD_ACTIVE, &il->status), in il_send_cmd_sync()
321 if (test_bit(S_HCMD_ACTIVE, &il->status)) { in il_send_cmd_sync()
323 il_get_cmd_string(cmd->id), in il_send_cmd_sync()
326 clear_bit(S_HCMD_ACTIVE, &il->status); in il_send_cmd_sync()
328 il_get_cmd_string(cmd->id)); in il_send_cmd_sync()
329 ret = -ETIMEDOUT; in il_send_cmd_sync()
334 if (test_bit(S_RFKILL, &il->status)) { in il_send_cmd_sync()
336 il_get_cmd_string(cmd->id)); in il_send_cmd_sync()
337 ret = -ECANCELED; in il_send_cmd_sync()
340 if (test_bit(S_FW_ERROR, &il->status)) { in il_send_cmd_sync()
342 il_get_cmd_string(cmd->id)); in il_send_cmd_sync()
343 ret = -EIO; in il_send_cmd_sync()
346 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { in il_send_cmd_sync()
348 il_get_cmd_string(cmd->id)); in il_send_cmd_sync()
349 ret = -EIO; in il_send_cmd_sync()
357 if (cmd->flags & CMD_WANT_SKB) { in il_send_cmd_sync()
362 * address (cmd->meta.source). in il_send_cmd_sync()
364 il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB; in il_send_cmd_sync()
367 if (cmd->reply_page) { in il_send_cmd_sync()
368 il_free_pages(il, cmd->reply_page); in il_send_cmd_sync()
369 cmd->reply_page = 0; in il_send_cmd_sync()
379 if (cmd->flags & CMD_ASYNC) in il_send_cmd()
439 {.throughput = 1 * 1024 - 1, .blink_time = 260},
440 {.throughput = 5 * 1024 - 1, .blink_time = 220},
441 {.throughput = 10 * 1024 - 1, .blink_time = 190},
442 {.throughput = 20 * 1024 - 1, .blink_time = 170},
443 {.throughput = 50 * 1024 - 1, .blink_time = 150},
444 {.throughput = 70 * 1024 - 1, .blink_time = 130},
445 {.throughput = 100 * 1024 - 1, .blink_time = 110},
446 {.throughput = 200 * 1024 - 1, .blink_time = 80},
447 {.throughput = 300 * 1024 - 1, .blink_time = 50},
456 * The calculation is: (100-averageDeviation)/100 * blinkTime
458 * compensation = (100 - averageDeviation) * 64 / 100
466 "use pre-defined blinking time\n"); in il_blink_compensation()
483 if (!test_bit(S_READY, &il->status)) in il_led_cmd()
484 return -EBUSY; in il_led_cmd()
486 if (il->blink_on == on && il->blink_off == off) in il_led_cmd()
495 il->cfg->led_compensation); in il_led_cmd()
498 il->cfg->led_compensation); in il_led_cmd()
501 il->cfg->led_compensation); in il_led_cmd()
503 ret = il->ops->send_led_cmd(il, &led_cmd); in il_led_cmd()
505 il->blink_on = on; in il_led_cmd()
506 il->blink_off = off; in il_led_cmd()
540 mode = il->cfg->led_mode; in il_leds_init()
542 il->led.name = in il_leds_init()
543 kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy)); in il_leds_init()
544 if (!il->led.name) in il_leds_init()
547 il->led.brightness_set = il_led_brightness_set; in il_leds_init()
548 il->led.blink_set = il_led_blink_set; in il_leds_init()
549 il->led.max_brightness = 1; in il_leds_init()
556 il->led.default_trigger = in il_leds_init()
557 ieee80211_create_tpt_led_trigger(il->hw, in il_leds_init()
563 il->led.default_trigger = ieee80211_get_radio_led_name(il->hw); in il_leds_init()
567 ret = led_classdev_register(&il->pci_dev->dev, &il->led); in il_leds_init()
569 kfree(il->led.name); in il_leds_init()
573 il->led_registered = true; in il_leds_init()
580 if (!il->led_registered) in il_leds_exit()
583 led_classdev_unregister(&il->led); in il_leds_exit()
584 kfree(il->led.name); in il_leds_exit()
594 * For example, il_priv->eeprom.band_3_channels[4] from the band_3
600 * information into il->channel_info_24/52 and il->channel_map_24/52
625 static const u8 il_eeprom_band_2[] = { /* 4915-5080MHz */
629 static const u8 il_eeprom_band_3[] = { /* 5170-5320MHz */
633 static const u8 il_eeprom_band_4[] = { /* 5500-5700MHz */
637 static const u8 il_eeprom_band_5[] = { /* 5725-5825MHz */
668 ret = -ENOENT; in il_eeprom_verify_signature()
677 BUG_ON(offset >= il->cfg->eeprom_size); in il_eeprom_query_addr()
678 return &il->eeprom[offset]; in il_eeprom_query_addr()
685 if (!il->eeprom) in il_eeprom_query16()
687 return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8); in il_eeprom_query16()
692 * il_eeprom_init - read EEPROM contents
694 * Load the EEPROM contents from adapter into il->eeprom
696 * NOTE: This routine uses the non-debug IO access functions.
708 sz = il->cfg->eeprom_size; in il_eeprom_init()
710 il->eeprom = kzalloc(sz, GFP_KERNEL); in il_eeprom_init()
711 if (!il->eeprom) in il_eeprom_init()
712 return -ENOMEM; in il_eeprom_init()
714 e = (__le16 *) il->eeprom; in il_eeprom_init()
716 il->ops->apm_init(il); in il_eeprom_init()
721 ret = -ENOENT; in il_eeprom_init()
726 ret = il->ops->eeprom_acquire_semaphore(il); in il_eeprom_init()
729 ret = -ENOENT; in il_eeprom_init()
758 il->ops->eeprom_release_semaphore(il); in il_eeprom_init()
772 kfree(il->eeprom); in il_eeprom_free()
773 il->eeprom = NULL; in il_eeprom_free()
783 u32 offset = il->cfg->regulatory_bands[eep_band - 1]; in il_init_band_reference()
840 #define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
843 * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il.
858 return -1; in il_mod_ht40_chan_info()
861 " Ad-Hoc %ssupported\n", ch_info->channel, in il_mod_ht40_chan_info()
865 CHECK_AND_PRINT(DFS), eeprom_ch->flags, in il_mod_ht40_chan_info()
866 eeprom_ch->max_power_avg, in il_mod_ht40_chan_info()
867 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) && in il_mod_ht40_chan_info()
868 !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not "); in il_mod_ht40_chan_info()
870 ch_info->ht40_eeprom = *eeprom_ch; in il_mod_ht40_chan_info()
871 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg; in il_mod_ht40_chan_info()
872 ch_info->ht40_flags = eeprom_ch->flags; in il_mod_ht40_chan_info()
873 if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) in il_mod_ht40_chan_info()
874 ch_info->ht40_extension_channel &= in il_mod_ht40_chan_info()
884 * il_init_channel_map - Set up driver's info for all possible channels
895 if (il->channel_count) { in il_init_channel_map()
902 il->channel_count = in il_init_channel_map()
907 D_EEPROM("Parsing data for %d channels.\n", il->channel_count); in il_init_channel_map()
909 il->channel_info = in il_init_channel_map()
910 kcalloc(il->channel_count, sizeof(struct il_channel_info), in il_init_channel_map()
912 if (!il->channel_info) { in il_init_channel_map()
914 il->channel_count = 0; in il_init_channel_map()
915 return -ENOMEM; in il_init_channel_map()
918 ch_info = il->channel_info; in il_init_channel_map()
930 ch_info->channel = eeprom_ch_idx[ch]; in il_init_channel_map()
931 ch_info->band = in il_init_channel_map()
937 ch_info->eeprom = eeprom_ch_info[ch]; in il_init_channel_map()
939 /* Copy the run-time flags so they are there even on in il_init_channel_map()
941 ch_info->flags = eeprom_ch_info[ch].flags; in il_init_channel_map()
944 ch_info->ht40_extension_channel = in il_init_channel_map()
948 D_EEPROM("Ch. %d Flags %x [%sGHz] - " in il_init_channel_map()
949 "No traffic\n", ch_info->channel, in il_init_channel_map()
950 ch_info->flags, in il_init_channel_map()
957 /* Initialize regulatory-based run-time data */ in il_init_channel_map()
958 ch_info->max_power_avg = ch_info->curr_txpow = in il_init_channel_map()
960 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; in il_init_channel_map()
961 ch_info->min_power = 0; in il_init_channel_map()
964 " Ad-Hoc %ssupported\n", ch_info->channel, in il_init_channel_map()
985 if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 && in il_init_channel_map()
986 il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40) in il_init_channel_map()
1020 * il_free_channel_map - undo allocations in il_init_channel_map
1025 kfree(il->channel_info); in il_free_channel_map()
1026 il->channel_count = 0; in il_free_channel_map()
1031 * il_get_channel_info - Find driver's private channel info
1043 for (i = 14; i < il->channel_count; i++) { in il_get_channel_info()
1044 if (il->channel_info[i].channel == channel) in il_get_channel_info()
1045 return &il->channel_info[i]; in il_get_channel_info()
1050 return &il->channel_info[channel - 1]; in il_get_channel_info()
1089 if (il->power_data.pci_pm) in il_build_powertable_cmd()
1090 cmd->flags |= IL_POWER_PCI_PM_MSK; in il_build_powertable_cmd()
1093 if (il->power_data.ps_disabled) in il_build_powertable_cmd()
1096 cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK; in il_build_powertable_cmd()
1097 cmd->keep_alive_seconds = 0; in il_build_powertable_cmd()
1098 cmd->debug_flags = 0; in il_build_powertable_cmd()
1099 cmd->rx_data_timeout = cpu_to_le32(25 * 1024); in il_build_powertable_cmd()
1100 cmd->tx_data_timeout = cpu_to_le32(25 * 1024); in il_build_powertable_cmd()
1101 cmd->keep_alive_beacons = 0; in il_build_powertable_cmd()
1103 dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0; in il_build_powertable_cmd()
1106 memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0])); in il_build_powertable_cmd()
1109 memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1])); in il_build_powertable_cmd()
1112 memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2])); in il_build_powertable_cmd()
1124 __le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1]; in il_build_powertable_cmd()
1131 cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK; in il_build_powertable_cmd()
1134 cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK; in il_build_powertable_cmd()
1138 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep) in il_build_powertable_cmd()
1139 cmd->sleep_interval[i] = cpu_to_le32(max_sleep); in il_build_powertable_cmd()
1146 D_POWER("Flags value = 0x%08X\n", cmd->flags); in il_set_power()
1147 D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); in il_set_power()
1148 D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); in il_set_power()
1150 le32_to_cpu(cmd->sleep_interval[0]), in il_set_power()
1151 le32_to_cpu(cmd->sleep_interval[1]), in il_set_power()
1152 le32_to_cpu(cmd->sleep_interval[2]), in il_set_power()
1153 le32_to_cpu(cmd->sleep_interval[3]), in il_set_power()
1154 le32_to_cpu(cmd->sleep_interval[4])); in il_set_power()
1166 lockdep_assert_held(&il->mutex); in il_power_set_mode()
1169 update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE || in il_power_set_mode()
1170 il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE; in il_power_set_mode()
1172 if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force) in il_power_set_mode()
1176 return -EIO; in il_power_set_mode()
1179 memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd)); in il_power_set_mode()
1180 if (test_bit(S_SCANNING, &il->status) && !force) { in il_power_set_mode()
1185 if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK) in il_power_set_mode()
1186 set_bit(S_POWER_PMI, &il->status); in il_power_set_mode()
1190 if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)) in il_power_set_mode()
1191 clear_bit(S_POWER_PMI, &il->status); in il_power_set_mode()
1193 if (il->ops->update_chain_flags && update_chains) in il_power_set_mode()
1194 il->ops->update_chain_flags(il); in il_power_set_mode()
1195 else if (il->ops->update_chain_flags) in il_power_set_mode()
1198 il->chain_noise_data.state); in il_power_set_mode()
1200 memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)); in il_power_set_mode()
1224 pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); in il_power_initialize()
1225 il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); in il_power_initialize()
1227 il->power_data.debug_sleep_level_override = -1; in il_power_initialize()
1229 memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd)); in il_power_initialize()
1263 if (!test_bit(S_READY, &il->status) || in il_send_scan_abort()
1264 !test_bit(S_GEO_CONFIGURED, &il->status) || in il_send_scan_abort()
1265 !test_bit(S_SCAN_HW, &il->status) || in il_send_scan_abort()
1266 test_bit(S_FW_ERROR, &il->status) || in il_send_scan_abort()
1267 test_bit(S_EXIT_PENDING, &il->status)) in il_send_scan_abort()
1268 return -EIO; in il_send_scan_abort()
1275 if (pkt->u.status != CAN_ABORT_STATUS) { in il_send_scan_abort()
1282 D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status); in il_send_scan_abort()
1283 ret = -EIO; in il_send_scan_abort()
1298 if (il->scan_request) { in il_complete_scan()
1300 ieee80211_scan_completed(il->hw, &info); in il_complete_scan()
1303 il->scan_vif = NULL; in il_complete_scan()
1304 il->scan_request = NULL; in il_complete_scan()
1310 lockdep_assert_held(&il->mutex); in il_force_scan_end()
1312 if (!test_bit(S_SCANNING, &il->status)) { in il_force_scan_end()
1318 clear_bit(S_SCANNING, &il->status); in il_force_scan_end()
1319 clear_bit(S_SCAN_HW, &il->status); in il_force_scan_end()
1320 clear_bit(S_SCAN_ABORTING, &il->status); in il_force_scan_end()
1329 lockdep_assert_held(&il->mutex); in il_do_scan_abort()
1331 if (!test_bit(S_SCANNING, &il->status)) { in il_do_scan_abort()
1336 if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) { in il_do_scan_abort()
1350 * il_scan_cancel - Cancel any currently executing HW scan
1356 queue_work(il->workqueue, &il->abort_scan); in il_scan_cancel()
1362 * il_scan_cancel_timeout - Cancel any currently executing HW scan
1371 lockdep_assert_held(&il->mutex); in il_scan_cancel_timeout()
1378 if (!test_bit(S_SCAN_HW, &il->status)) in il_scan_cancel_timeout()
1383 return test_bit(S_SCAN_HW, &il->status); in il_scan_cancel_timeout()
1394 (struct il_scanreq_notification *)pkt->u.raw; in il_hdl_scan()
1396 D_SCAN("Scan request status = 0x%x\n", notif->status); in il_hdl_scan()
1406 (struct il_scanstart_notification *)pkt->u.raw; in il_hdl_scan_start()
1407 il->scan_start_tsf = le32_to_cpu(notif->tsf_low); in il_hdl_scan_start()
1409 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel, in il_hdl_scan_start()
1410 notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high), in il_hdl_scan_start()
1411 le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer); in il_hdl_scan_start()
1421 (struct il_scanresults_notification *)pkt->u.raw; in il_hdl_scan_results()
1423 D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d " in il_hdl_scan_results()
1424 "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a", in il_hdl_scan_results()
1425 le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low), in il_hdl_scan_results()
1426 le32_to_cpu(notif->stats[0]), in il_hdl_scan_results()
1427 le32_to_cpu(notif->tsf_low) - il->scan_start_tsf); in il_hdl_scan_results()
1437 struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw; in il_hdl_scan_complete()
1439 D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", in il_hdl_scan_complete()
1440 scan_notif->scanned_channels, scan_notif->tsf_low, in il_hdl_scan_complete()
1441 scan_notif->tsf_high, scan_notif->status); in il_hdl_scan_complete()
1444 clear_bit(S_SCAN_HW, &il->status); in il_hdl_scan_complete()
1447 (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2", in il_hdl_scan_complete()
1448 jiffies_to_msecs(jiffies - il->scan_start)); in il_hdl_scan_complete()
1450 queue_work(il->workqueue, &il->scan_completed); in il_hdl_scan_complete()
1457 il->handlers[C_SCAN] = il_hdl_scan; in il_setup_rx_scan_handlers()
1458 il->handlers[N_SCAN_START] = il_hdl_scan_start; in il_setup_rx_scan_handlers()
1459 il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results; in il_setup_rx_scan_handlers()
1460 il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete; in il_setup_rx_scan_handlers()
1495 value = il->vif ? il->vif->bss_conf.beacon_int : 0; in il_get_passive_dwell_time()
1498 value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2; in il_get_passive_dwell_time()
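As a worked example of the cap above: with a beacon interval of 100 TU and IL_CHANNEL_TUNE_TIME assumed to be 5, the passive dwell is limited to 100 * 98 / 100 - 5 * 2 = 88 TU, which keeps the dwell safely inside a single beacon period.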
1509 u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1; in il_init_scan_params()
1510 if (!il->scan_tx_ant[NL80211_BAND_5GHZ]) in il_init_scan_params()
1511 il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx; in il_init_scan_params()
1512 if (!il->scan_tx_ant[NL80211_BAND_2GHZ]) in il_init_scan_params()
1513 il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx; in il_init_scan_params()
1522 lockdep_assert_held(&il->mutex); in il_scan_initiate()
1524 cancel_delayed_work(&il->scan_check); in il_scan_initiate()
1528 return -EIO; in il_scan_initiate()
1531 if (test_bit(S_SCAN_HW, &il->status)) { in il_scan_initiate()
1533 return -EBUSY; in il_scan_initiate()
1536 if (test_bit(S_SCAN_ABORTING, &il->status)) { in il_scan_initiate()
1538 return -EBUSY; in il_scan_initiate()
1543 set_bit(S_SCANNING, &il->status); in il_scan_initiate()
1544 il->scan_start = jiffies; in il_scan_initiate()
1546 ret = il->ops->request_scan(il, vif); in il_scan_initiate()
1548 clear_bit(S_SCANNING, &il->status); in il_scan_initiate()
1552 queue_delayed_work(il->workqueue, &il->scan_check, in il_scan_initiate()
1562 struct cfg80211_scan_request *req = &hw_req->req; in il_mac_hw_scan()
1563 struct il_priv *il = hw->priv; in il_mac_hw_scan()
1566 if (req->n_channels == 0) { in il_mac_hw_scan()
1568 return -EINVAL; in il_mac_hw_scan()
1571 mutex_lock(&il->mutex); in il_mac_hw_scan()
1574 if (test_bit(S_SCANNING, &il->status)) { in il_mac_hw_scan()
1576 ret = -EAGAIN; in il_mac_hw_scan()
1581 il->scan_request = req; in il_mac_hw_scan()
1582 il->scan_vif = vif; in il_mac_hw_scan()
1583 il->scan_band = req->channels[0]->band; in il_mac_hw_scan()
1589 mutex_unlock(&il->mutex); in il_mac_hw_scan()
1606 mutex_lock(&il->mutex); in il_bg_scan_check()
1608 mutex_unlock(&il->mutex); in il_bg_scan_check()
1612 * il_fill_probe_req - fill in all required fields and IE for probe request
1623 left -= 24; in il_fill_probe_req()
1627 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); in il_fill_probe_req()
1628 eth_broadcast_addr(frame->da); in il_fill_probe_req()
1629 memcpy(frame->sa, ta, ETH_ALEN); in il_fill_probe_req()
1630 eth_broadcast_addr(frame->bssid); in il_fill_probe_req()
1631 frame->seq_ctrl = 0; in il_fill_probe_req()
1636 pos = &frame->u.probe_req.variable[0]; in il_fill_probe_req()
1639 left -= 2; in il_fill_probe_req()
1668 mutex_lock(&il->mutex); in il_bg_abort_scan()
1670 mutex_unlock(&il->mutex); in il_bg_abort_scan()
1681 cancel_delayed_work(&il->scan_check); in il_bg_scan_completed()
1683 mutex_lock(&il->mutex); in il_bg_scan_completed()
1685 aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status); in il_bg_scan_completed()
1689 if (!test_and_clear_bit(S_SCANNING, &il->status)) { in il_bg_scan_completed()
1705 il_power_set_mode(il, &il->power_data.sleep_cmd_next, false); in il_bg_scan_completed()
1706 il_set_tx_power(il, il->tx_power_next, false); in il_bg_scan_completed()
1708 il->ops->post_scan(il); in il_bg_scan_completed()
1711 mutex_unlock(&il->mutex); in il_bg_scan_completed()
1717 INIT_WORK(&il->scan_completed, il_bg_scan_completed); in il_setup_scan_deferred_work()
1718 INIT_WORK(&il->abort_scan, il_bg_abort_scan); in il_setup_scan_deferred_work()
1719 INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check); in il_setup_scan_deferred_work()
1726 cancel_work_sync(&il->abort_scan); in il_cancel_scan_deferred_work()
1727 cancel_work_sync(&il->scan_completed); in il_cancel_scan_deferred_work()
1729 if (cancel_delayed_work_sync(&il->scan_check)) { in il_cancel_scan_deferred_work()
1730 mutex_lock(&il->mutex); in il_cancel_scan_deferred_work()
1732 mutex_unlock(&il->mutex); in il_cancel_scan_deferred_work()
1737 /* il->sta_lock must be held */
1742 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) in il_sta_ucode_activate()
1744 sta_id, il->stations[sta_id].sta.sta.addr); in il_sta_ucode_activate()
1746 if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) { in il_sta_ucode_activate()
1749 il->stations[sta_id].sta.sta.addr); in il_sta_ucode_activate()
1751 il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE; in il_sta_ucode_activate()
1753 il->stations[sta_id].sta.sta.addr); in il_sta_ucode_activate()
1761 u8 sta_id = addsta->sta.sta_id; in il_process_add_sta_resp()
1763 int ret = -EIO; in il_process_add_sta_resp()
1765 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { in il_process_add_sta_resp()
1766 IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags); in il_process_add_sta_resp()
1772 spin_lock_irqsave(&il->sta_lock, flags); in il_process_add_sta_resp()
1774 switch (pkt->u.add_sta.status) { in il_process_add_sta_resp()
1788 IL_ERR("Attempting to modify non-existing station %d\n", in il_process_add_sta_resp()
1792 D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status); in il_process_add_sta_resp()
1797 il->stations[sta_id].sta.mode == in il_process_add_sta_resp()
1799 il->stations[sta_id].sta.sta.addr); in il_process_add_sta_resp()
1810 il->stations[sta_id].sta.mode == in il_process_add_sta_resp()
1811 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr); in il_process_add_sta_resp()
1812 spin_unlock_irqrestore(&il->sta_lock, flags); in il_process_add_sta_resp()
1821 struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload; in il_add_sta_callback()
1838 u8 sta_id __maybe_unused = sta->sta.sta_id; in il_send_add_sta()
1840 D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr, in il_send_add_sta()
1850 cmd.len = il->ops->build_addsta_hcmd(sta, data); in il_send_add_sta()
1869 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->deflink.ht_cap; in il_set_ht_add_station()
1872 if (!sta || !sta_ht_inf->ht_supported) in il_set_ht_add_station()
1876 (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC) ? "static" : in il_set_ht_add_station()
1877 (sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" : in il_set_ht_add_station()
1880 sta_flags = il->stations[idx].sta.station_flags; in il_set_ht_add_station()
1884 switch (sta->deflink.smps_mode) { in il_set_ht_add_station()
1894 IL_WARN("Invalid MIMO PS mode %d\n", sta->deflink.smps_mode); in il_set_ht_add_station()
1899 cpu_to_le32((u32) sta_ht_inf-> in il_set_ht_add_station()
1903 cpu_to_le32((u32) sta_ht_inf-> in il_set_ht_add_station()
1906 if (il_is_ht40_tx_allowed(il, &sta->deflink.ht_cap)) in il_set_ht_add_station()
1911 il->stations[idx].sta.station_flags = sta_flags; in il_set_ht_add_station()
1917 * il_prep_station - Prepare station information for addition
1933 sta_id = il->hw_params.bcast_id; in il_prep_station()
1935 for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) { in il_prep_station()
1936 if (ether_addr_equal(il->stations[i].sta.sta.addr, in il_prep_station()
1942 if (!il->stations[i].used && in il_prep_station()
1959 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { in il_prep_station()
1964 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && in il_prep_station()
1965 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) && in il_prep_station()
1966 ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) { in il_prep_station()
1972 station = &il->stations[sta_id]; in il_prep_station()
1973 station->used = IL_STA_DRIVER_ACTIVE; in il_prep_station()
1975 il->num_stations++; in il_prep_station()
1978 memset(&station->sta, 0, sizeof(struct il_addsta_cmd)); in il_prep_station()
1979 memcpy(station->sta.sta.addr, addr, ETH_ALEN); in il_prep_station()
1980 station->sta.mode = 0; in il_prep_station()
1981 station->sta.sta.sta_id = sta_id; in il_prep_station()
1982 station->sta.station_flags = 0; in il_prep_station()
1992 rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP; in il_prep_station()
1994 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); in il_prep_station()
2004 * il_add_station_common - add a station to the driver's and uCode's station tables
2016 spin_lock_irqsave(&il->sta_lock, flags_spin); in il_add_station_common()
2020 spin_unlock_irqrestore(&il->sta_lock, flags_spin); in il_add_station_common()
2021 return -EINVAL; in il_add_station_common()
2029 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { in il_add_station_common()
2031 spin_unlock_irqrestore(&il->sta_lock, flags_spin); in il_add_station_common()
2032 return -EEXIST; in il_add_station_common()
2035 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && in il_add_station_common()
2036 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { in il_add_station_common()
2039 spin_unlock_irqrestore(&il->sta_lock, flags_spin); in il_add_station_common()
2040 return -EEXIST; in il_add_station_common()
2043 il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS; in il_add_station_common()
2044 memcpy(&sta_cmd, &il->stations[sta_id].sta, in il_add_station_common()
2046 spin_unlock_irqrestore(&il->sta_lock, flags_spin); in il_add_station_common()
2051 spin_lock_irqsave(&il->sta_lock, flags_spin); in il_add_station_common()
2053 il->stations[sta_id].sta.sta.addr); in il_add_station_common()
2054 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; in il_add_station_common()
2055 il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS; in il_add_station_common()
2056 spin_unlock_irqrestore(&il->sta_lock, flags_spin); in il_add_station_common()
2064 * il_sta_ucode_deactivate - deactivate ucode status for a station
2066 * il->sta_lock must be held
2072 if ((il->stations[sta_id]. in il_sta_ucode_deactivate()
2077 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE; in il_sta_ucode_deactivate()
2079 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry)); in il_sta_ucode_deactivate()
2112 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { in il_send_remove_station()
2113 IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags); in il_send_remove_station()
2114 ret = -EIO; in il_send_remove_station()
2118 switch (pkt->u.rem_sta.status) { in il_send_remove_station()
2121 spin_lock_irqsave(&il->sta_lock, flags_spin); in il_send_remove_station()
2123 spin_unlock_irqrestore(&il->sta_lock, in il_send_remove_station()
2129 ret = -EIO; in il_send_remove_station()
2140 * il_remove_station - Remove driver's knowledge of station.
2161 return -EINVAL; in il_remove_station()
2163 spin_lock_irqsave(&il->sta_lock, flags); in il_remove_station()
2165 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) { in il_remove_station()
2170 if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { in il_remove_station()
2175 if (il->stations[sta_id].used & IL_STA_LOCAL) { in il_remove_station()
2176 kfree(il->stations[sta_id].lq); in il_remove_station()
2177 il->stations[sta_id].lq = NULL; in il_remove_station()
2180 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; in il_remove_station()
2182 il->num_stations--; in il_remove_station()
2184 BUG_ON(il->num_stations < 0); in il_remove_station()
2186 spin_unlock_irqrestore(&il->sta_lock, flags); in il_remove_station()
2190 spin_unlock_irqrestore(&il->sta_lock, flags); in il_remove_station()
2191 return -EINVAL; in il_remove_station()
2196 * il_clear_ucode_stations - clear ucode station table bits
2212 spin_lock_irqsave(&il->sta_lock, flags_spin); in il_clear_ucode_stations()
2213 for (i = 0; i < il->hw_params.max_stations; i++) { in il_clear_ucode_stations()
2214 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) { in il_clear_ucode_stations()
2216 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; in il_clear_ucode_stations()
2220 spin_unlock_irqrestore(&il->sta_lock, flags_spin); in il_clear_ucode_stations()
2228 * il_restore_stations() - Restore driver known stations to device
2252 spin_lock_irqsave(&il->sta_lock, flags_spin); in il_restore_stations()
2253 for (i = 0; i < il->hw_params.max_stations; i++) { in il_restore_stations()
2254 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) && in il_restore_stations()
2255 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) { in il_restore_stations()
2257 il->stations[i].sta.sta.addr); in il_restore_stations()
2258 il->stations[i].sta.mode = 0; in il_restore_stations()
2259 il->stations[i].used |= IL_STA_UCODE_INPROGRESS; in il_restore_stations()
2264 for (i = 0; i < il->hw_params.max_stations; i++) { in il_restore_stations()
2265 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) { in il_restore_stations()
2266 memcpy(&sta_cmd, &il->stations[i].sta, in il_restore_stations()
2269 if (il->stations[i].lq) { in il_restore_stations()
2270 memcpy(&lq, il->stations[i].lq, in il_restore_stations()
2274 spin_unlock_irqrestore(&il->sta_lock, flags_spin); in il_restore_stations()
2277 spin_lock_irqsave(&il->sta_lock, flags_spin); in il_restore_stations()
2279 il->stations[i].sta.sta.addr); in il_restore_stations()
2280 il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE; in il_restore_stations()
2281 il->stations[i].used &= in il_restore_stations()
2283 spin_unlock_irqrestore(&il->sta_lock, in il_restore_stations()
2292 spin_lock_irqsave(&il->sta_lock, flags_spin); in il_restore_stations()
2293 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS; in il_restore_stations()
2297 spin_unlock_irqrestore(&il->sta_lock, flags_spin); in il_restore_stations()
2311 for (i = 0; i < il->sta_key_max_num; i++) in il_get_free_ucode_key_idx()
2312 if (!test_and_set_bit(i, &il->ucode_key_table)) in il_get_free_ucode_key_idx()
2325 spin_lock_irqsave(&il->sta_lock, flags); in il_dealloc_bcast_stations()
2326 for (i = 0; i < il->hw_params.max_stations; i++) { in il_dealloc_bcast_stations()
2327 if (!(il->stations[i].used & IL_STA_BCAST)) in il_dealloc_bcast_stations()
2330 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; in il_dealloc_bcast_stations()
2331 il->num_stations--; in il_dealloc_bcast_stations()
2332 BUG_ON(il->num_stations < 0); in il_dealloc_bcast_stations()
2333 kfree(il->stations[i].lq); in il_dealloc_bcast_stations()
2334 il->stations[i].lq = NULL; in il_dealloc_bcast_stations()
2336 spin_unlock_irqrestore(&il->sta_lock, flags); in il_dealloc_bcast_stations()
2345 D_RATE("lq station id 0x%x\n", lq->sta_id); in il_dump_lq_cmd()
2346 D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk, in il_dump_lq_cmd()
2347 lq->general_params.dual_stream_ant_msk); in il_dump_lq_cmd()
2350 D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags); in il_dump_lq_cmd()
2360 * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
2375 if (il->ht.enabled) in il_is_lq_table_valid()
2378 D_INFO("Channel %u is not an HT channel\n", il->active.channel); in il_is_lq_table_valid()
2380 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) { in il_is_lq_table_valid()
2389 * il_send_lq_cmd() - Send link quality command
2412 if (WARN_ON(lq->sta_id == IL_INVALID_STATION)) in il_send_lq_cmd()
2413 return -EINVAL; in il_send_lq_cmd()
2415 spin_lock_irqsave(&il->sta_lock, flags_spin); in il_send_lq_cmd()
2416 if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) { in il_send_lq_cmd()
2417 spin_unlock_irqrestore(&il->sta_lock, flags_spin); in il_send_lq_cmd()
2418 return -EINVAL; in il_send_lq_cmd()
2420 spin_unlock_irqrestore(&il->sta_lock, flags_spin); in il_send_lq_cmd()
2428 ret = -EINVAL; in il_send_lq_cmd()
2436 lq->sta_id); in il_send_lq_cmd()
2437 spin_lock_irqsave(&il->sta_lock, flags_spin); in il_send_lq_cmd()
2438 il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS; in il_send_lq_cmd()
2439 spin_unlock_irqrestore(&il->sta_lock, flags_spin); in il_send_lq_cmd()
2449 struct il_priv *il = hw->priv; in il_mac_sta_remove()
2450 struct il_station_priv_common *sta_common = (void *)sta->drv_priv; in il_mac_sta_remove()
2453 mutex_lock(&il->mutex); in il_mac_sta_remove()
2454 D_MAC80211("enter station %pM\n", sta->addr); in il_mac_sta_remove()
2456 ret = il_remove_station(il, sta_common->sta_id, sta->addr); in il_mac_sta_remove()
2458 IL_ERR("Error removing station %pM\n", sta->addr); in il_mac_sta_remove()
2461 mutex_unlock(&il->mutex); in il_mac_sta_remove()
2467 /************************** RX-FUNCTIONS ****************************/
2481 * to -- the driver can read up to (but not including) this position and get
2485 * The WRITE idx maps to the last position the driver has read from -- the
2488 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
2492 * IDX position, and WRITE to the last (READ - 1 wrapped)
2500 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
2501 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
2502 * to replenish the iwl->rxq->rx_free.
2504 * iwl->rxq is replenished and the READ IDX is updated (updating the
2507 * detached from the iwl->rxq. The driver 'processed' idx is updated.
2508 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
2509 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
2510 * IDX is not incremented and iwl->status(RX_STALLED) is set. If there
2524 * -- enable interrupts --
2525 * ISR - il_rx() Detach il_rx_bufs from pool up to the
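The READ/WRITE convention described above (one slot always kept unused, queue empty when WRITE = READ - 1; the full condition, truncated above, would then be WRITE = READ) can be sketched as a pair of stand-alone helpers. The names and the fixed size below are illustrative, not the driver's:

	#define RING_SIZE 256	/* assumed power-of-two ring size, as with RX_QUEUE_SIZE */

	/* Ring is empty when the write idx sits one slot behind the read idx. */
	static inline bool ring_empty(u32 read, u32 write)
	{
		return write == ((read - 1) & (RING_SIZE - 1));
	}

	/* Ring is full when the two indexes meet; one slot stays unused so
	 * full and empty remain distinguishable. */
	static inline bool ring_full(u32 read, u32 write)
	{
		return write == read;
	}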
2535 * il_rx_queue_space - Return number of free slots available in queue.
2540 int s = q->read - q->write; in il_rx_queue_space()
2544 s -= 2; in il_rx_queue_space()
2552 * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
2558 u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg; in il_rx_queue_update_write_ptr()
2561 spin_lock_irqsave(&q->lock, flags); in il_rx_queue_update_write_ptr()
2563 if (q->need_update == 0) in il_rx_queue_update_write_ptr()
2566 /* If power-saving is in use, make sure device is awake */ in il_rx_queue_update_write_ptr()
2567 if (test_bit(S_POWER_PMI, &il->status)) { in il_rx_queue_update_write_ptr()
2578 q->write_actual = (q->write & ~0x7); in il_rx_queue_update_write_ptr()
2579 il_wr(il, rx_wrt_ptr_reg, q->write_actual); in il_rx_queue_update_write_ptr()
2584 q->write_actual = (q->write & ~0x7); in il_rx_queue_update_write_ptr()
2585 il_wr(il, rx_wrt_ptr_reg, q->write_actual); in il_rx_queue_update_write_ptr()
2588 q->need_update = 0; in il_rx_queue_update_write_ptr()
2591 spin_unlock_irqrestore(&q->lock, flags); in il_rx_queue_update_write_ptr()
2598 struct il_rx_queue *rxq = &il->rxq; in il_rx_queue_alloc()
2599 struct device *dev = &il->pci_dev->dev; in il_rx_queue_alloc()
2602 spin_lock_init(&rxq->lock); in il_rx_queue_alloc()
2603 INIT_LIST_HEAD(&rxq->rx_free); in il_rx_queue_alloc()
2604 INIT_LIST_HEAD(&rxq->rx_used); in il_rx_queue_alloc()
2607 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, in il_rx_queue_alloc()
2609 if (!rxq->bd) in il_rx_queue_alloc()
2612 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status), in il_rx_queue_alloc()
2613 &rxq->rb_stts_dma, GFP_KERNEL); in il_rx_queue_alloc()
2614 if (!rxq->rb_stts) in il_rx_queue_alloc()
2619 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); in il_rx_queue_alloc()
2623 rxq->read = rxq->write = 0; in il_rx_queue_alloc()
2624 rxq->write_actual = 0; in il_rx_queue_alloc()
2625 rxq->free_count = 0; in il_rx_queue_alloc()
2626 rxq->need_update = 0; in il_rx_queue_alloc()
2630 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, in il_rx_queue_alloc()
2631 rxq->bd_dma); in il_rx_queue_alloc()
2633 return -ENOMEM; in il_rx_queue_alloc()
2641 struct il_spectrum_notification *report = &(pkt->u.spectrum_notif); in il_hdl_spectrum_measurement()
2643 if (!report->state) { in il_hdl_spectrum_measurement()
2648 memcpy(&il->measure_report, report, sizeof(*report)); in il_hdl_spectrum_measurement()
2649 il->measurement_status |= MEASUREMENT_READY; in il_hdl_spectrum_measurement()
2654 * returns non-zero if packet should be dropped
2660 u16 fc = le16_to_cpu(hdr->frame_control); in il_set_decrypted_flag()
2666 if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) in il_set_decrypted_flag()
2688 return -1; in il_set_decrypted_flag()
2695 stats->flag |= RX_FLAG_DECRYPTED; in il_set_decrypted_flag()
2707 * il_txq_update_write_ptr - Send new write idx to hardware
2713 int txq_id = txq->q.id; in il_txq_update_write_ptr()
2715 if (txq->need_update == 0) in il_txq_update_write_ptr()
2719 if (test_bit(S_POWER_PMI, &il->status)) { in il_txq_update_write_ptr()
2733 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); in il_txq_update_write_ptr()
2736 * else not in power-save mode, in il_txq_update_write_ptr()
2741 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); in il_txq_update_write_ptr()
2742 txq->need_update = 0; in il_txq_update_write_ptr()
2747 * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
2752 struct il_tx_queue *txq = &il->txq[txq_id]; in il_tx_queue_unmap()
2753 struct il_queue *q = &txq->q; in il_tx_queue_unmap()
2755 if (q->n_bd == 0) in il_tx_queue_unmap()
2758 while (q->write_ptr != q->read_ptr) { in il_tx_queue_unmap()
2759 il->ops->txq_free_tfd(il, txq); in il_tx_queue_unmap()
2760 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); in il_tx_queue_unmap()
2766 * il_tx_queue_free - Deallocate DMA queue.
2771 * 0-fill, but do not free "txq" descriptor structure.
2776 struct il_tx_queue *txq = &il->txq[txq_id]; in il_tx_queue_free()
2777 struct device *dev = &il->pci_dev->dev; in il_tx_queue_free()
2782 /* De-alloc array of command/tx buffers */ in il_tx_queue_free()
2783 if (txq->cmd) { in il_tx_queue_free()
2785 kfree(txq->cmd[i]); in il_tx_queue_free()
2788 /* De-alloc circular buffer of TFDs */ in il_tx_queue_free()
2789 if (txq->q.n_bd) in il_tx_queue_free()
2790 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, in il_tx_queue_free()
2791 txq->tfds, txq->q.dma_addr); in il_tx_queue_free()
2793 /* De-alloc array of per-TFD driver data */ in il_tx_queue_free()
2794 kfree(txq->skbs); in il_tx_queue_free()
2795 txq->skbs = NULL; in il_tx_queue_free()
2798 kfree(txq->cmd); in il_tx_queue_free()
2799 kfree(txq->meta); in il_tx_queue_free()
2800 txq->cmd = NULL; in il_tx_queue_free()
2801 txq->meta = NULL; in il_tx_queue_free()
2803 /* 0-fill queue descriptor structure */ in il_tx_queue_free()
2809 * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
2814 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; in il_cmd_queue_unmap()
2815 struct il_queue *q = &txq->q; in il_cmd_queue_unmap()
2818 if (q->n_bd == 0) in il_cmd_queue_unmap()
2821 while (q->read_ptr != q->write_ptr) { in il_cmd_queue_unmap()
2822 i = il_get_cmd_idx(q, q->read_ptr, 0); in il_cmd_queue_unmap()
2824 if (txq->meta[i].flags & CMD_MAPPED) { in il_cmd_queue_unmap()
2825 dma_unmap_single(&il->pci_dev->dev, in il_cmd_queue_unmap()
2826 dma_unmap_addr(&txq->meta[i], mapping), in il_cmd_queue_unmap()
2827 dma_unmap_len(&txq->meta[i], len), in il_cmd_queue_unmap()
2829 txq->meta[i].flags = 0; in il_cmd_queue_unmap()
2832 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); in il_cmd_queue_unmap()
2835 i = q->n_win; in il_cmd_queue_unmap()
2836 if (txq->meta[i].flags & CMD_MAPPED) { in il_cmd_queue_unmap()
2837 dma_unmap_single(&il->pci_dev->dev, in il_cmd_queue_unmap()
2838 dma_unmap_addr(&txq->meta[i], mapping), in il_cmd_queue_unmap()
2839 dma_unmap_len(&txq->meta[i], len), in il_cmd_queue_unmap()
2841 txq->meta[i].flags = 0; in il_cmd_queue_unmap()
2847 * il_cmd_queue_free - Deallocate DMA queue.
2851 * 0-fill, but do not free "txq" descriptor structure.
2856 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; in il_cmd_queue_free()
2857 struct device *dev = &il->pci_dev->dev; in il_cmd_queue_free()
2862 /* De-alloc array of command/tx buffers */ in il_cmd_queue_free()
2863 if (txq->cmd) { in il_cmd_queue_free()
2865 kfree(txq->cmd[i]); in il_cmd_queue_free()
2868 /* De-alloc circular buffer of TFDs */ in il_cmd_queue_free()
2869 if (txq->q.n_bd) in il_cmd_queue_free()
2870 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, in il_cmd_queue_free()
2871 txq->tfds, txq->q.dma_addr); in il_cmd_queue_free()
2874 kfree(txq->cmd); in il_cmd_queue_free()
2875 kfree(txq->meta); in il_cmd_queue_free()
2876 txq->cmd = NULL; in il_cmd_queue_free()
2877 txq->meta = NULL; in il_cmd_queue_free()
2879 /* 0-fill queue descriptor structure */ in il_cmd_queue_free()
2884 /*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
2910 int s = q->read_ptr - q->write_ptr; in il_queue_space()
2912 if (q->read_ptr > q->write_ptr) in il_queue_space()
2913 s -= q->n_bd; in il_queue_space()
2916 s += q->n_win; in il_queue_space()
2918 s -= 2; in il_queue_space()
2927 * il_queue_init - Initialize queue's high/low-water and read/write idxes
2933 * TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise in il_queue_init()
2936 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); in il_queue_init()
2937 /* FIXME: remove q->n_bd */ in il_queue_init()
2938 q->n_bd = TFD_QUEUE_SIZE_MAX; in il_queue_init()
2940 q->n_win = slots; in il_queue_init()
2941 q->id = id; in il_queue_init()
2943 /* slots must be power-of-two size, otherwise in il_queue_init()
2947 q->low_mark = q->n_win / 4; in il_queue_init()
2948 if (q->low_mark < 4) in il_queue_init()
2949 q->low_mark = 4; in il_queue_init()
2951 q->high_mark = q->n_win / 8; in il_queue_init()
2952 if (q->high_mark < 2) in il_queue_init()
2953 q->high_mark = 2; in il_queue_init()
2955 q->write_ptr = q->read_ptr = 0; in il_queue_init()
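The BUILD_BUG_ON and the power-of-two requirements above exist so that queue indexes can wrap with a simple mask rather than a modulo. A minimal sketch of such a wrap helper (the driver's il_queue_inc_wrap, referenced throughout this file, plays this role; the body below is an assumption, not quoted from the header):

	/* Advance idx by one and wrap at n_bd; n_bd must be a power of two. */
	static inline int queue_inc_wrap_sketch(int idx, int n_bd)
	{
		return (idx + 1) & (n_bd - 1);
	}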
2961 * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
2966 struct device *dev = &il->pci_dev->dev; in il_tx_queue_alloc()
2967 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; in il_tx_queue_alloc()
2971 if (id != il->cmd_queue) { in il_tx_queue_alloc()
2972 txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, in il_tx_queue_alloc()
2975 if (!txq->skbs) { in il_tx_queue_alloc()
2980 txq->skbs = NULL; in il_tx_queue_alloc()
2984 txq->tfds = in il_tx_queue_alloc()
2985 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL); in il_tx_queue_alloc()
2986 if (!txq->tfds) in il_tx_queue_alloc()
2989 txq->q.id = id; in il_tx_queue_alloc()
2994 kfree(txq->skbs); in il_tx_queue_alloc()
2995 txq->skbs = NULL; in il_tx_queue_alloc()
2997 return -ENOMEM; in il_tx_queue_alloc()
3001 * il_tx_queue_init - Allocate and initialize one tx/cmd queue
3008 struct il_tx_queue *txq = &il->txq[txq_id]; in il_tx_queue_init()
3015 * For normal Tx queues (all other queues), no super-size command in il_tx_queue_init()
3018 if (txq_id == il->cmd_queue) { in il_tx_queue_init()
3026 txq->meta = in il_tx_queue_init()
3028 txq->cmd = in il_tx_queue_init()
3031 if (!txq->meta || !txq->cmd) in il_tx_queue_init()
3040 txq->cmd[i] = kmalloc(len, GFP_KERNEL); in il_tx_queue_init()
3041 if (!txq->cmd[i]) in il_tx_queue_init()
3050 txq->need_update = 0; in il_tx_queue_init()
3053 * For the default queues 0-3, set up the swq_id in il_tx_queue_init()
3054 * already -- all others need to get one later in il_tx_queue_init()
3060 /* Initialize queue's high/low-water marks, and head/tail idxes */ in il_tx_queue_init()
3061 il_queue_init(il, &txq->q, slots, txq_id); in il_tx_queue_init()
3064 il->ops->txq_init(il, txq); in il_tx_queue_init()
3069 kfree(txq->cmd[i]); in il_tx_queue_init()
3071 kfree(txq->meta); in il_tx_queue_init()
3072 txq->meta = NULL; in il_tx_queue_init()
3073 kfree(txq->cmd); in il_tx_queue_init()
3074 txq->cmd = NULL; in il_tx_queue_init()
3076 return -ENOMEM; in il_tx_queue_init()
3084 struct il_tx_queue *txq = &il->txq[txq_id]; in il_tx_queue_reset()
3086 if (txq_id == il->cmd_queue) { in il_tx_queue_reset()
3094 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots); in il_tx_queue_reset()
3095 txq->need_update = 0; in il_tx_queue_reset()
3097 /* Initialize queue's high/low-water marks, and head/tail idxes */ in il_tx_queue_reset()
3098 il_queue_init(il, &txq->q, slots, txq_id); in il_tx_queue_reset()
3101 il->ops->txq_init(il, txq); in il_tx_queue_reset()
3108 * il_enqueue_hcmd - enqueue a uCode command
3119 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; in il_enqueue_hcmd()
3120 struct il_queue *q = &txq->q; in il_enqueue_hcmd()
3129 cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len); in il_enqueue_hcmd()
3130 fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr)); in il_enqueue_hcmd()
3138 !(cmd->flags & CMD_SIZE_HUGE)); in il_enqueue_hcmd()
3142 IL_WARN("Not sending command - %s KILL\n", in il_enqueue_hcmd()
3144 return -EIO; in il_enqueue_hcmd()
3147 spin_lock_irqsave(&il->hcmd_lock, flags); in il_enqueue_hcmd()
3149 if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { in il_enqueue_hcmd()
3150 spin_unlock_irqrestore(&il->hcmd_lock, flags); in il_enqueue_hcmd()
3153 queue_work(il->workqueue, &il->restart); in il_enqueue_hcmd()
3154 return -ENOSPC; in il_enqueue_hcmd()
3157 idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); in il_enqueue_hcmd()
3158 out_cmd = txq->cmd[idx]; in il_enqueue_hcmd()
3159 out_meta = &txq->meta[idx]; in il_enqueue_hcmd()
3163 * we're using a larger payload buffer to avoid "field- in il_enqueue_hcmd()
3164 * spanning write" warnings at run-time for huge commands. in il_enqueue_hcmd()
3166 if (cmd->flags & CMD_SIZE_HUGE) in il_enqueue_hcmd()
3167 out_payload = ((struct il_device_cmd_huge *)out_cmd)->cmd.payload; in il_enqueue_hcmd()
3169 out_payload = out_cmd->cmd.payload; in il_enqueue_hcmd()
3171 if (WARN_ON(out_meta->flags & CMD_MAPPED)) { in il_enqueue_hcmd()
3172 spin_unlock_irqrestore(&il->hcmd_lock, flags); in il_enqueue_hcmd()
3173 return -ENOSPC; in il_enqueue_hcmd()
3176 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ in il_enqueue_hcmd()
3177 out_meta->flags = cmd->flags | CMD_MAPPED; in il_enqueue_hcmd()
3178 if (cmd->flags & CMD_WANT_SKB) in il_enqueue_hcmd()
3179 out_meta->source = cmd; in il_enqueue_hcmd()
3180 if (cmd->flags & CMD_ASYNC) in il_enqueue_hcmd()
3181 out_meta->callback = cmd->callback; in il_enqueue_hcmd()
3183 out_cmd->hdr.cmd = cmd->id; in il_enqueue_hcmd()
3184 memcpy(out_payload, cmd->data, cmd->len); in il_enqueue_hcmd()
3189 out_cmd->hdr.flags = 0; in il_enqueue_hcmd()
3190 out_cmd->hdr.sequence = in il_enqueue_hcmd()
3191 cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr)); in il_enqueue_hcmd()
3192 if (cmd->flags & CMD_SIZE_HUGE) in il_enqueue_hcmd()
3193 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; in il_enqueue_hcmd()
3196 switch (out_cmd->hdr.cmd) { in il_enqueue_hcmd()
3201 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, in il_enqueue_hcmd()
3202 le16_to_cpu(out_cmd->hdr.sequence), fix_size, in il_enqueue_hcmd()
3203 q->write_ptr, idx, il->cmd_queue); in il_enqueue_hcmd()
3208 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, in il_enqueue_hcmd()
3209 le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr, in il_enqueue_hcmd()
3210 idx, il->cmd_queue); in il_enqueue_hcmd()
3214 phys_addr = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, fix_size, in il_enqueue_hcmd()
3216 if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) { in il_enqueue_hcmd()
3217 idx = -ENOMEM; in il_enqueue_hcmd()
3223 txq->need_update = 1; in il_enqueue_hcmd()
3225 if (il->ops->txq_update_byte_cnt_tbl) in il_enqueue_hcmd()
3227 il->ops->txq_update_byte_cnt_tbl(il, txq, 0); in il_enqueue_hcmd()
3229 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1, in il_enqueue_hcmd()
3230 U32_PAD(cmd->len)); in il_enqueue_hcmd()
3233 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); in il_enqueue_hcmd()
3237 spin_unlock_irqrestore(&il->hcmd_lock, flags); in il_enqueue_hcmd()
3242 * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
3251 struct il_tx_queue *txq = &il->txq[txq_id]; in il_hcmd_queue_reclaim()
3252 struct il_queue *q = &txq->q; in il_hcmd_queue_reclaim()
3255 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) { in il_hcmd_queue_reclaim()
3257 "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd, in il_hcmd_queue_reclaim()
3258 q->write_ptr, q->read_ptr); in il_hcmd_queue_reclaim()
3262 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; in il_hcmd_queue_reclaim()
3263 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) { in il_hcmd_queue_reclaim()
3267 q->write_ptr, q->read_ptr); in il_hcmd_queue_reclaim()
3268 queue_work(il->workqueue, &il->restart); in il_hcmd_queue_reclaim()
3275 * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
3286 u16 sequence = le16_to_cpu(pkt->hdr.sequence); in il_tx_cmd_complete()
3290 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); in il_tx_cmd_complete()
3293 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; in il_tx_cmd_complete()
3300 (txq_id != il->cmd_queue, in il_tx_cmd_complete()
3302 txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr, in il_tx_cmd_complete()
3303 il->txq[il->cmd_queue].q.write_ptr)) { in il_tx_cmd_complete()
3308 cmd_idx = il_get_cmd_idx(&txq->q, idx, huge); in il_tx_cmd_complete()
3309 cmd = txq->cmd[cmd_idx]; in il_tx_cmd_complete()
3310 meta = &txq->meta[cmd_idx]; in il_tx_cmd_complete()
3312 txq->time_stamp = jiffies; in il_tx_cmd_complete()
3314 dma_unmap_single(&il->pci_dev->dev, dma_unmap_addr(meta, mapping), in il_tx_cmd_complete()
3318 if (meta->flags & CMD_WANT_SKB) { in il_tx_cmd_complete()
3319 meta->source->reply_page = (unsigned long)rxb_addr(rxb); in il_tx_cmd_complete()
3320 rxb->page = NULL; in il_tx_cmd_complete()
3321 } else if (meta->callback) in il_tx_cmd_complete()
3322 meta->callback(il, cmd, pkt); in il_tx_cmd_complete()
3324 spin_lock_irqsave(&il->hcmd_lock, flags); in il_tx_cmd_complete()
3328 if (!(meta->flags & CMD_ASYNC)) { in il_tx_cmd_complete()
3329 clear_bit(S_HCMD_ACTIVE, &il->status); in il_tx_cmd_complete()
3331 il_get_cmd_string(cmd->hdr.cmd)); in il_tx_cmd_complete()
3332 wake_up(&il->wait_command_queue); in il_tx_cmd_complete()
3336 meta->flags = 0; in il_tx_cmd_complete()
3338 spin_unlock_irqrestore(&il->hcmd_lock, flags); in il_tx_cmd_complete()
3342 MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
3349 * every time the priority line is asserted (BT is sending signals on the
3355 * co-exist problem. The possible behaviors are:
3365 MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
3381 u8 rx_chains_num = il->hw_params.rx_chains_num; in il_init_ht_hw_capab()
3382 u8 tx_chains_num = il->hw_params.tx_chains_num; in il_init_ht_hw_capab()
3384 ht_info->cap = 0; in il_init_ht_hw_capab()
3385 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); in il_init_ht_hw_capab()
3387 ht_info->ht_supported = true; in il_init_ht_hw_capab()
3389 ht_info->cap |= IEEE80211_HT_CAP_SGI_20; in il_init_ht_hw_capab()
3391 if (il->hw_params.ht40_channel & BIT(band)) { in il_init_ht_hw_capab()
3392 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; in il_init_ht_hw_capab()
3393 ht_info->cap |= IEEE80211_HT_CAP_SGI_40; in il_init_ht_hw_capab()
3394 ht_info->mcs.rx_mask[4] = 0x01; in il_init_ht_hw_capab()
3398 if (il->cfg->mod_params->amsdu_size_8K) in il_init_ht_hw_capab()
3399 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; in il_init_ht_hw_capab()
3401 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; in il_init_ht_hw_capab()
3402 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; in il_init_ht_hw_capab()
3404 ht_info->mcs.rx_mask[0] = 0xFF; in il_init_ht_hw_capab()
3406 ht_info->mcs.rx_mask[1] = 0xFF; in il_init_ht_hw_capab()
3408 ht_info->mcs.rx_mask[2] = 0xFF; in il_init_ht_hw_capab()
3413 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); in il_init_ht_hw_capab()
3416 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; in il_init_ht_hw_capab()
3418 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; in il_init_ht_hw_capab()
3419 ht_info->mcs.tx_params |= in il_init_ht_hw_capab()
3420 ((tx_chains_num - in il_init_ht_hw_capab()
3426 * il_init_geos - Initialize mac80211's geo/channel info based from eeprom
3439 if (il->bands[NL80211_BAND_2GHZ].n_bitrates || in il_init_geos()
3440 il->bands[NL80211_BAND_5GHZ].n_bitrates) { in il_init_geos()
3442 set_bit(S_GEO_CONFIGURED, &il->status); in il_init_geos()
3447 kcalloc(il->channel_count, sizeof(struct ieee80211_channel), in il_init_geos()
3450 return -ENOMEM; in il_init_geos()
3455 return -ENOMEM; in il_init_geos()
3459 sband = &il->bands[NL80211_BAND_5GHZ]; in il_init_geos()
3460 sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)]; in il_init_geos()
3462 sband->bitrates = &rates[IL_FIRST_OFDM_RATE]; in il_init_geos()
3463 sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE; in il_init_geos()
3465 if (il->cfg->sku & IL_SKU_N) in il_init_geos()
3466 il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ); in il_init_geos()
3468 sband = &il->bands[NL80211_BAND_2GHZ]; in il_init_geos()
3469 sband->channels = channels; in il_init_geos()
3471 sband->bitrates = rates; in il_init_geos()
3472 sband->n_bitrates = RATE_COUNT_LEGACY; in il_init_geos()
3474 if (il->cfg->sku & IL_SKU_N) in il_init_geos()
3475 il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ); in il_init_geos()
3477 il->ieee_channels = channels; in il_init_geos()
3478 il->ieee_rates = rates; in il_init_geos()
3480 for (i = 0; i < il->channel_count; i++) { in il_init_geos()
3481 ch = &il->channel_info[i]; in il_init_geos()
3486 sband = &il->bands[ch->band]; in il_init_geos()
3488 geo_ch = &sband->channels[sband->n_channels++]; in il_init_geos()
3490 geo_ch->center_freq = in il_init_geos()
3491 ieee80211_channel_to_frequency(ch->channel, ch->band); in il_init_geos()
3492 geo_ch->max_power = ch->max_power_avg; in il_init_geos()
3493 geo_ch->max_antenna_gain = 0xff; in il_init_geos()
3494 geo_ch->hw_value = ch->channel; in il_init_geos()
3497 if (!(ch->flags & EEPROM_CHANNEL_IBSS)) in il_init_geos()
3498 geo_ch->flags |= IEEE80211_CHAN_NO_IR; in il_init_geos()
3500 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) in il_init_geos()
3501 geo_ch->flags |= IEEE80211_CHAN_NO_IR; in il_init_geos()
3503 if (ch->flags & EEPROM_CHANNEL_RADAR) in il_init_geos()
3504 geo_ch->flags |= IEEE80211_CHAN_RADAR; in il_init_geos()
3506 geo_ch->flags |= ch->ht40_extension_channel; in il_init_geos()
3508 if (ch->max_power_avg > max_tx_power) in il_init_geos()
3509 max_tx_power = ch->max_power_avg; in il_init_geos()
3511 geo_ch->flags |= IEEE80211_CHAN_DISABLED; in il_init_geos()
3514 D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel, in il_init_geos()
3515 geo_ch->center_freq, in il_init_geos()
3517 geo_ch-> in il_init_geos()
3519 geo_ch->flags); in il_init_geos()
3522 il->tx_power_device_lmt = max_tx_power; in il_init_geos()
3523 il->tx_power_user_lmt = max_tx_power; in il_init_geos()
3524 il->tx_power_next = max_tx_power; in il_init_geos()
3526 if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 && in il_init_geos()
3527 (il->cfg->sku & IL_SKU_A)) { in il_init_geos()
3530 il->pci_dev->device, il->pci_dev->subsystem_device); in il_init_geos()
3531 il->cfg->sku &= ~IL_SKU_A; in il_init_geos()
3535 il->bands[NL80211_BAND_2GHZ].n_channels, in il_init_geos()
3536 il->bands[NL80211_BAND_5GHZ].n_channels); in il_init_geos()
3538 set_bit(S_GEO_CONFIGURED, &il->status); in il_init_geos()
3545 * il_free_geos - undo allocations in il_init_geos
3550 kfree(il->ieee_channels); in il_free_geos()
3551 kfree(il->ieee_rates); in il_free_geos()
3552 clear_bit(S_GEO_CONFIGURED, &il->status); in il_free_geos()
3567 return !(ch_info-> in il_is_channel_extension()
3570 return !(ch_info-> in il_is_channel_extension()
3579 if (!il->ht.enabled || !il->ht.is_40mhz) in il_is_ht40_tx_allowed()
3586 if (ht_cap && !ht_cap->ht_supported) in il_is_ht40_tx_allowed()
3590 if (il->disable_ht40) in il_is_ht40_tx_allowed()
3594 return il_is_channel_extension(il, il->band, in il_is_ht40_tx_allowed()
3595 le16_to_cpu(il->staging.channel), in il_is_ht40_tx_allowed()
3596 il->ht.extension_chan_offset); in il_is_ht40_tx_allowed()
3641 struct ieee80211_vif *vif = il->vif; in il_send_rxon_timing()
3643 conf = &il->hw->conf; in il_send_rxon_timing()
3645 lockdep_assert_held(&il->mutex); in il_send_rxon_timing()
3647 memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd)); in il_send_rxon_timing()
3649 il->timing.timestamp = cpu_to_le64(il->timestamp); in il_send_rxon_timing()
3650 il->timing.listen_interval = cpu_to_le16(conf->listen_interval); in il_send_rxon_timing()
3652 beacon_int = vif ? vif->bss_conf.beacon_int : 0; in il_send_rxon_timing()
3658 il->timing.atim_win = 0; in il_send_rxon_timing()
3662 il->hw_params.max_beacon_itrvl * in il_send_rxon_timing()
3664 il->timing.beacon_interval = cpu_to_le16(beacon_int); in il_send_rxon_timing()
3666 	tsf = il->timestamp;	/* tsf is modified by do_div: copy it */ in il_send_rxon_timing()
3669 il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); in il_send_rxon_timing()
3671 il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1; in il_send_rxon_timing()
3674 le16_to_cpu(il->timing.beacon_interval), in il_send_rxon_timing()
3675 le32_to_cpu(il->timing.beacon_init_val), in il_send_rxon_timing()
3676 le16_to_cpu(il->timing.atim_win)); in il_send_rxon_timing()
3678 return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing), in il_send_rxon_timing()
3679 &il->timing); in il_send_rxon_timing()
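/*
 * Illustrative sketch, not part of the driver: beacon_init_val above is
 * the number of microseconds from the current TSF to the next beacon
 * boundary.  With the beacon interval in TUs (1 TU = 1024 usec) the
 * arithmetic is simply "interval minus (tsf modulo interval)":
 */
static unsigned int il_example_usec_to_next_beacon(unsigned long long tsf_usec,
						   unsigned int beacon_int_tu)
{
	unsigned long long interval = (unsigned long long)beacon_int_tu * 1024;
	unsigned long long rem;

	if (!interval)
		return 0;
	rem = tsf_usec % interval;	/* do_div() in the kernel version */
	return (unsigned int)(interval - rem);
}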
3686 struct il_rxon_cmd *rxon = &il->staging; in il_set_rxon_hwcrypto()
3689 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; in il_set_rxon_hwcrypto()
3691 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; in il_set_rxon_hwcrypto()
3700 struct il_rxon_cmd *rxon = &il->staging; in il_check_rxon_cmd()
3703 if (rxon->flags & RXON_FLG_BAND_24G_MSK) { in il_check_rxon_cmd()
3704 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { in il_check_rxon_cmd()
3708 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { in il_check_rxon_cmd()
3713 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { in il_check_rxon_cmd()
3717 if (rxon->flags & RXON_FLG_CCK_MSK) { in il_check_rxon_cmd()
3722 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { in il_check_rxon_cmd()
3728 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 && in il_check_rxon_cmd()
3729 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) { in il_check_rxon_cmd()
3734 if (le16_to_cpu(rxon->assoc_id) > 2007) { in il_check_rxon_cmd()
3739 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) == in il_check_rxon_cmd()
3745 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) == in il_check_rxon_cmd()
3751 if ((rxon-> in il_check_rxon_cmd()
3754 IL_WARN("TGg but no auto-detect\n"); in il_check_rxon_cmd()
3759 IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel)); in il_check_rxon_cmd()
3763 return -EINVAL; in il_check_rxon_cmd()
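/*
 * Illustrative sketch, not part of the driver: the node/bssid check above
 * rejects group (multicast/broadcast) addresses by testing the I/G bit,
 * which lives in the least significant bit of the first octet:
 */
static int il_example_is_group_addr(const unsigned char *mac)
{
	return mac[0] & 0x01;	/* non-zero for multicast/broadcast */
}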
3770 * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
3780 const struct il_rxon_cmd *staging = &il->staging; in il_full_rxon_required()
3781 const struct il_rxon_cmd *active = &il->active; in il_full_rxon_required()
3785 D_INFO("need full RXON - " #cond "\n"); \ in il_full_rxon_required()
3791 D_INFO("need full RXON - " \ in il_full_rxon_required()
3792 #c1 " != " #c2 " - %d != %d\n", \ in il_full_rxon_required()
3799 CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr)); in il_full_rxon_required()
3800 CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr)); in il_full_rxon_required()
3801 CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr, in il_full_rxon_required()
3802 active->wlap_bssid_addr)); in il_full_rxon_required()
3803 CHK_NEQ(staging->dev_type, active->dev_type); in il_full_rxon_required()
3804 CHK_NEQ(staging->channel, active->channel); in il_full_rxon_required()
3805 CHK_NEQ(staging->air_propagation, active->air_propagation); in il_full_rxon_required()
3806 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, in il_full_rxon_required()
3807 active->ofdm_ht_single_stream_basic_rates); in il_full_rxon_required()
3808 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, in il_full_rxon_required()
3809 active->ofdm_ht_dual_stream_basic_rates); in il_full_rxon_required()
3810 CHK_NEQ(staging->assoc_id, active->assoc_id); in il_full_rxon_required()
3813 * be updated with the RXON_ASSOC command -- however only some in il_full_rxon_required()
3817 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, in il_full_rxon_required()
3818 active->flags & RXON_FLG_BAND_24G_MSK); in il_full_rxon_required()
3821 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, in il_full_rxon_required()
3822 active->filter_flags & RXON_FILTER_ASSOC_MSK); in il_full_rxon_required()
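/*
 * Illustrative sketch, not part of the driver: CHK()/CHK_NEQ() above are a
 * compact "diff two config structs" idiom - test a condition, log which
 * field differs, and bail out early.  A stripped-down version of the same
 * pattern, using a hypothetical config type:
 */
struct il_example_cfg {
	unsigned short channel;
	unsigned char dev_type;
};

static int il_example_full_update_needed(const struct il_example_cfg *staging,
					 const struct il_example_cfg *active)
{
#define EX_CHK_NEQ(c1, c2)		\
	do {				\
		if ((c1) != (c2))	\
			return 1;	\
	} while (0)

	EX_CHK_NEQ(staging->channel, active->channel);
	EX_CHK_NEQ(staging->dev_type, active->dev_type);
#undef EX_CHK_NEQ

	return 0;
}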
3835 * Assign the lowest rate -- should really get this from in il_get_lowest_plcp()
3838 if (il->staging.flags & RXON_FLG_BAND_24G_MSK) in il_get_lowest_plcp()
3848 struct il_rxon_cmd *rxon = &il->staging; in _il_set_rxon_ht()
3850 if (!il->ht.enabled) { in _il_set_rxon_ht()
3851 rxon->flags &= in _il_set_rxon_ht()
3858 rxon->flags |= in _il_set_rxon_ht()
3859 cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS); in _il_set_rxon_ht()
3862 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ in _il_set_rxon_ht()
3864 rxon->flags &= in _il_set_rxon_ht()
3868 if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { in _il_set_rxon_ht()
3869 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; in _il_set_rxon_ht()
3871 switch (il->ht.extension_chan_offset) { in _il_set_rxon_ht()
3873 rxon->flags &= in _il_set_rxon_ht()
3877 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; in _il_set_rxon_ht()
3882 switch (il->ht.extension_chan_offset) { in _il_set_rxon_ht()
3884 rxon->flags &= in _il_set_rxon_ht()
3886 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; in _il_set_rxon_ht()
3889 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; in _il_set_rxon_ht()
3890 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; in _il_set_rxon_ht()
3894 /* channel location only valid if in Mixed mode */ in _il_set_rxon_ht()
3900 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; in _il_set_rxon_ht()
3903 if (il->ops->set_rxon_chain) in _il_set_rxon_ht()
3904 il->ops->set_rxon_chain(il); in _il_set_rxon_ht()
3907 "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags), in _il_set_rxon_ht()
3908 il->ht.protection, il->ht.extension_chan_offset); in _il_set_rxon_ht()
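/*
 * Illustrative sketch, not part of the driver: the branches above boil down
 * to a small decision table - legacy 20 MHz when HT40 is not allowed on the
 * current channel, "pure 40" when 20 MHz-only protection is in force,
 * otherwise 20/40 mixed mode.  The control-channel-high/low bit is chosen
 * separately from the secondary-channel offset.  Enum values here are
 * placeholders, not the real RXON_FLG_* bits:
 */
enum il_example_chan_mode { EX_MODE_LEGACY, EX_MODE_MIXED, EX_MODE_PURE_40 };

static enum il_example_chan_mode
il_example_pick_chan_mode(int ht40_allowed, int protection_is_20mhz)
{
	if (!ht40_allowed)
		return EX_MODE_LEGACY;	/* plain 20 MHz channel */
	if (protection_is_20mhz)
		return EX_MODE_PURE_40;	/* 40 MHz with 20 MHz protection */
	return EX_MODE_MIXED;		/* 20/40 mixed operation */
}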
3929 max = il->channel_count; in il_get_single_channel_number()
3936 channel = il->channel_info[i].channel; in il_get_single_channel_number()
3937 if (channel == le16_to_cpu(il->staging.channel)) in il_get_single_channel_number()
3950 * il_set_rxon_channel - Set the band and channel values in staging RXON
3954 * in the staging RXON flag structure based on the ch->band
3959 enum nl80211_band band = ch->band; in il_set_rxon_channel()
3960 u16 channel = ch->hw_value; in il_set_rxon_channel()
3962 if (le16_to_cpu(il->staging.channel) == channel && il->band == band) in il_set_rxon_channel()
3965 il->staging.channel = cpu_to_le16(channel); in il_set_rxon_channel()
3967 il->staging.flags &= ~RXON_FLG_BAND_24G_MSK; in il_set_rxon_channel()
3969 il->staging.flags |= RXON_FLG_BAND_24G_MSK; in il_set_rxon_channel()
3971 il->band = band; in il_set_rxon_channel()
3984 il->staging.flags &= in il_set_flags_for_band()
3987 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; in il_set_flags_for_band()
3990 if (vif && vif->bss_conf.use_short_slot) in il_set_flags_for_band()
3991 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; in il_set_flags_for_band()
3993 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; in il_set_flags_for_band()
3995 il->staging.flags |= RXON_FLG_BAND_24G_MSK; in il_set_flags_for_band()
3996 il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; in il_set_flags_for_band()
3997 il->staging.flags &= ~RXON_FLG_CCK_MSK; in il_set_flags_for_band()
4010 memset(&il->staging, 0, sizeof(il->staging)); in il_connection_init_rx_config()
4012 switch (il->iw_mode) { in il_connection_init_rx_config()
4014 il->staging.dev_type = RXON_DEV_TYPE_ESS; in il_connection_init_rx_config()
4017 il->staging.dev_type = RXON_DEV_TYPE_ESS; in il_connection_init_rx_config()
4018 il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; in il_connection_init_rx_config()
4021 il->staging.dev_type = RXON_DEV_TYPE_IBSS; in il_connection_init_rx_config()
4022 il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; in il_connection_init_rx_config()
4023 il->staging.filter_flags = in il_connection_init_rx_config()
4027 IL_ERR("Unsupported interface type %d\n", il->vif->type); in il_connection_init_rx_config()
4034 if (!hw_to_local(il->hw)->short_preamble) in il_connection_init_rx_config()
4035 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; in il_connection_init_rx_config()
4037 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; in il_connection_init_rx_config()
4041 il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel)); in il_connection_init_rx_config()
4044 ch_info = &il->channel_info[0]; in il_connection_init_rx_config()
4046 il->staging.channel = cpu_to_le16(ch_info->channel); in il_connection_init_rx_config()
4047 il->band = ch_info->band; in il_connection_init_rx_config()
4049 il_set_flags_for_band(il, il->band, il->vif); in il_connection_init_rx_config()
4051 il->staging.ofdm_basic_rates = in il_connection_init_rx_config()
4053 il->staging.cck_basic_rates = in il_connection_init_rx_config()
4057 il->staging.flags &= in il_connection_init_rx_config()
4059 if (il->vif) in il_connection_init_rx_config()
4060 memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN); in il_connection_init_rx_config()
4062 il->staging.ofdm_ht_single_stream_basic_rates = 0xff; in il_connection_init_rx_config()
4063 il->staging.ofdm_ht_dual_stream_basic_rates = 0xff; in il_connection_init_rx_config()
4074 hw = il_get_hw_mode(il, il->band); in il_set_rate()
4080 il->active_rate = 0; in il_set_rate()
4082 for (i = 0; i < hw->n_bitrates; i++) { in il_set_rate()
4083 rate = &(hw->bitrates[i]); in il_set_rate()
4084 if (rate->hw_value < RATE_COUNT_LEGACY) in il_set_rate()
4085 il->active_rate |= (1 << rate->hw_value); in il_set_rate()
4088 D_RATE("Set active_rate = %0x\n", il->active_rate); in il_set_rate()
4090 il->staging.cck_basic_rates = in il_set_rate()
4093 il->staging.ofdm_basic_rates = in il_set_rate()
4101 if (test_bit(S_EXIT_PENDING, &il->status)) in il_chswitch_done()
4104 if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) in il_chswitch_done()
4105 ieee80211_chswitch_done(il->vif, is_success, 0); in il_chswitch_done()
4113 struct il_csa_notification *csa = &(pkt->u.csa_notif); in il_hdl_csa()
4114 struct il_rxon_cmd *rxon = (void *)&il->active; in il_hdl_csa()
4116 if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) in il_hdl_csa()
4119 if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) { in il_hdl_csa()
4120 rxon->channel = csa->channel; in il_hdl_csa()
4121 il->staging.channel = csa->channel; in il_hdl_csa()
4122 D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel)); in il_hdl_csa()
4126 le16_to_cpu(csa->channel)); in il_hdl_csa()
4136 struct il_rxon_cmd *rxon = &il->staging; in il_print_rx_config_cmd()
4140 D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); in il_print_rx_config_cmd()
4141 D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); in il_print_rx_config_cmd()
4142 D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags)); in il_print_rx_config_cmd()
4143 D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type); in il_print_rx_config_cmd()
4144 D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates); in il_print_rx_config_cmd()
4145 D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); in il_print_rx_config_cmd()
4146 D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr); in il_print_rx_config_cmd()
4147 D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr); in il_print_rx_config_cmd()
4148 D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); in il_print_rx_config_cmd()
4153 * il_irq_handle_error - called for HW or SW error interrupt from card
4158 /* Set the FW error flag -- cleared on il_down */ in il_irq_handle_error()
4159 set_bit(S_FW_ERROR, &il->status); in il_irq_handle_error()
4162 clear_bit(S_HCMD_ACTIVE, &il->status); in il_irq_handle_error()
4164 IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version); in il_irq_handle_error()
4166 il->ops->dump_nic_error_log(il); in il_irq_handle_error()
4167 if (il->ops->dump_fh) in il_irq_handle_error()
4168 il->ops->dump_fh(il, NULL, false); in il_irq_handle_error()
4174 wake_up(&il->wait_command_queue); in il_irq_handle_error()
4178 clear_bit(S_READY, &il->status); in il_irq_handle_error()
4180 if (!test_bit(S_EXIT_PENDING, &il->status)) { in il_irq_handle_error()
4184 if (il->cfg->mod_params->restart_fw) in il_irq_handle_error()
4185 queue_work(il->workqueue, &il->restart); in il_irq_handle_error()
4212 lockdep_assert_held(&il->reg_lock); in _il_apm_stop()
4226 * D0A* (powered-up Active) --> D0U* (Uninitialized) state. in _il_apm_stop()
4237 spin_lock_irqsave(&il->reg_lock, flags); in il_apm_stop()
4239 spin_unlock_irqrestore(&il->reg_lock, flags); in il_apm_stop()
4277 * wake device's PCI Express link L1a -> L0s in il_apm_init()
4278 	 * NOTE:  This is a no-op for 3945 (non-existent bit) in il_apm_init()
4284 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition. in il_apm_init()
4285 * Check if BIOS (or OS) enabled L1-ASPM on this device. in il_apm_init()
4286 * If so (likely), disable L0S, so device moves directly L0->L1; in il_apm_init()
4291 if (il->cfg->set_l0s) { in il_apm_init()
4292 ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); in il_apm_init()
4294 /* L1-ASPM enabled; disable(!) L0S */ in il_apm_init()
4299 /* L1-ASPM disabled; enable(!) L0S */ in il_apm_init()
4306 	/* Configure analog phase-locked loop before activating to D0A */ in il_apm_init()
4307 if (il->cfg->pll_cfg_val) in il_apm_init()
4309 il->cfg->pll_cfg_val); in il_apm_init()
4313 * D0U* --> D0A* (powered-up active) state. in il_apm_init()
4319 * device-internal resources is supported, e.g. il_wr_prph() in il_apm_init()
4339 if (il->cfg->use_bsm) in il_apm_init()
4346 /* Disable L1-Active */ in il_apm_init()
4362 lockdep_assert_held(&il->mutex); in il_set_tx_power()
4364 if (il->tx_power_user_lmt == tx_power && !force) in il_set_tx_power()
4367 if (!il->ops->send_tx_power) in il_set_tx_power()
4368 return -EOPNOTSUPP; in il_set_tx_power()
4373 return -EINVAL; in il_set_tx_power()
4376 if (tx_power > il->tx_power_device_lmt) { in il_set_tx_power()
4378 tx_power, il->tx_power_device_lmt); in il_set_tx_power()
4379 return -EINVAL; in il_set_tx_power()
4383 return -EIO; in il_set_tx_power()
4387 il->tx_power_next = tx_power; in il_set_tx_power()
4390 defer = test_bit(S_SCANNING, &il->status) || in il_set_tx_power()
4391 memcmp(&il->active, &il->staging, sizeof(il->staging)); in il_set_tx_power()
4397 prev_tx_power = il->tx_power_user_lmt; in il_set_tx_power()
4398 il->tx_power_user_lmt = tx_power; in il_set_tx_power()
4400 ret = il->ops->send_tx_power(il); in il_set_tx_power()
4404 il->tx_power_user_lmt = prev_tx_power; in il_set_tx_power()
4405 il->tx_power_next = prev_tx_power; in il_set_tx_power()
4455 struct il_sleep_notification *sleep = &(pkt->u.sleep_notif); in il_hdl_pm_sleep()
4457 sleep->pm_sleep_mode, sleep->pm_wakeup_src); in il_hdl_pm_sleep()
4466 u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; in il_hdl_pm_debug_stats()
4468 il_get_cmd_string(pkt->hdr.cmd)); in il_hdl_pm_debug_stats()
4469 il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len); in il_hdl_pm_debug_stats()
4480 le32_to_cpu(pkt->u.err_resp.error_type), in il_hdl_error()
4481 il_get_cmd_string(pkt->u.err_resp.cmd_id), in il_hdl_error()
4482 pkt->u.err_resp.cmd_id, in il_hdl_error()
4483 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), in il_hdl_error()
4484 le32_to_cpu(pkt->u.err_resp.error_info)); in il_hdl_error()
4491 memset(&il->isr_stats, 0, sizeof(il->isr_stats)); in il_clear_isr_stats()
4499 struct il_priv *il = hw->priv; in il_mac_conf_tx()
4506 D_MAC80211("leave - RF not ready\n"); in il_mac_conf_tx()
4507 return -EIO; in il_mac_conf_tx()
4511 D_MAC80211("leave - queue >= AC_NUM %d\n", queue); in il_mac_conf_tx()
4515 q = AC_NUM - 1 - queue; in il_mac_conf_tx()
4517 spin_lock_irqsave(&il->lock, flags); in il_mac_conf_tx()
4519 il->qos_data.def_qos_parm.ac[q].cw_min = in il_mac_conf_tx()
4520 cpu_to_le16(params->cw_min); in il_mac_conf_tx()
4521 il->qos_data.def_qos_parm.ac[q].cw_max = in il_mac_conf_tx()
4522 cpu_to_le16(params->cw_max); in il_mac_conf_tx()
4523 il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; in il_mac_conf_tx()
4524 il->qos_data.def_qos_parm.ac[q].edca_txop = in il_mac_conf_tx()
4525 cpu_to_le16((params->txop * 32)); in il_mac_conf_tx()
4527 il->qos_data.def_qos_parm.ac[q].reserved1 = 0; in il_mac_conf_tx()
4529 spin_unlock_irqrestore(&il->lock, flags); in il_mac_conf_tx()
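/*
 * Illustrative sketch, not part of the driver: mac80211 numbers its TX
 * queues 0..3 with 0 = voice (highest priority), while the QoS table sent
 * to the uCode is indexed the other way around - hence the
 * "AC_NUM - 1 - queue" flip above.  TXOP arrives in 32-usec units and is
 * stored in microseconds:
 */
static void il_example_map_ac(int mac80211_queue, unsigned int txop_32us,
			      int *fw_index, unsigned int *txop_usec)
{
	const int num_acs = 4;			/* AC_NUM in the driver */

	*fw_index = num_acs - 1 - mac80211_queue;
	*txop_usec = txop_32us * 32;
}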
4539 struct il_priv *il = hw->priv; in il_mac_tx_last_beacon()
4544 ret = (il->ibss_manager == IL_IBSS_MANAGER); in il_mac_tx_last_beacon()
4556 if (il->ops->set_rxon_chain) in il_set_mode()
4557 il->ops->set_rxon_chain(il); in il_set_mode()
4565 struct il_priv *il = hw->priv; in il_mac_add_interface()
4569 mutex_lock(&il->mutex); in il_mac_add_interface()
4570 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr); in il_mac_add_interface()
4574 err = -EINVAL; in il_mac_add_interface()
4582 reset = (il->vif == vif); in il_mac_add_interface()
4583 if (il->vif && !reset) { in il_mac_add_interface()
4584 err = -EOPNOTSUPP; in il_mac_add_interface()
4588 il->vif = vif; in il_mac_add_interface()
4589 il->iw_mode = vif->type; in il_mac_add_interface()
4593 		IL_WARN("Failed to set mode %d\n", vif->type); in il_mac_add_interface()
4595 il->vif = NULL; in il_mac_add_interface()
4596 il->iw_mode = NL80211_IFTYPE_STATION; in il_mac_add_interface()
4602 mutex_unlock(&il->mutex); in il_mac_add_interface()
4611 lockdep_assert_held(&il->mutex); in il_teardown_interface()
4613 if (il->scan_vif == vif) { in il_teardown_interface()
4624 struct il_priv *il = hw->priv; in il_mac_remove_interface()
4626 mutex_lock(&il->mutex); in il_mac_remove_interface()
4627 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr); in il_mac_remove_interface()
4629 WARN_ON(il->vif != vif); in il_mac_remove_interface()
4630 il->vif = NULL; in il_mac_remove_interface()
4631 il->iw_mode = NL80211_IFTYPE_UNSPECIFIED; in il_mac_remove_interface()
4633 eth_zero_addr(il->bssid); in il_mac_remove_interface()
4636 mutex_unlock(&il->mutex); in il_mac_remove_interface()
4643 if (!il->txq) in il_alloc_txq_mem()
4644 il->txq = in il_alloc_txq_mem()
4645 kcalloc(il->cfg->num_of_queues, in il_alloc_txq_mem()
4648 if (!il->txq) { in il_alloc_txq_mem()
4650 return -ENOMEM; in il_alloc_txq_mem()
4659 kfree(il->txq); in il_free_txq_mem()
4660 il->txq = NULL; in il_free_txq_mem()
4669 if (test_bit(S_EXIT_PENDING, &il->status)) in il_force_reset()
4670 return -EINVAL; in il_force_reset()
4672 force_reset = &il->force_reset; in il_force_reset()
4673 force_reset->reset_request_count++; in il_force_reset()
4675 if (force_reset->last_force_reset_jiffies && in il_force_reset()
4676 time_after(force_reset->last_force_reset_jiffies + in il_force_reset()
4677 force_reset->reset_duration, jiffies)) { in il_force_reset()
4679 force_reset->reset_reject_count++; in il_force_reset()
4680 return -EAGAIN; in il_force_reset()
4683 force_reset->reset_success_count++; in il_force_reset()
4684 force_reset->last_force_reset_jiffies = jiffies; in il_force_reset()
4695 if (!external && !il->cfg->mod_params->restart_fw) { in il_force_reset()
4703 /* Set the FW error flag -- cleared on il_down */ in il_force_reset()
4704 set_bit(S_FW_ERROR, &il->status); in il_force_reset()
4705 wake_up(&il->wait_command_queue); in il_force_reset()
4710 clear_bit(S_READY, &il->status); in il_force_reset()
4711 queue_work(il->workqueue, &il->restart); in il_force_reset()
4721 struct il_priv *il = hw->priv; in il_mac_change_interface()
4724 mutex_lock(&il->mutex); in il_mac_change_interface()
4726 vif->type, vif->addr, newtype, newp2p); in il_mac_change_interface()
4729 err = -EOPNOTSUPP; in il_mac_change_interface()
4733 if (!il->vif || !il_is_ready_rf(il)) { in il_mac_change_interface()
4738 err = -EBUSY; in il_mac_change_interface()
4743 vif->type = newtype; in il_mac_change_interface()
4744 vif->p2p = false; in il_mac_change_interface()
4745 il->iw_mode = newtype; in il_mac_change_interface()
4751 mutex_unlock(&il->mutex); in il_mac_change_interface()
4760 struct il_priv *il = hw->priv; in il_mac_flush()
4764 mutex_lock(&il->mutex); in il_mac_flush()
4767 if (il->txq == NULL) in il_mac_flush()
4770 for (i = 0; i < il->hw_params.max_txq_num; i++) { in il_mac_flush()
4773 if (i == il->cmd_queue) in il_mac_flush()
4776 q = &il->txq[i].q; in il_mac_flush()
4777 if (q->read_ptr == q->write_ptr) in il_mac_flush()
4781 IL_ERR("Failed to flush queue %d\n", q->id); in il_mac_flush()
4789 mutex_unlock(&il->mutex); in il_mac_flush()
4800 struct il_tx_queue *txq = &il->txq[cnt]; in il_check_stuck_queue()
4801 struct il_queue *q = &txq->q; in il_check_stuck_queue()
4806 if (q->read_ptr == q->write_ptr) { in il_check_stuck_queue()
4807 txq->time_stamp = now; in il_check_stuck_queue()
4812 txq->time_stamp + in il_check_stuck_queue()
4813 msecs_to_jiffies(il->cfg->wd_timeout); in il_check_stuck_queue()
4816 IL_ERR("Queue %d stuck for %u ms.\n", q->id, in il_check_stuck_queue()
4817 jiffies_to_msecs(now - txq->time_stamp)); in il_check_stuck_queue()
4819 return (ret == -EAGAIN) ? 0 : 1; in il_check_stuck_queue()
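/*
 * Illustrative sketch, not part of the driver: the stuck-queue test above
 * relies on time_after(), which stays correct across jiffies wrap-around
 * by doing the comparison as a signed subtraction:
 */
static int il_example_time_after(unsigned long a, unsigned long b)
{
	/* true if a is later than b, even if the counter has wrapped */
	return (long)(b - a) < 0;
}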
4842 if (test_bit(S_EXIT_PENDING, &il->status)) in il_bg_watchdog()
4845 timeout = il->cfg->wd_timeout; in il_bg_watchdog()
4850 if (il_check_stuck_queue(il, il->cmd_queue)) in il_bg_watchdog()
4854 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) { in il_bg_watchdog()
4856 if (cnt == il->cmd_queue) in il_bg_watchdog()
4862 mod_timer(&il->watchdog, in il_bg_watchdog()
4870 unsigned int timeout = il->cfg->wd_timeout; in il_setup_watchdog()
4873 mod_timer(&il->watchdog, in il_setup_watchdog()
4876 del_timer(&il->watchdog); in il_setup_watchdog()
4882  * time in usec is converted into a 32-bit value in extended:internal format
4899 il->hw_params. in il_usecs_to_beacons()
4900 beacon_time_tsf_bits) >> il-> in il_usecs_to_beacons()
4904 il->hw_params. in il_usecs_to_beacons()
4907 return (quot << il->hw_params.beacon_time_tsf_bits) + rem; in il_usecs_to_beacons()
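/*
 * Illustrative sketch, not part of the driver: the extended:internal format
 * above packs the count of whole beacon intervals into the high bits and
 * the leftover microseconds into the low beacon_time_tsf_bits bits.  With
 * the bit split passed in explicitly (the driver additionally masks the
 * quotient to its field width):
 */
static unsigned int il_example_usecs_to_beacon_time(unsigned int usec,
						    unsigned int beacon_int_tu,
						    unsigned int low_bits)
{
	unsigned int interval = beacon_int_tu * 1024;	/* TU -> usec */
	unsigned int quot, rem;

	if (!interval || !usec)
		return 0;

	quot = usec / interval;		/* whole beacon intervals */
	rem = usec % interval;		/* usec into the current interval */

	return (quot << low_bits) | (rem & ((1u << low_bits) - 1));
}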
4919 il->hw_params. in il_add_beacon_time()
4922 il->hw_params. in il_add_beacon_time()
4926 il->hw_params. in il_add_beacon_time()
4929 il->hw_params. in il_add_beacon_time()
4933 res += base_low - addon_low; in il_add_beacon_time()
4935 res += interval + base_low - addon_low; in il_add_beacon_time()
4936 res += (1 << il->hw_params.beacon_time_tsf_bits); in il_add_beacon_time()
4938 res += (1 << il->hw_params.beacon_time_tsf_bits); in il_add_beacon_time()
4984 set_bit(S_RFKILL, &il->status); in il_pci_resume()
4986 clear_bit(S_RFKILL, &il->status); in il_pci_resume()
4988 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill); in il_pci_resume()
5001 if (test_bit(S_EXIT_PENDING, &il->status)) in il_update_qos()
5004 il->qos_data.def_qos_parm.qos_flags = 0; in il_update_qos()
5006 if (il->qos_data.qos_active) in il_update_qos()
5007 il->qos_data.def_qos_parm.qos_flags |= in il_update_qos()
5010 if (il->ht.enabled) in il_update_qos()
5011 il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; in il_update_qos()
5014 il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags); in il_update_qos()
5017 &il->qos_data.def_qos_parm, NULL); in il_update_qos()
5021 * il_mac_config - mac80211 config callback
5026 struct il_priv *il = hw->priv; in il_mac_config()
5028 struct ieee80211_conf *conf = &hw->conf; in il_mac_config()
5029 struct ieee80211_channel *channel = conf->chandef.chan; in il_mac_config()
5030 struct il_ht_config *ht_conf = &il->current_ht_config; in il_mac_config()
5037 mutex_lock(&il->mutex); in il_mac_config()
5038 D_MAC80211("enter: channel %d changed 0x%X\n", channel->hw_value, in il_mac_config()
5041 if (unlikely(test_bit(S_SCANNING, &il->status))) { in il_mac_config()
5048 	/* mac80211 uses static SMPS for non-HT, which is what we want */ in il_mac_config()
5049 il->current_ht_config.smps = conf->smps_mode; in il_mac_config()
5058 if (il->ops->set_rxon_chain) in il_mac_config()
5059 il->ops->set_rxon_chain(il); in il_mac_config()
5070 ch = channel->hw_value; in il_mac_config()
5071 ch_info = il_get_channel_info(il, channel->band, ch); in il_mac_config()
5073 D_MAC80211("leave - invalid channel\n"); in il_mac_config()
5074 ret = -EINVAL; in il_mac_config()
5078 if (il->iw_mode == NL80211_IFTYPE_ADHOC && in il_mac_config()
5080 D_MAC80211("leave - not IBSS channel\n"); in il_mac_config()
5081 ret = -EINVAL; in il_mac_config()
5085 spin_lock_irqsave(&il->lock, flags); in il_mac_config()
5088 if (il->ht.enabled != conf_is_ht(conf)) { in il_mac_config()
5089 il->ht.enabled = conf_is_ht(conf); in il_mac_config()
5092 if (il->ht.enabled) { in il_mac_config()
5094 il->ht.extension_chan_offset = in il_mac_config()
5096 il->ht.is_40mhz = true; in il_mac_config()
5098 il->ht.extension_chan_offset = in il_mac_config()
5100 il->ht.is_40mhz = true; in il_mac_config()
5102 il->ht.extension_chan_offset = in il_mac_config()
5104 il->ht.is_40mhz = false; in il_mac_config()
5107 il->ht.is_40mhz = false; in il_mac_config()
5113 il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE; in il_mac_config()
5118 if ((le16_to_cpu(il->staging.channel) != ch)) in il_mac_config()
5119 il->staging.flags = 0; in il_mac_config()
5124 il_set_flags_for_band(il, channel->band, il->vif); in il_mac_config()
5126 spin_unlock_irqrestore(&il->lock, flags); in il_mac_config()
5128 if (il->ops->update_bcast_stations) in il_mac_config()
5129 ret = il->ops->update_bcast_stations(il); in il_mac_config()
5139 il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS); in il_mac_config()
5140 if (!il->power_data.ps_disabled) in il_mac_config()
5148 D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt, in il_mac_config()
5149 conf->power_level); in il_mac_config()
5151 il_set_tx_power(il, conf->power_level, false); in il_mac_config()
5155 D_MAC80211("leave - not ready\n"); in il_mac_config()
5162 if (memcmp(&il->active, &il->staging, sizeof(il->staging))) in il_mac_config()
5165 D_INFO("Not re-sending same RXON configuration.\n"); in il_mac_config()
5171 mutex_unlock(&il->mutex); in il_mac_config()
5180 struct il_priv *il = hw->priv; in il_mac_reset_tsf()
5183 mutex_lock(&il->mutex); in il_mac_reset_tsf()
5184 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr); in il_mac_reset_tsf()
5186 spin_lock_irqsave(&il->lock, flags); in il_mac_reset_tsf()
5188 memset(&il->current_ht_config, 0, sizeof(struct il_ht_config)); in il_mac_reset_tsf()
5191 dev_consume_skb_irq(il->beacon_skb); in il_mac_reset_tsf()
5192 il->beacon_skb = NULL; in il_mac_reset_tsf()
5193 il->timestamp = 0; in il_mac_reset_tsf()
5195 spin_unlock_irqrestore(&il->lock, flags); in il_mac_reset_tsf()
5199 D_MAC80211("leave - not ready\n"); in il_mac_reset_tsf()
5200 mutex_unlock(&il->mutex); in il_mac_reset_tsf()
5205 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; in il_mac_reset_tsf()
5211 mutex_unlock(&il->mutex); in il_mac_reset_tsf()
5218 struct il_ht_config *ht_conf = &il->current_ht_config; in il_ht_conf()
5220 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; in il_ht_conf()
5224 if (!il->ht.enabled) in il_ht_conf()
5227 il->ht.protection = in il_ht_conf()
5228 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; in il_ht_conf()
5229 il->ht.non_gf_sta_present = in il_ht_conf()
5230 !!(bss_conf-> in il_ht_conf()
5233 ht_conf->single_chain_sufficient = false; in il_ht_conf()
5235 switch (vif->type) { in il_ht_conf()
5238 sta = ieee80211_find_sta(vif, bss_conf->bssid); in il_ht_conf()
5240 struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; in il_ht_conf()
5244 (ht_cap->mcs. in il_ht_conf()
5249 if (ht_cap->mcs.rx_mask[1] == 0 && in il_ht_conf()
5250 ht_cap->mcs.rx_mask[2] == 0) in il_ht_conf()
5251 ht_conf->single_chain_sufficient = true; in il_ht_conf()
5253 ht_conf->single_chain_sufficient = true; in il_ht_conf()
5261 ht_conf->single_chain_sufficient = true; in il_ht_conf()
5266 ht_conf->single_chain_sufficient = true; in il_ht_conf()
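/*
 * Illustrative sketch, not part of the driver: the station case above looks
 * at the peer's HT MCS rx_mask to decide whether a single RX chain is
 * enough - if only MCS 0-7 (rx_mask[0]) are advertised, the peer transmits
 * one spatial stream:
 */
static int il_example_single_stream_peer(const unsigned char rx_mask[10])
{
	/* rx_mask[1] and rx_mask[2] cover MCS 8-23, i.e. 2 and 3 streams */
	return rx_mask[1] == 0 && rx_mask[2] == 0;
}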
5283 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; in il_set_no_assoc()
5284 il->staging.assoc_id = 0; in il_set_no_assoc()
5291 struct il_priv *il = hw->priv; in il_beacon_update()
5301 lockdep_assert_held(&il->mutex); in il_beacon_update()
5303 if (!il->beacon_enabled) { in il_beacon_update()
5309 spin_lock_irqsave(&il->lock, flags); in il_beacon_update()
5310 dev_consume_skb_irq(il->beacon_skb); in il_beacon_update()
5311 il->beacon_skb = skb; in il_beacon_update()
5313 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; in il_beacon_update()
5314 il->timestamp = le64_to_cpu(timestamp); in il_beacon_update()
5317 spin_unlock_irqrestore(&il->lock, flags); in il_beacon_update()
5320 D_MAC80211("leave - RF not ready\n"); in il_beacon_update()
5324 il->ops->post_associate(il); in il_beacon_update()
5331 struct il_priv *il = hw->priv; in il_mac_bss_info_changed()
5334 mutex_lock(&il->mutex); in il_mac_bss_info_changed()
5338 D_MAC80211("leave - not alive\n"); in il_mac_bss_info_changed()
5339 mutex_unlock(&il->mutex); in il_mac_bss_info_changed()
5346 spin_lock_irqsave(&il->lock, flags); in il_mac_bss_info_changed()
5347 il->qos_data.qos_active = bss_conf->qos; in il_mac_bss_info_changed()
5349 spin_unlock_irqrestore(&il->lock, flags); in il_mac_bss_info_changed()
5354 if (vif->bss_conf.enable_beacon) in il_mac_bss_info_changed()
5355 il->beacon_enabled = true; in il_mac_bss_info_changed()
5357 il->beacon_enabled = false; in il_mac_bss_info_changed()
5361 D_MAC80211("BSSID %pM\n", bss_conf->bssid); in il_mac_bss_info_changed()
5371 if (is_zero_ether_addr(bss_conf->bssid)) in il_mac_bss_info_changed()
5380 D_MAC80211("leave - scan abort failed\n"); in il_mac_bss_info_changed()
5381 mutex_unlock(&il->mutex); in il_mac_bss_info_changed()
5386 memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); in il_mac_bss_info_changed()
5389 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); in il_mac_bss_info_changed()
5397 if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON)) in il_mac_bss_info_changed()
5401 D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble); in il_mac_bss_info_changed()
5402 if (bss_conf->use_short_preamble) in il_mac_bss_info_changed()
5403 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; in il_mac_bss_info_changed()
5405 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; in il_mac_bss_info_changed()
5409 D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot); in il_mac_bss_info_changed()
5410 if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ) in il_mac_bss_info_changed()
5411 il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; in il_mac_bss_info_changed()
5413 il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; in il_mac_bss_info_changed()
5414 if (bss_conf->use_cts_prot) in il_mac_bss_info_changed()
5415 il->staging.flags |= RXON_FLG_SELF_CTS_EN; in il_mac_bss_info_changed()
5417 il->staging.flags &= ~RXON_FLG_SELF_CTS_EN; in il_mac_bss_info_changed()
5426 if (A-band) in il_mac_bss_info_changed()
5427 il->staging.ofdm_basic_rates = in il_mac_bss_info_changed()
5428 bss_conf->basic_rates; in il_mac_bss_info_changed()
5430 il->staging.ofdm_basic_rates = in il_mac_bss_info_changed()
5431 bss_conf->basic_rates >> 4; in il_mac_bss_info_changed()
5432 il->staging.cck_basic_rates = in il_mac_bss_info_changed()
5433 bss_conf->basic_rates & 0xF; in il_mac_bss_info_changed()
5440 if (il->ops->set_rxon_chain) in il_mac_bss_info_changed()
5441 il->ops->set_rxon_chain(il); in il_mac_bss_info_changed()
5445 D_MAC80211("ASSOC %d\n", vif->cfg.assoc); in il_mac_bss_info_changed()
5446 if (vif->cfg.assoc) { in il_mac_bss_info_changed()
5447 il->timestamp = bss_conf->sync_tsf; in il_mac_bss_info_changed()
5450 il->ops->post_associate(il); in il_mac_bss_info_changed()
5455 if (changes && il_is_associated(il) && vif->cfg.aid) { in il_mac_bss_info_changed()
5460 memcpy((void *)&il->active, &il->staging, in il_mac_bss_info_changed()
5466 if (vif->bss_conf.enable_beacon) { in il_mac_bss_info_changed()
5467 memcpy(il->staging.bssid_addr, bss_conf->bssid, in il_mac_bss_info_changed()
5469 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); in il_mac_bss_info_changed()
5470 il->ops->config_ap(il); in il_mac_bss_info_changed()
5476 ret = il->ops->manage_ibss_station(il, vif, in il_mac_bss_info_changed()
5477 vif->cfg.ibss_joined); in il_mac_bss_info_changed()
5480 vif->cfg.ibss_joined ? "add" : "remove", in il_mac_bss_info_changed()
5481 bss_conf->bssid); in il_mac_bss_info_changed()
5485 mutex_unlock(&il->mutex); in il_mac_bss_info_changed()
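/*
 * Illustrative sketch, not part of the driver: the BSS_CHANGED_BASIC_RATES
 * handling above leaves the rate split as pseudocode ("if (A-band) ...").
 * The idea is that mac80211's basic_rates bitmap is indexed by position in
 * the band's rate table - on 2.4 GHz the first four entries are the CCK
 * rates, everything above them is OFDM:
 */
static void il_example_split_basic_rates(unsigned int basic_rates, int is_5ghz,
					 unsigned char *cck, unsigned char *ofdm)
{
	if (is_5ghz) {
		*cck = 0;			/* no CCK rates on 5 GHz */
		*ofdm = basic_rates & 0xff;
	} else {
		*cck = basic_rates & 0xf;	/* 1, 2, 5.5, 11 Mbps */
		*ofdm = (basic_rates >> 4) & 0xff;
	}
}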
5499 spin_lock_irqsave(&il->lock, flags); in il_isr()
5502 * back-to-back ISRs and sporadic interrupts from our NIC. in il_isr()
5503 * If we have something to service, the tasklet will re-enable ints. in il_isr()
5504 * If we *don't* have something, we'll re-enable before leaving here. */ in il_isr()
5532 /* il_irq_tasklet() will service interrupts and re-enable them */ in il_isr()
5534 tasklet_schedule(&il->irq_tasklet); in il_isr()
5537 spin_unlock_irqrestore(&il->lock, flags); in il_isr()
5541 /* re-enable interrupts here since we don't have anything to service. */ in il_isr()
5542 	/* only re-enable if disabled by irq */ in il_isr()
5543 if (test_bit(S_INT_ENABLED, &il->status)) in il_isr()
5545 spin_unlock_irqrestore(&il->lock, flags); in il_isr()
5558 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { in il_tx_cmd_protection()
5575 } else if (info->control.rates[0]. in il_tx_cmd_protection()