Lines Matching +full:queue +full:- +full:pkt +full:- +full:tx

1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
11 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 #include <linux/dma-mapping.h>
70 IL_ERR("Tx flush command to flush out all frames\n"); in il4965_check_abort_status()
71 if (!test_bit(S_EXIT_PENDING, &il->status)) in il4965_check_abort_status()
72 queue_work(il->workqueue, &il->tx_flush); in il4965_check_abort_status()
89 spin_lock_irqsave(&rxq->lock, flags); in il4965_rx_queue_reset()
90 INIT_LIST_HEAD(&rxq->rx_free); in il4965_rx_queue_reset()
91 INIT_LIST_HEAD(&rxq->rx_used); in il4965_rx_queue_reset()
92 /* Fill the rx_used queue with _all_ of the Rx buffers */ in il4965_rx_queue_reset()
96 if (rxq->pool[i].page != NULL) { in il4965_rx_queue_reset()
97 dma_unmap_page(&il->pci_dev->dev, in il4965_rx_queue_reset()
98 rxq->pool[i].page_dma, in il4965_rx_queue_reset()
99 PAGE_SIZE << il->hw_params.rx_page_order, in il4965_rx_queue_reset()
101 __il_free_pages(il, rxq->pool[i].page); in il4965_rx_queue_reset()
102 rxq->pool[i].page = NULL; in il4965_rx_queue_reset()
104 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); in il4965_rx_queue_reset()
108 rxq->queue[i] = NULL; in il4965_rx_queue_reset()
111 * not restocked the Rx queue with fresh buffers */ in il4965_rx_queue_reset()
112 rxq->read = rxq->write = 0; in il4965_rx_queue_reset()
113 rxq->write_actual = 0; in il4965_rx_queue_reset()
114 rxq->free_count = 0; in il4965_rx_queue_reset()
115 spin_unlock_irqrestore(&rxq->lock, flags); in il4965_rx_queue_reset()
125 if (il->cfg->mod_params->amsdu_size_8K) in il4965_rx_init()
133 /* Reset driver's Rx queue write idx */ in il4965_rx_init()
137 il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8)); in il4965_rx_init()
140 il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4); in il4965_rx_init()
169 if (pci_pme_capable(il->pci_dev, PCI_D3cold)) in il4965_set_pwr_vmain()
184 struct il_rx_queue *rxq = &il->rxq; in il4965_hw_nic_init()
187 spin_lock_irqsave(&il->lock, flags); in il4965_hw_nic_init()
191 spin_unlock_irqrestore(&il->lock, flags); in il4965_hw_nic_init()
196 /* Allocate the RX queue, or reset if it is already allocated */ in il4965_hw_nic_init()
197 if (!rxq->bd) { in il4965_hw_nic_init()
200 IL_ERR("Unable to initialize Rx queue\n"); in il4965_hw_nic_init()
201 return -ENOMEM; in il4965_hw_nic_init()
210 spin_lock_irqsave(&il->lock, flags); in il4965_hw_nic_init()
212 rxq->need_update = 1; in il4965_hw_nic_init()
215 spin_unlock_irqrestore(&il->lock, flags); in il4965_hw_nic_init()
217 /* Allocate or reset and init all Tx and Command queues */ in il4965_hw_nic_init()
218 if (!il->txq) { in il4965_hw_nic_init()
225 set_bit(S_INIT, &il->status); in il4965_hw_nic_init()
231 * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
240 * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
242 * If there are slots in the RX queue that need to be restocked,
243 * and we have free pre-allocated buffers, fill the ranks as much
253 struct il_rx_queue *rxq = &il->rxq; in il4965_rx_queue_restock()
258 spin_lock_irqsave(&rxq->lock, flags); in il4965_rx_queue_restock()
259 while (il_rx_queue_space(rxq) > 0 && rxq->free_count) { in il4965_rx_queue_restock()
261 rxb = rxq->queue[rxq->write]; in il4965_rx_queue_restock()
262 BUG_ON(rxb && rxb->page); in il4965_rx_queue_restock()
265 element = rxq->rx_free.next; in il4965_rx_queue_restock()
270 rxq->bd[rxq->write] = in il4965_rx_queue_restock()
271 il4965_dma_addr2rbd_ptr(il, rxb->page_dma); in il4965_rx_queue_restock()
272 rxq->queue[rxq->write] = rxb; in il4965_rx_queue_restock()
273 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; in il4965_rx_queue_restock()
274 rxq->free_count--; in il4965_rx_queue_restock()
276 spin_unlock_irqrestore(&rxq->lock, flags); in il4965_rx_queue_restock()
277 /* If the pre-allocated buffer pool is dropping low, schedule to in il4965_rx_queue_restock()
279 if (rxq->free_count <= RX_LOW_WATERMARK) in il4965_rx_queue_restock()
280 queue_work(il->workqueue, &il->rx_replenish); in il4965_rx_queue_restock()
284 if (rxq->write_actual != (rxq->write & ~0x7)) { in il4965_rx_queue_restock()
285 spin_lock_irqsave(&rxq->lock, flags); in il4965_rx_queue_restock()
286 rxq->need_update = 1; in il4965_rx_queue_restock()
287 spin_unlock_irqrestore(&rxq->lock, flags); in il4965_rx_queue_restock()
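The restock loop above pulls buffers off rx_free and writes them into the circular RBD ring. A minimal sketch of the two details it relies on, assuming il4965_dma_addr2rbd_ptr() (documented at line 231) simply drops the low 8 bits, which is consistent with the rxq->bd_dma >> 8 register write at line 137:

/* Each RBD holds dma_addr >> 8, so receive buffers must be 256-byte aligned;
 * whole pages always are. */
static inline __le32 dma_addr_to_rbd_ptr(dma_addr_t dma_addr)
{
        return cpu_to_le32((u32)(dma_addr >> 8));
}

/* The write pointer is pushed to the device only in bursts of 8 RBDs;
 * rxq->write_actual trails rxq->write until it crosses an 8-aligned value,
 * which is what the check at line 284 detects. */
static inline bool rbd_doorbell_due(u32 write, u32 write_actual)
{
        return write_actual != (write & ~0x7);
}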
293 * il4965_rx_replenish - Move all used packets from rx_used to rx_free
297 * Also restock the Rx queue via il_rx_queue_restock.
303 struct il_rx_queue *rxq = &il->rxq; in il4965_rx_allocate()
312 spin_lock_irqsave(&rxq->lock, flags); in il4965_rx_allocate()
313 if (list_empty(&rxq->rx_used)) { in il4965_rx_allocate()
314 spin_unlock_irqrestore(&rxq->lock, flags); in il4965_rx_allocate()
317 spin_unlock_irqrestore(&rxq->lock, flags); in il4965_rx_allocate()
319 if (rxq->free_count > RX_LOW_WATERMARK) in il4965_rx_allocate()
322 if (il->hw_params.rx_page_order > 0) in il4965_rx_allocate()
326 page = alloc_pages(gfp_mask, il->hw_params.rx_page_order); in il4965_rx_allocate()
330 il->hw_params.rx_page_order); in il4965_rx_allocate()
332 if (rxq->free_count <= RX_LOW_WATERMARK && in il4965_rx_allocate()
338 rxq->free_count); in il4965_rx_allocate()
339 /* We don't reschedule replenish work here -- we will in il4965_rx_allocate()
346 page_dma = dma_map_page(&il->pci_dev->dev, page, 0, in il4965_rx_allocate()
347 PAGE_SIZE << il->hw_params.rx_page_order, in il4965_rx_allocate()
349 if (unlikely(dma_mapping_error(&il->pci_dev->dev, page_dma))) { in il4965_rx_allocate()
350 __free_pages(page, il->hw_params.rx_page_order); in il4965_rx_allocate()
354 spin_lock_irqsave(&rxq->lock, flags); in il4965_rx_allocate()
356 if (list_empty(&rxq->rx_used)) { in il4965_rx_allocate()
357 spin_unlock_irqrestore(&rxq->lock, flags); in il4965_rx_allocate()
358 dma_unmap_page(&il->pci_dev->dev, page_dma, in il4965_rx_allocate()
359 PAGE_SIZE << il->hw_params.rx_page_order, in il4965_rx_allocate()
361 __free_pages(page, il->hw_params.rx_page_order); in il4965_rx_allocate()
365 element = rxq->rx_used.next; in il4965_rx_allocate()
369 BUG_ON(rxb->page); in il4965_rx_allocate()
371 rxb->page = page; in il4965_rx_allocate()
372 rxb->page_dma = page_dma; in il4965_rx_allocate()
373 list_add_tail(&rxb->list, &rxq->rx_free); in il4965_rx_allocate()
374 rxq->free_count++; in il4965_rx_allocate()
375 il->alloc_rxb_page++; in il4965_rx_allocate()
377 spin_unlock_irqrestore(&rxq->lock, flags); in il4965_rx_allocate()
388 spin_lock_irqsave(&il->lock, flags); in il4965_rx_replenish()
390 spin_unlock_irqrestore(&il->lock, flags); in il4965_rx_replenish()
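The lock/unlock pair above is the tail of il4965_rx_replenish(); a hedged outline of the whole worker (queued from line 280 when the pool runs low), assuming the sleeping GFP_KERNEL allocation is done before il->lock is taken, which is why allocation and restock are split into two functions:

static void rx_replenish_outline(struct il_priv *il)
{
        unsigned long flags;

        /* May sleep: alloc_pages() + dma_map_page() for each rx_used entry. */
        il4965_rx_allocate(il, GFP_KERNEL);

        spin_lock_irqsave(&il->lock, flags);            /* lines 388-390 */
        il4965_rx_queue_restock(il);                    /* refill the RBD ring */
        spin_unlock_irqrestore(&il->lock, flags);
}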
411 if (rxq->pool[i].page != NULL) { in il4965_rx_queue_free()
412 dma_unmap_page(&il->pci_dev->dev, in il4965_rx_queue_free()
413 rxq->pool[i].page_dma, in il4965_rx_queue_free()
414 PAGE_SIZE << il->hw_params.rx_page_order, in il4965_rx_queue_free()
416 __il_free_pages(il, rxq->pool[i].page); in il4965_rx_queue_free()
417 rxq->pool[i].page = NULL; in il4965_rx_queue_free()
421 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, in il4965_rx_queue_free()
422 rxq->bd_dma); in il4965_rx_queue_free()
423 dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status), in il4965_rx_queue_free()
424 rxq->rb_stts, rxq->rb_stts_dma); in il4965_rx_queue_free()
425 rxq->bd = NULL; in il4965_rx_queue_free()
426 rxq->rb_stts = NULL; in il4965_rx_queue_free()
461 return idx - band_offset; in il4965_hwrate_to_mac80211_idx()
464 return -1; in il4965_hwrate_to_mac80211_idx()
473 (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf; in il4965_calc_rssi()
475 (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >> in il4965_calc_rssi()
479 (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK) in il4965_calc_rssi()
491 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi); in il4965_calc_rssi()
494 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4], in il4965_calc_rssi()
497 /* dBm = max_rssi dB - agc dB - constant. in il4965_calc_rssi()
499 return max_rssi - agc - IL4965_RSSI_OFFSET; in il4965_calc_rssi()
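A worked instance of the comment's formula, assuming IL4965_RSSI_OFFSET is the usual 44 dB constant from the 4965 headers (all values illustrative):

/* dBm = max_rssi dB - agc dB - constant */
u8  max_rssi = 62;      /* strongest rssi_info[] reading across the antennas */
u16 agc      = 48;      /* AGC gain extracted from ncphy->agc_info           */
int signal   = max_rssi - agc - 44;     /* 62 - 48 - 44 = -30 dBm */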
570 __le16 fc = hdr->frame_control; in il4965_pass_packet_to_mac80211()
573 if (unlikely(!il->is_open)) { in il4965_pass_packet_to_mac80211()
578 if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) { in il4965_pass_packet_to_mac80211()
580 D_INFO("Woke queues - frame received on passive channel\n"); in il4965_pass_packet_to_mac80211()
584 if (!il->cfg->mod_params->sw_crypto && in il4965_pass_packet_to_mac80211()
597 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), in il4965_pass_packet_to_mac80211()
598 len, PAGE_SIZE << il->hw_params.rx_page_order); in il4965_pass_packet_to_mac80211()
599 il->alloc_rxb_page--; in il4965_pass_packet_to_mac80211()
600 rxb->page = NULL; in il4965_pass_packet_to_mac80211()
606 ieee80211_rx(il->hw, skb); in il4965_pass_packet_to_mac80211()
610 * N_RX_MPDU (HT high-throughput N frames). */
616 struct il_rx_pkt *pkt = rxb_addr(rxb); in il4965_hdl_rx() local
628 * command and cached in il->last_phy_res in il4965_hdl_rx()
633 if (pkt->hdr.cmd == N_RX) { in il4965_hdl_rx()
634 phy_res = (struct il_rx_phy_res *)pkt->u.raw; in il4965_hdl_rx()
636 (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) + in il4965_hdl_rx()
637 phy_res->cfg_phy_cnt); in il4965_hdl_rx()
639 len = le16_to_cpu(phy_res->byte_count); in il4965_hdl_rx()
641 *(__le32 *) (pkt->u.raw + sizeof(*phy_res) + in il4965_hdl_rx()
642 phy_res->cfg_phy_cnt + len); in il4965_hdl_rx()
645 if (!il->_4965.last_phy_res_valid) { in il4965_hdl_rx()
649 phy_res = &il->_4965.last_phy_res; in il4965_hdl_rx()
650 amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw; in il4965_hdl_rx()
651 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); in il4965_hdl_rx()
652 len = le16_to_cpu(amsdu->byte_count); in il4965_hdl_rx()
653 rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len); in il4965_hdl_rx()
658 if ((unlikely(phy_res->cfg_phy_cnt > 20))) { in il4965_hdl_rx()
660 phy_res->cfg_phy_cnt); in il4965_hdl_rx()
671 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); in il4965_hdl_rx()
674 rx_status.mactime = le64_to_cpu(phy_res->timestamp); in il4965_hdl_rx()
676 (phy_res-> in il4965_hdl_rx()
680 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), in il4965_hdl_rx()
690 il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); in il4965_hdl_rx()
712 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> in il4965_hdl_rx()
716 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) in il4965_hdl_rx()
729 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) { in il4965_hdl_rx()
730 /* We know which subframes of an A-MPDU belong in il4965_hdl_rx()
736 rx_status.ampdu_reference = il->_4965.ampdu_ref; in il4965_hdl_rx()
748 struct il_rx_pkt *pkt = rxb_addr(rxb); in il4965_hdl_rx_phy() local
749 il->_4965.last_phy_res_valid = true; in il4965_hdl_rx_phy()
750 il->_4965.ampdu_ref++; in il4965_hdl_rx_phy()
751 memcpy(&il->_4965.last_phy_res, pkt->u.raw, in il4965_hdl_rx_phy()
778 for (i = 0, added = 0; i < il->scan_request->n_channels; i++) { in il4965_get_channels_for_scan()
779 chan = il->scan_request->channels[i]; in il4965_get_channels_for_scan()
781 if (chan->band != band) in il4965_get_channels_for_scan()
784 channel = chan->hw_value; in il4965_get_channels_for_scan()
785 scan_ch->channel = cpu_to_le16(channel); in il4965_get_channels_for_scan()
795 (chan->flags & IEEE80211_CHAN_NO_IR)) in il4965_get_channels_for_scan()
796 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; in il4965_get_channels_for_scan()
798 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; in il4965_get_channels_for_scan()
801 scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes); in il4965_get_channels_for_scan()
803 scan_ch->active_dwell = cpu_to_le16(active_dwell); in il4965_get_channels_for_scan()
804 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); in il4965_get_channels_for_scan()
807 scan_ch->dsp_atten = 110; in il4965_get_channels_for_scan()
811 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; in il4965_get_channels_for_scan()
814 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; in il4965_get_channels_for_scan()
816 scan_ch->tx_gain = ((1 << 5) | (5 << 3)); in il4965_get_channels_for_scan()
819 le32_to_cpu(scan_ch->type), in il4965_get_channels_for_scan()
820 (scan_ch-> in il4965_get_channels_for_scan()
822 (scan_ch-> in il4965_get_channels_for_scan()
840 for (i = 0; i < RATE_ANT_NUM - 1; i++) { in il4965_toggle_tx_ant()
863 u8 rx_ant = il->hw_params.valid_rx_ant; in il4965_request_scan()
868 u8 scan_tx_antennas = il->hw_params.valid_tx_ant; in il4965_request_scan()
871 lockdep_assert_held(&il->mutex); in il4965_request_scan()
873 if (!il->scan_cmd) { in il4965_request_scan()
874 il->scan_cmd = in il4965_request_scan()
877 if (!il->scan_cmd) { in il4965_request_scan()
879 return -ENOMEM; in il4965_request_scan()
882 scan = il->scan_cmd; in il4965_request_scan()
885 scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH; in il4965_request_scan()
886 scan->quiet_time = IL_ACTIVE_QUIET_TIME; in il4965_request_scan()
895 interval = vif->bss_conf.beacon_int; in il4965_request_scan()
897 scan->suspend_time = 0; in il4965_request_scan()
898 scan->max_out_time = cpu_to_le32(200 * 1024); in il4965_request_scan()
905 scan->suspend_time = cpu_to_le32(scan_suspend_time); in il4965_request_scan()
910 if (il->scan_request->n_ssids) { in il4965_request_scan()
913 for (i = 0; i < il->scan_request->n_ssids; i++) { in il4965_request_scan()
915 if (!il->scan_request->ssids[i].ssid_len) in il4965_request_scan()
917 scan->direct_scan[p].id = WLAN_EID_SSID; in il4965_request_scan()
918 scan->direct_scan[p].len = in il4965_request_scan()
919 il->scan_request->ssids[i].ssid_len; in il4965_request_scan()
920 memcpy(scan->direct_scan[p].ssid, in il4965_request_scan()
921 il->scan_request->ssids[i].ssid, in il4965_request_scan()
922 il->scan_request->ssids[i].ssid_len); in il4965_request_scan()
930 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; in il4965_request_scan()
931 scan->tx_cmd.sta_id = il->hw_params.bcast_id; in il4965_request_scan()
932 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; in il4965_request_scan()
934 switch (il->scan_band) { in il4965_request_scan()
936 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; in il4965_request_scan()
938 le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >> in il4965_request_scan()
952 return -EIO; in il4965_request_scan()
967 * sending out probes -- setting this to a huge value will in il4965_request_scan()
972 scan->good_CRC_th = in il4965_request_scan()
975 band = il->scan_band; in il4965_request_scan()
977 if (il->cfg->scan_rx_antennas[band]) in il4965_request_scan()
978 rx_ant = il->cfg->scan_rx_antennas[band]; in il4965_request_scan()
980 il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas); in il4965_request_scan()
981 rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS; in il4965_request_scan()
982 scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags); in il4965_request_scan()
985 if (test_bit(S_POWER_PMI, &il->status)) { in il4965_request_scan()
988 rx_ant & ((u8) (il->chain_noise_data.active_chains)); in il4965_request_scan()
993 il->chain_noise_data.active_chains); in il4965_request_scan()
999 rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; in il4965_request_scan()
1003 scan->rx_chain = cpu_to_le16(rx_chain); in il4965_request_scan()
1006 il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data, in il4965_request_scan()
1007 vif->addr, il->scan_request->ie, in il4965_request_scan()
1008 il->scan_request->ie_len, in il4965_request_scan()
1009 IL_MAX_SCAN_SIZE - sizeof(*scan)); in il4965_request_scan()
1010 scan->tx_cmd.len = cpu_to_le16(cmd_len); in il4965_request_scan()
1012 scan->filter_flags |= in il4965_request_scan()
1015 scan->channel_count = in il4965_request_scan()
1017 (void *)&scan->data[cmd_len]); in il4965_request_scan()
1018 if (scan->channel_count == 0) { in il4965_request_scan()
1019 D_SCAN("channel count %d\n", scan->channel_count); in il4965_request_scan()
1020 return -EIO; in il4965_request_scan()
1024 le16_to_cpu(scan->tx_cmd.len) + in il4965_request_scan()
1025 scan->channel_count * sizeof(struct il_scan_channel); in il4965_request_scan()
1027 scan->len = cpu_to_le16(cmd.len); in il4965_request_scan()
1029 set_bit(S_SCAN_HW, &il->status); in il4965_request_scan()
1033 clear_bit(S_SCAN_HW, &il->status); in il4965_request_scan()
1042 struct il_vif_priv *vif_priv = (void *)vif->drv_priv; in il4965_manage_ibss_station()
1045 return il4965_add_bssid_station(il, vif->bss_conf.bssid, in il4965_manage_ibss_station()
1046 &vif_priv->ibss_bssid_sta_id); in il4965_manage_ibss_station()
1047 return il_remove_station(il, vif_priv->ibss_bssid_sta_id, in il4965_manage_ibss_station()
1048 vif->bss_conf.bssid); in il4965_manage_ibss_station()
1054 lockdep_assert_held(&il->sta_lock); in il4965_free_tfds_in_queue()
1056 if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed) in il4965_free_tfds_in_queue()
1057 il->stations[sta_id].tid[tid].tfds_in_queue -= freed; in il4965_free_tfds_in_queue()
1060 il->stations[sta_id].tid[tid].tfds_in_queue, freed); in il4965_free_tfds_in_queue()
1061 il->stations[sta_id].tid[tid].tfds_in_queue = 0; in il4965_free_tfds_in_queue()
1070 return il->current_ht_config.smps == IEEE80211_SMPS_STATIC || in il4965_is_single_rx_stream()
1071 il->current_ht_config.single_chain_sufficient; in il4965_is_single_rx_stream()
1107 switch (il->current_ht_config.smps) { in il4965_get_idle_rx_chain_count()
1114 WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps); in il4965_get_idle_rx_chain_count()
1132 * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1141 bool is_cam = !test_bit(S_POWER_PMI, &il->status); in il4965_set_rxon_chain()
1150 if (il->chain_noise_data.active_chains) in il4965_set_rxon_chain()
1151 active_chains = il->chain_noise_data.active_chains; in il4965_set_rxon_chain()
1153 active_chains = il->hw_params.valid_rx_ant; in il4965_set_rxon_chain()
1174 il->staging.rx_chain = cpu_to_le16(rx_chain); in il4965_set_rxon_chain()
1177 il->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; in il4965_set_rxon_chain()
1179 il->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; in il4965_set_rxon_chain()
1181 D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", il->staging.rx_chain, in il4965_set_rxon_chain()
1230 return -ENOMEM; in il4965_dump_fh()
1232 scnprintf(*buf + pos, bufsz - pos, "FH register values:\n"); in il4965_dump_fh()
1235 scnprintf(*buf + pos, bufsz - pos, in il4965_dump_fh()
1254 struct il_rx_pkt *pkt = rxb_addr(rxb); in il4965_hdl_missed_beacon() local
1257 missed_beacon = &pkt->u.missed_beacon; in il4965_hdl_missed_beacon()
1258 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) > in il4965_hdl_missed_beacon()
1259 il->missed_beacon_threshold) { in il4965_hdl_missed_beacon()
1261 le32_to_cpu(missed_beacon->consecutive_missed_beacons), in il4965_hdl_missed_beacon()
1262 le32_to_cpu(missed_beacon->total_missed_becons), in il4965_hdl_missed_beacon()
1263 le32_to_cpu(missed_beacon->num_recvd_beacons), in il4965_hdl_missed_beacon()
1264 le32_to_cpu(missed_beacon->num_expected_beacons)); in il4965_hdl_missed_beacon()
1265 if (!test_bit(S_SCANNING, &il->status)) in il4965_hdl_missed_beacon()
1282 rx_info = &(il->_4965.stats.rx.general); in il4965_rx_calc_noise()
1284 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; in il4965_rx_calc_noise()
1286 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; in il4965_rx_calc_noise()
1288 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; in il4965_rx_calc_noise()
1305 last_rx_noise = (total_silence / num_active_rx) - 107; in il4965_rx_calc_noise()
1317 * the case of counters roll-over.
1328 prev_stats = (__le32 *) &il->_4965.stats; in il4965_accumulative_stats()
1329 accum_stats = (u32 *) &il->_4965.accum_stats; in il4965_accumulative_stats()
1331 general = &il->_4965.stats.general.common; in il4965_accumulative_stats()
1332 accum_general = &il->_4965.accum_stats.general.common; in il4965_accumulative_stats()
1333 delta = (u32 *) &il->_4965.delta_stats; in il4965_accumulative_stats()
1334 max_delta = (u32 *) &il->_4965.max_delta; in il4965_accumulative_stats()
1342 (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats)); in il4965_accumulative_stats()
1349 /* reset accumulative stats for "no-counter" type stats */ in il4965_accumulative_stats()
1350 accum_general->temperature = general->temperature; in il4965_accumulative_stats()
1351 accum_general->ttl_timestamp = general->ttl_timestamp; in il4965_accumulative_stats()
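The accumulation above walks the statistics notification as an array of __le32 counters; the delta subtraction is visible at line 1342, and the previous snapshot is refreshed separately by the memcpy at line 1376. A simplified, self-contained model of the technique (not the driver's exact loop, which also skips the leading flag word):

/* Fold each counter's increase since the last notification into a running
 * total so totals survive the uCode clearing its counters; a counter that
 * went backwards (cleared or rolled over) contributes nothing this round. */
static void accumulate_counters(const u32 *now, const u32 *prev,
                                u32 *accum, u32 *delta, u32 *max_delta,
                                size_t n)
{
        size_t i;

        for (i = 0; i < n; i++) {
                if (now[i] <= prev[i])
                        continue;
                delta[i] = now[i] - prev[i];
                accum[i] += delta[i];
                if (delta[i] > max_delta[i])
                        max_delta[i] = delta[i];
        }
}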
1360 struct il_rx_pkt *pkt = rxb_addr(rxb); in il4965_hdl_stats() local
1364 le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK); in il4965_hdl_stats()
1367 ((il->_4965.stats.general.common.temperature != in il4965_hdl_stats()
1368 pkt->u.stats.general.common.temperature) || in il4965_hdl_stats()
1369 ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) != in il4965_hdl_stats()
1370 (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK))); in il4965_hdl_stats()
1372 il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats); in il4965_hdl_stats()
1376 memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats)); in il4965_hdl_stats()
1378 set_bit(S_STATS, &il->status); in il4965_hdl_stats()
1384 mod_timer(&il->stats_periodic, in il4965_hdl_stats()
1387 if (unlikely(!test_bit(S_SCANNING, &il->status)) && in il4965_hdl_stats()
1388 (pkt->hdr.cmd == N_STATS)) { in il4965_hdl_stats()
1390 queue_work(il->workqueue, &il->run_time_calib_work); in il4965_hdl_stats()
1400 struct il_rx_pkt *pkt = rxb_addr(rxb); in il4965_hdl_c_stats() local
1402 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) { in il4965_hdl_c_stats()
1404 memset(&il->_4965.accum_stats, 0, in il4965_hdl_c_stats()
1406 memset(&il->_4965.delta_stats, 0, in il4965_hdl_c_stats()
1408 memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats)); in il4965_hdl_c_stats()
1430 * Regular (not A-MPDU) frames are put into hardware queues corresponding
1431 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
1432 * own queue per aggregation session (RA/TID combination), such queues are
1433 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
1434 * order to map frames to the right queue, we also need an AC->hw queue
1438 * 4965.c), the AC->hw queue mapping is the identity
1459 /* no support for TIDs 8-15 yet */ in il4965_get_ac_from_tid()
1460 return -EINVAL; in il4965_get_ac_from_tid()
1476 /* no support for TIDs 8-15 yet */ in il4965_get_fifo_from_tid()
1477 return -EINVAL; in il4965_get_fifo_from_tid()
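Both helpers above index small per-TID tables (only TIDs 0-7 are handled, hence the -EINVAL at lines 1460 and 1477). The table below is the standard 802.11 UP-to-AC mapping such a helper uses; the further AC-to-FIFO step is a second lookup whose FIFO numbers are hardware-specific and not reproduced here:

/* Standard user-priority (TID 0-7) to access-category mapping. */
static const u8 tid_to_ac[] = {
        IEEE80211_AC_BE,        /* TID 0 */
        IEEE80211_AC_BK,        /* TID 1 */
        IEEE80211_AC_BK,        /* TID 2 */
        IEEE80211_AC_BE,        /* TID 3 */
        IEEE80211_AC_VI,        /* TID 4 */
        IEEE80211_AC_VI,        /* TID 5 */
        IEEE80211_AC_VO,        /* TID 6 */
        IEEE80211_AC_VO,        /* TID 7 */
};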
1489 __le16 fc = hdr->frame_control; in il4965_tx_cmd_build_basic()
1490 __le32 tx_flags = tx_cmd->tx_flags; in il4965_tx_cmd_build_basic()
1492 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; in il4965_tx_cmd_build_basic()
1493 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { in il4965_tx_cmd_build_basic()
1498 !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) in il4965_tx_cmd_build_basic()
1508 tx_cmd->sta_id = std_id; in il4965_tx_cmd_build_basic()
1514 tx_cmd->tid_tspec = qc[0] & 0xf; in il4965_tx_cmd_build_basic()
1525 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); in il4965_tx_cmd_build_basic()
1527 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); in il4965_tx_cmd_build_basic()
1529 tx_cmd->timeout.pm_frame_timeout = 0; in il4965_tx_cmd_build_basic()
1532 tx_cmd->driver_txop = 0; in il4965_tx_cmd_build_basic()
1533 tx_cmd->tx_flags = tx_flags; in il4965_tx_cmd_build_basic()
1534 tx_cmd->next_frame_len = 0; in il4965_tx_cmd_build_basic()
1555 tx_cmd->data_retry_limit = data_retry_limit; in il4965_tx_cmd_build_rate()
1557 tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit); in il4965_tx_cmd_build_rate()
1562 tx_cmd->initial_rate_idx = 0; in il4965_tx_cmd_build_rate()
1563 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; in il4965_tx_cmd_build_rate()
1568 * If the current TX rate stored in mac80211 has the MCS bit set, it's in il4965_tx_cmd_build_rate()
1569 * not really a TX rate. Thus, we use the lowest supported rate for in il4965_tx_cmd_build_rate()
1573 rate_idx = info->control.rates[0].idx; in il4965_tx_cmd_build_rate()
1574 if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0 in il4965_tx_cmd_build_rate()
1576 rate_idx = rate_lowest_index(&il->bands[info->band], sta); in il4965_tx_cmd_build_rate()
1578 if (info->band == NL80211_BAND_5GHZ) { in il4965_tx_cmd_build_rate()
1583 /* Get PLCP rate for tx_cmd->rate_n_flags */ in il4965_tx_cmd_build_rate()
1593 il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant); in il4965_tx_cmd_build_rate()
1594 rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS; in il4965_tx_cmd_build_rate()
1596 /* Set the rate in the TX cmd */ in il4965_tx_cmd_build_rate()
1597 tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags); in il4965_tx_cmd_build_rate()
1605 struct ieee80211_key_conf *keyconf = info->control.hw_key; in il4965_tx_cmd_build_hwcrypto()
1607 switch (keyconf->cipher) { in il4965_tx_cmd_build_hwcrypto()
1609 tx_cmd->sec_ctl = TX_CMD_SEC_CCM; in il4965_tx_cmd_build_hwcrypto()
1610 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); in il4965_tx_cmd_build_hwcrypto()
1611 if (info->flags & IEEE80211_TX_CTL_AMPDU) in il4965_tx_cmd_build_hwcrypto()
1612 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; in il4965_tx_cmd_build_hwcrypto()
1617 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; in il4965_tx_cmd_build_hwcrypto()
1618 ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); in il4965_tx_cmd_build_hwcrypto()
1623 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; in il4965_tx_cmd_build_hwcrypto()
1626 tx_cmd->sec_ctl |= in il4965_tx_cmd_build_hwcrypto()
1627 (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) << in il4965_tx_cmd_build_hwcrypto()
1630 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); in il4965_tx_cmd_build_hwcrypto()
1633 keyconf->keyidx); in il4965_tx_cmd_build_hwcrypto()
1637 IL_ERR("Unknown encode cipher %x\n", keyconf->cipher); in il4965_tx_cmd_build_hwcrypto()
1650 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; in il4965_tx_skb()
1673 spin_lock_irqsave(&il->lock, flags); in il4965_tx_skb()
1675 D_DROP("Dropping - RF KILL\n"); in il4965_tx_skb()
1679 fc = hdr->frame_control; in il4965_tx_skb()
1694 sta_id = il->hw_params.bcast_id; in il4965_tx_skb()
1700 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1); in il4965_tx_skb()
1708 sta_priv = (void *)sta->drv_priv; in il4965_tx_skb()
1710 if (sta_priv && sta_priv->asleep && in il4965_tx_skb()
1711 (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) { in il4965_tx_skb()
1715 * next frame is processed -- and the next frame to in il4965_tx_skb()
1725 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); in il4965_tx_skb()
1727 /* Access category (AC) is also the queue number */ in il4965_tx_skb()
1730 /* irqs already disabled/saved above when locking il->lock */ in il4965_tx_skb()
1731 spin_lock(&il->sta_lock); in il4965_tx_skb()
1737 spin_unlock(&il->sta_lock); in il4965_tx_skb()
1740 seq_number = il->stations[sta_id].tid[tid].seq_number; in il4965_tx_skb()
1742 hdr->seq_ctrl = in il4965_tx_skb()
1743 hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG); in il4965_tx_skb()
1744 hdr->seq_ctrl |= cpu_to_le16(seq_number); in il4965_tx_skb()
1747 if (info->flags & IEEE80211_TX_CTL_AMPDU && in il4965_tx_skb()
1748 il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) { in il4965_tx_skb()
1749 txq_id = il->stations[sta_id].tid[tid].agg.txq_id; in il4965_tx_skb()
1754 txq = &il->txq[txq_id]; in il4965_tx_skb()
1755 q = &txq->q; in il4965_tx_skb()
1757 if (unlikely(il_queue_space(q) < q->high_mark)) { in il4965_tx_skb()
1758 spin_unlock(&il->sta_lock); in il4965_tx_skb()
1763 il->stations[sta_id].tid[tid].tfds_in_queue++; in il4965_tx_skb()
1765 il->stations[sta_id].tid[tid].seq_number = seq_number; in il4965_tx_skb()
1768 spin_unlock(&il->sta_lock); in il4965_tx_skb()
1770 txq->skbs[q->write_ptr] = skb; in il4965_tx_skb()
1772 /* Set up first empty entry in queue's array of Tx/cmd buffers */ in il4965_tx_skb()
1773 out_cmd = txq->cmd[q->write_ptr]; in il4965_tx_skb()
1774 out_meta = &txq->meta[q->write_ptr]; in il4965_tx_skb()
1775 tx_cmd = container_of(&out_cmd->cmd.tx, struct il_tx_cmd, __hdr); in il4965_tx_skb()
1776 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); in il4965_tx_skb()
1780 * Set up the Tx-command (not MAC!) header. in il4965_tx_skb()
1781 * Store the chosen Tx queue and TFD idx within the sequence field; in il4965_tx_skb()
1782 * after Tx, uCode's Tx response will return this value so driver can in il4965_tx_skb()
1783 * locate the frame within the tx queue and do post-tx processing. in il4965_tx_skb()
1785 out_cmd->hdr.cmd = C_TX; in il4965_tx_skb()
1786 out_cmd->hdr.sequence = in il4965_tx_skb()
1788 (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr))); in il4965_tx_skb()
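The sequence value packed at lines 1786-1788 is driver bookkeeping, not the 802.11 sequence number: the uCode echoes it in its Tx response so the completion path can locate the frame again. A hedged sketch of the round trip, assuming the low byte carries the TFD index and the next bits the queue number, with SEQ_TO_QUEUE()/SEQ_TO_IDX() as the inverses of the packing macros used above:

/* Submit side (il4965_tx_skb): remember where this frame went. */
out_cmd->hdr.sequence =
        cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));

/* Completion side (il4965_hdl_tx, line 2761 onward): recover queue and slot. */
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
int txq_id   = SEQ_TO_QUEUE(sequence);  /* which Tx queue carried the frame */
int idx      = SEQ_TO_IDX(sequence);    /* TFD slot -> txq->skbs[idx] etc.  */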
1791 memcpy(tx_cmd->hdr, hdr, hdr_len); in il4965_tx_skb()
1794 tx_cmd->len = cpu_to_le16((u16) skb->len); in il4965_tx_skb()
1796 if (info->control.hw_key) in il4965_tx_skb()
1805 * Use the first empty entry in this queue's command buffer array in il4965_tx_skb()
1806 * to contain the Tx command and MAC header concatenated together in il4965_tx_skb()
1816 /* Tell NIC about any 2-byte padding after MAC header */ in il4965_tx_skb()
1818 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; in il4965_tx_skb()
1820 /* Physical address of this Tx command's header (not MAC header!), in il4965_tx_skb()
1822 txcmd_phys = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, firstlen, in il4965_tx_skb()
1824 if (unlikely(dma_mapping_error(&il->pci_dev->dev, txcmd_phys))) in il4965_tx_skb()
1829 secondlen = skb->len - hdr_len; in il4965_tx_skb()
1831 phys_addr = dma_map_single(&il->pci_dev->dev, skb->data + hdr_len, in il4965_tx_skb()
1833 if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) in il4965_tx_skb()
1837 /* Add buffer containing Tx command and MAC(!) header to TFD's in il4965_tx_skb()
1839 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0); in il4965_tx_skb()
1843 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, in il4965_tx_skb()
1846 if (!ieee80211_has_morefrags(hdr->frame_control)) { in il4965_tx_skb()
1847 txq->need_update = 1; in il4965_tx_skb()
1850 txq->need_update = 0; in il4965_tx_skb()
1858 dma_sync_single_for_cpu(&il->pci_dev->dev, txcmd_phys, firstlen, in il4965_tx_skb()
1860 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); in il4965_tx_skb()
1861 tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys); in il4965_tx_skb()
1863 il_update_stats(il, true, fc, skb->len); in il4965_tx_skb()
1865 D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence)); in il4965_tx_skb()
1866 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); in il4965_tx_skb()
1868 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len); in il4965_tx_skb()
1870 /* Set up entry for this TFD in Tx byte-count array */ in il4965_tx_skb()
1871 if (info->flags & IEEE80211_TX_CTL_AMPDU) in il4965_tx_skb()
1872 il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len)); in il4965_tx_skb()
1874 dma_sync_single_for_device(&il->pci_dev->dev, txcmd_phys, firstlen, in il4965_tx_skb()
1878 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); in il4965_tx_skb()
1880 spin_unlock_irqrestore(&il->lock, flags); in il4965_tx_skb()
1884 * and we will get a TX status notification eventually, in il4965_tx_skb()
1896 if (sta_priv && sta_priv->client && !is_agg) in il4965_tx_skb()
1897 atomic_inc(&sta_priv->pending_frames); in il4965_tx_skb()
1899 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) { in il4965_tx_skb()
1901 spin_lock_irqsave(&il->lock, flags); in il4965_tx_skb()
1902 txq->need_update = 1; in il4965_tx_skb()
1904 spin_unlock_irqrestore(&il->lock, flags); in il4965_tx_skb()
1913 spin_unlock_irqrestore(&il->lock, flags); in il4965_tx_skb()
1914 return -1; in il4965_tx_skb()
1920 ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, in il4965_alloc_dma_ptr()
1922 if (!ptr->addr) in il4965_alloc_dma_ptr()
1923 return -ENOMEM; in il4965_alloc_dma_ptr()
1924 ptr->size = size; in il4965_alloc_dma_ptr()
1931 if (unlikely(!ptr->addr)) in il4965_free_dma_ptr()
1934 dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma); in il4965_free_dma_ptr()
1939 * il4965_hw_txq_ctx_free - Free TXQ Context
1941 * Destroy all TX DMA queues and structures
1948 /* Tx queues */ in il4965_hw_txq_ctx_free()
1949 if (il->txq) { in il4965_hw_txq_ctx_free()
1950 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) in il4965_hw_txq_ctx_free()
1951 if (txq_id == il->cmd_queue) in il4965_hw_txq_ctx_free()
1956 il4965_free_dma_ptr(il, &il->kw); in il4965_hw_txq_ctx_free()
1958 il4965_free_dma_ptr(il, &il->scd_bc_tbls); in il4965_hw_txq_ctx_free()
1960 /* free tx queue structure */ in il4965_hw_txq_ctx_free()
1965 * il4965_txq_ctx_alloc - allocate TX queue context
1966 * Allocate all Tx DMA structures and initialize them
1974 /* Free all tx/cmd queues and keep-warm buffer */ in il4965_txq_ctx_alloc()
1978 il4965_alloc_dma_ptr(il, &il->scd_bc_tbls, in il4965_txq_ctx_alloc()
1979 il->hw_params.scd_bc_tbls_size); in il4965_txq_ctx_alloc()
1984 /* Alloc keep-warm buffer */ in il4965_txq_ctx_alloc()
1985 ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE); in il4965_txq_ctx_alloc()
1991 /* allocate tx queue structure */ in il4965_txq_ctx_alloc()
1996 spin_lock_irqsave(&il->lock, flags); in il4965_txq_ctx_alloc()
1998 /* Turn off all Tx DMA fifos */ in il4965_txq_ctx_alloc()
2002 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4); in il4965_txq_ctx_alloc()
2004 spin_unlock_irqrestore(&il->lock, flags); in il4965_txq_ctx_alloc()
2006 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ in il4965_txq_ctx_alloc()
2007 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) { in il4965_txq_ctx_alloc()
2010 IL_ERR("Tx %d queue init failed\n", txq_id); in il4965_txq_ctx_alloc()
2019 il4965_free_dma_ptr(il, &il->kw); in il4965_txq_ctx_alloc()
2021 il4965_free_dma_ptr(il, &il->scd_bc_tbls); in il4965_txq_ctx_alloc()
2032 spin_lock_irqsave(&il->lock, flags); in il4965_txq_ctx_reset()
2034 /* Turn off all Tx DMA fifos */ in il4965_txq_ctx_reset()
2037 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4); in il4965_txq_ctx_reset()
2039 spin_unlock_irqrestore(&il->lock, flags); in il4965_txq_ctx_reset()
2041 /* Alloc and init all Tx queues, including the command queue (#4) */ in il4965_txq_ctx_reset()
2042 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) in il4965_txq_ctx_reset()
2051 if (!il->txq) in il4965_txq_ctx_unmap()
2055 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) in il4965_txq_ctx_unmap()
2056 if (txq_id == il->cmd_queue) in il4965_txq_ctx_unmap()
2063 * il4965_txq_ctx_stop - Stop all Tx DMA channels
2072 /* Stop each Tx DMA channel, and wait for it to be idle */ in il4965_txq_ctx_stop()
2073 for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) { in il4965_txq_ctx_stop()
2087 * Find first available (lowest unused) Tx Queue, mark it "active".
2088 * Called only when finding queue for aggregation.
2090 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
2097 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) in il4965_txq_ctx_activate_free()
2098 if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk)) in il4965_txq_ctx_activate_free()
2100 return -1; in il4965_txq_ctx_activate_free()
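This helper is the allocation point for aggregation queues; a short usage sketch matching the error handling shown later at lines 2247-2249 in il4965_tx_agg_start():

txq_id = il4965_txq_ctx_activate_free(il);
if (txq_id == -1) {
        IL_ERR("No free aggregation queue available\n");
        return -ENXIO;
}
/* Queues 0-3 are the EDCA ACs, 4 the command queue and 5-6 reserved, so the
 * first candidate in practice is 7; the chosen queue is then bound to the
 * RA/TID pair and programmed by il4965_txq_agg_enable(). */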
2104 * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
2109 /* Simply stop the queue, but don't change any configuration; in il4965_tx_queue_stop_scheduler()
2110 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ in il4965_tx_queue_stop_scheduler()
2117 * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
2129 il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); in il4965_tx_queue_set_q2ratid()
2144 * il4965_txq_agg_enable - Set up & enable aggregation for selected queue
2159 il->cfg->num_of_ampdu_queues <= txq_id)) { in il4965_txq_agg_enable()
2160 IL_WARN("queue number out of range: %d, must be %d to %d\n", in il4965_txq_agg_enable()
2163 il->cfg->num_of_ampdu_queues - 1); in il4965_txq_agg_enable()
2164 return -EINVAL; in il4965_txq_agg_enable()
2169 /* Modify device's station table to Tx this TID */ in il4965_txq_agg_enable()
2174 spin_lock_irqsave(&il->lock, flags); in il4965_txq_agg_enable()
2176 /* Stop this Tx queue before configuring it */ in il4965_txq_agg_enable()
2179 /* Map receiver-address / traffic-ID to this queue */ in il4965_txq_agg_enable()
2182 /* Set this queue as a chain-building queue */ in il4965_txq_agg_enable()
2187 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); in il4965_txq_agg_enable()
2188 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); in il4965_txq_agg_enable()
2191 /* Set up Tx win size and frame limit for this queue */ in il4965_txq_agg_enable()
2193 il->scd_base_addr + in il4965_txq_agg_enable()
2199 il->scd_base_addr + in il4965_txq_agg_enable()
2207 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ in il4965_txq_agg_enable()
2208 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1); in il4965_txq_agg_enable()
2210 spin_unlock_irqrestore(&il->lock, flags); in il4965_txq_agg_enable()
2226 /* FIXME: warning if tx fifo not found ? */ in il4965_tx_agg_start()
2231 D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid); in il4965_tx_agg_start()
2236 return -ENXIO; in il4965_tx_agg_start()
2239 return -EINVAL; in il4965_tx_agg_start()
2241 if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) { in il4965_tx_agg_start()
2243 return -ENXIO; in il4965_tx_agg_start()
2247 if (txq_id == -1) { in il4965_tx_agg_start()
2248 IL_ERR("No free aggregation queue available\n"); in il4965_tx_agg_start()
2249 return -ENXIO; in il4965_tx_agg_start()
2252 spin_lock_irqsave(&il->sta_lock, flags); in il4965_tx_agg_start()
2253 tid_data = &il->stations[sta_id].tid[tid]; in il4965_tx_agg_start()
2254 *ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); in il4965_tx_agg_start()
2255 tid_data->agg.txq_id = txq_id; in il4965_tx_agg_start()
2256 il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id); in il4965_tx_agg_start()
2257 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_tx_agg_start()
2263 spin_lock_irqsave(&il->sta_lock, flags); in il4965_tx_agg_start()
2264 tid_data = &il->stations[sta_id].tid[tid]; in il4965_tx_agg_start()
2265 if (tid_data->tfds_in_queue == 0) { in il4965_tx_agg_start()
2266 D_HT("HW queue is empty\n"); in il4965_tx_agg_start()
2267 tid_data->agg.state = IL_AGG_ON; in il4965_tx_agg_start()
2270 D_HT("HW queue is NOT empty: %d packets in HW queue\n", in il4965_tx_agg_start()
2271 tid_data->tfds_in_queue); in il4965_tx_agg_start()
2272 tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA; in il4965_tx_agg_start()
2274 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_tx_agg_start()
2280 * il->lock must be held by the caller
2287 il->cfg->num_of_ampdu_queues <= txq_id)) { in il4965_txq_agg_disable()
2288 IL_WARN("queue number out of range: %d, must be %d to %d\n", in il4965_txq_agg_disable()
2291 il->cfg->num_of_ampdu_queues - 1); in il4965_txq_agg_disable()
2292 return -EINVAL; in il4965_txq_agg_disable()
2299 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); in il4965_txq_agg_disable()
2300 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); in il4965_txq_agg_disable()
2306 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0); in il4965_txq_agg_disable()
2329 return -ENXIO; in il4965_tx_agg_stop()
2332 spin_lock_irqsave(&il->sta_lock, flags); in il4965_tx_agg_stop()
2334 tid_data = &il->stations[sta_id].tid[tid]; in il4965_tx_agg_stop()
2335 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; in il4965_tx_agg_stop()
2336 txq_id = tid_data->agg.txq_id; in il4965_tx_agg_stop()
2338 switch (il->stations[sta_id].tid[tid].agg.state) { in il4965_tx_agg_stop()
2343 * queue we selected previously, i.e. before the in il4965_tx_agg_stop()
2354 write_ptr = il->txq[txq_id].q.write_ptr; in il4965_tx_agg_stop()
2355 read_ptr = il->txq[txq_id].q.read_ptr; in il4965_tx_agg_stop()
2357 /* The queue is not empty */ in il4965_tx_agg_stop()
2359 D_HT("Stopping a non empty AGG HW QUEUE\n"); in il4965_tx_agg_stop()
2360 il->stations[sta_id].tid[tid].agg.state = in il4965_tx_agg_stop()
2362 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_tx_agg_stop()
2366 D_HT("HW queue is empty\n"); in il4965_tx_agg_stop()
2368 il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF; in il4965_tx_agg_stop()
2371 spin_unlock(&il->sta_lock); in il4965_tx_agg_stop()
2372 spin_lock(&il->lock); in il4965_tx_agg_stop()
2375 * the only reason this call can fail is queue number out of range, in il4965_tx_agg_stop()
2378 * to deactivate the uCode queue, just return "success" to allow in il4965_tx_agg_stop()
2382 spin_unlock_irqrestore(&il->lock, flags); in il4965_tx_agg_stop()
2384 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); in il4965_tx_agg_stop()
2392 struct il_queue *q = &il->txq[txq_id].q; in il4965_txq_check_empty()
2393 u8 *addr = il->stations[sta_id].sta.sta.addr; in il4965_txq_check_empty()
2394 struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid]; in il4965_txq_check_empty()
2396 lockdep_assert_held(&il->sta_lock); in il4965_txq_check_empty()
2398 switch (il->stations[sta_id].tid[tid].agg.state) { in il4965_txq_check_empty()
2401 /* aggregated HW queue */ in il4965_txq_check_empty()
2402 if (txq_id == tid_data->agg.txq_id && in il4965_txq_check_empty()
2403 q->read_ptr == q->write_ptr) { in il4965_txq_check_empty()
2404 u16 ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); in il4965_txq_check_empty()
2406 D_HT("HW queue empty: continue DELBA flow\n"); in il4965_txq_check_empty()
2408 tid_data->agg.state = IL_AGG_OFF; in il4965_txq_check_empty()
2409 ieee80211_stop_tx_ba_cb_irqsafe(il->vif, addr, tid); in il4965_txq_check_empty()
2413 /* We are reclaiming the last packet of the queue */ in il4965_txq_check_empty()
2414 if (tid_data->tfds_in_queue == 0) { in il4965_txq_check_empty()
2415 D_HT("HW queue empty: continue ADDBA flow\n"); in il4965_txq_check_empty()
2416 tid_data->agg.state = IL_AGG_ON; in il4965_txq_check_empty()
2417 ieee80211_start_tx_ba_cb_irqsafe(il->vif, addr, tid); in il4965_txq_check_empty()
2432 sta = ieee80211_find_sta(il->vif, addr1); in il4965_non_agg_tx_status()
2434 sta_priv = (void *)sta->drv_priv; in il4965_non_agg_tx_status()
2436 if (sta_priv->client && in il4965_non_agg_tx_status()
2437 atomic_dec_return(&sta_priv->pending_frames) == 0) in il4965_non_agg_tx_status()
2438 ieee80211_sta_block_awake(il->hw, sta, false); in il4965_non_agg_tx_status()
2446 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; in il4965_tx_status()
2449 il4965_non_agg_tx_status(il, hdr->addr1); in il4965_tx_status()
2451 ieee80211_tx_status_irqsafe(il->hw, skb); in il4965_tx_status()
2457 struct il_tx_queue *txq = &il->txq[txq_id]; in il4965_tx_queue_reclaim()
2458 struct il_queue *q = &txq->q; in il4965_tx_queue_reclaim()
2463 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) { in il4965_tx_queue_reclaim()
2464 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, " in il4965_tx_queue_reclaim()
2465 "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd, in il4965_tx_queue_reclaim()
2466 q->write_ptr, q->read_ptr); in il4965_tx_queue_reclaim()
2470 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; in il4965_tx_queue_reclaim()
2471 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) { in il4965_tx_queue_reclaim()
2473 skb = txq->skbs[txq->q.read_ptr]; in il4965_tx_queue_reclaim()
2478 hdr = (struct ieee80211_hdr *) skb->data; in il4965_tx_queue_reclaim()
2479 if (ieee80211_is_data_qos(hdr->frame_control)) in il4965_tx_queue_reclaim()
2484 txq->skbs[txq->q.read_ptr] = NULL; in il4965_tx_queue_reclaim()
2485 il->ops->txq_free_tfd(il, txq); in il4965_tx_queue_reclaim()
2491 * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
2493 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
2501 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); in il4965_tx_status_reply_compressed_ba()
2502 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); in il4965_tx_status_reply_compressed_ba()
2507 if (unlikely(!agg->wait_for_ba)) { in il4965_tx_status_reply_compressed_ba()
2508 if (unlikely(ba_resp->bitmap)) in il4965_tx_status_reply_compressed_ba()
2510 return -EINVAL; in il4965_tx_status_reply_compressed_ba()
2513 /* Mark that the expected block-ack response arrived */ in il4965_tx_status_reply_compressed_ba()
2514 agg->wait_for_ba = 0; in il4965_tx_status_reply_compressed_ba()
2515 D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl); in il4965_tx_status_reply_compressed_ba()
2517 /* Calculate shift to align block-ack bits with our Tx win bits */ in il4965_tx_status_reply_compressed_ba()
2518 sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4); in il4965_tx_status_reply_compressed_ba()
2522 if (agg->frame_count > (64 - sh)) { in il4965_tx_status_reply_compressed_ba()
2524 return -1; in il4965_tx_status_reply_compressed_ba()
2527 /* don't use 64-bit values for now */ in il4965_tx_status_reply_compressed_ba()
2528 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; in il4965_tx_status_reply_compressed_ba()
2531 * transmitted bitmap and block-ack bitmap */ in il4965_tx_status_reply_compressed_ba()
2532 sent_bitmap = bitmap & agg->bitmap; in il4965_tx_status_reply_compressed_ba()
2535 * update driver's record of tx frame's status. */ in il4965_tx_status_reply_compressed_ba()
2541 i, (agg->start_idx + i) & 0xff, agg->start_idx + i); in il4965_tx_status_reply_compressed_ba()
2548 info = IEEE80211_SKB_CB(il->txq[scd_flow].skbs[agg->start_idx]); in il4965_tx_status_reply_compressed_ba()
2549 memset(&info->status, 0, sizeof(info->status)); in il4965_tx_status_reply_compressed_ba()
2550 info->flags |= IEEE80211_TX_STAT_ACK; in il4965_tx_status_reply_compressed_ba()
2551 info->flags |= IEEE80211_TX_STAT_AMPDU; in il4965_tx_status_reply_compressed_ba()
2552 info->status.ampdu_ack_len = successes; in il4965_tx_status_reply_compressed_ba()
2553 info->status.ampdu_len = agg->frame_count; in il4965_tx_status_reply_compressed_ba()
2554 il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info); in il4965_tx_status_reply_compressed_ba()
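A worked instance of the bitmap match-up described above: the peer's BA bitmap is shifted (line 2518) so bit 0 lines up with the first frame of the driver's Tx window, then ANDed with the driver's record of what was actually sent. Illustrative values only:

/* Window of 5 frames starting at agg->start_idx; no shift needed (sh = 0). */
u64 agg_bitmap = 0x1f;  /* driver sent frames 0..4 of the window            */
u64 ba_bitmap  = 0x17;  /* peer acked 0, 1, 2 and 4; frame 3's bit is clear */
int sh         = 0;     /* agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4)        */

u64 sent_bitmap = (ba_bitmap >> sh) & agg_bitmap;
/* Four bits set -> successes = 4, so ampdu_ack_len = 4 while ampdu_len is
 * agg->frame_count = 5, exactly what lines 2552-2553 report to mac80211. */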
2574 if (il->iw_mode == NL80211_IFTYPE_ADHOC) in il4965_find_station()
2578 return il->hw_params.bcast_id; in il4965_find_station()
2580 spin_lock_irqsave(&il->sta_lock, flags); in il4965_find_station()
2581 for (i = start; i < il->hw_params.max_stations; i++) in il4965_find_station()
2582 if (il->stations[i].used && in il4965_find_station()
2583 ether_addr_equal(il->stations[i].sta.sta.addr, addr)) { in il4965_find_station()
2588 D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations); in il4965_find_station()
2597 (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) || in il4965_find_station()
2598 (il->stations[ret].used & IL_STA_UCODE_INPROGRESS))) { in il4965_find_station()
2603 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_find_station()
2610 if (il->iw_mode == NL80211_IFTYPE_STATION) in il4965_get_ra_sta_id()
2622 return le32_to_cpup(&tx_resp->u.status + in il4965_get_scd_ssn()
2623 tx_resp->frame_count) & IEEE80211_MAX_SN; in il4965_get_scd_ssn()
2643 * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
2651 struct agg_tx_status *frame_status = tx_resp->u.agg_status; in il4965_tx_status_reply_tx()
2654 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); in il4965_tx_status_reply_tx()
2657 if (agg->wait_for_ba) in il4965_tx_status_reply_tx()
2658 D_TX_REPLY("got tx response w/o block-ack\n"); in il4965_tx_status_reply_tx()
2660 agg->frame_count = tx_resp->frame_count; in il4965_tx_status_reply_tx()
2661 agg->start_idx = start_idx; in il4965_tx_status_reply_tx()
2662 agg->rate_n_flags = rate_n_flags; in il4965_tx_status_reply_tx()
2663 agg->bitmap = 0; in il4965_tx_status_reply_tx()
2665 /* num frames attempted by Tx command */ in il4965_tx_status_reply_tx()
2666 if (agg->frame_count == 1) { in il4965_tx_status_reply_tx()
2667 /* Only one frame was attempted; no block-ack will arrive */ in il4965_tx_status_reply_tx()
2672 agg->frame_count, agg->start_idx, idx); in il4965_tx_status_reply_tx()
2674 info = IEEE80211_SKB_CB(il->txq[txq_id].skbs[idx]); in il4965_tx_status_reply_tx()
2675 info->status.rates[0].count = tx_resp->failure_frame + 1; in il4965_tx_status_reply_tx()
2676 info->flags &= ~IEEE80211_TX_CTL_AMPDU; in il4965_tx_status_reply_tx()
2677 info->flags |= il4965_tx_status_to_mac80211(status); in il4965_tx_status_reply_tx()
2681 tx_resp->failure_frame); in il4965_tx_status_reply_tx()
2684 agg->wait_for_ba = 0; in il4965_tx_status_reply_tx()
2686 /* Two or more frames were attempted; expect block-ack */ in il4965_tx_status_reply_tx()
2688 int start = agg->start_idx; in il4965_tx_status_reply_tx()
2691 /* Construct bit-map of pending frames within Tx win */ in il4965_tx_status_reply_tx()
2692 for (i = 0; i < agg->frame_count; i++) { in il4965_tx_status_reply_tx()
2705 agg->frame_count, txq_id, idx); in il4965_tx_status_reply_tx()
2707 skb = il->txq[txq_id].skbs[idx]; in il4965_tx_status_reply_tx()
2709 return -1; in il4965_tx_status_reply_tx()
2710 hdr = (struct ieee80211_hdr *) skb->data; in il4965_tx_status_reply_tx()
2712 sc = le16_to_cpu(hdr->seq_ctrl); in il4965_tx_status_reply_tx()
2716 IEEE80211_SEQ_TO_SN(sc), hdr->seq_ctrl); in il4965_tx_status_reply_tx()
2717 return -1; in il4965_tx_status_reply_tx()
2723 sh = idx - start; in il4965_tx_status_reply_tx()
2725 sh = (start - idx) + 0xff; in il4965_tx_status_reply_tx()
2729 } else if (sh < -64) in il4965_tx_status_reply_tx()
2730 sh = 0xff - (start - idx); in il4965_tx_status_reply_tx()
2732 sh = start - idx; in il4965_tx_status_reply_tx()
2742 agg->bitmap = bitmap; in il4965_tx_status_reply_tx()
2743 agg->start_idx = start; in il4965_tx_status_reply_tx()
2745 agg->frame_count, agg->start_idx, in il4965_tx_status_reply_tx()
2746 (unsigned long long)agg->bitmap); in il4965_tx_status_reply_tx()
2749 agg->wait_for_ba = 1; in il4965_tx_status_reply_tx()
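The shift gymnastics above reduce each attempted frame's TFD index to an offset from the window start, modulo the 256-entry queue, and set that bit; the result is stored at lines 2742-2743 for the later block-ack comparison. A simplified illustration of the idea (the real code also handles re-basing the window when a frame lands before start_idx):

/* Three frames attempted at TFD indexes 250, 251 and 1 (the ring wrapped). */
u64 bitmap = 0;
u16 start  = 250;
u16 idxs[] = { 250, 251, 1 };
size_t i;

for (i = 0; i < ARRAY_SIZE(idxs); i++) {
        int sh = (idxs[i] - start) & 0xff;      /* offset within the 256-entry ring */
        bitmap |= 1ULL << sh;
}
/* Offsets are 0, 1 and 7, so bitmap == 0x83. */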
2755 * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
2760 struct il_rx_pkt *pkt = rxb_addr(rxb); in il4965_hdl_tx() local
2761 u16 sequence = le16_to_cpu(pkt->hdr.sequence); in il4965_hdl_tx()
2764 struct il_tx_queue *txq = &il->txq[txq_id]; in il4965_hdl_tx()
2768 struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; in il4965_hdl_tx()
2769 u32 status = le32_to_cpu(tx_resp->u.status); in il4965_hdl_tx()
2776 if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) { in il4965_hdl_tx()
2777 IL_ERR("Read idx for DMA queue txq_id (%d) idx %d " in il4965_hdl_tx()
2778 "is out of range [0-%d] %d %d\n", txq_id, idx, in il4965_hdl_tx()
2779 txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr); in il4965_hdl_tx()
2783 txq->time_stamp = jiffies; in il4965_hdl_tx()
2785 skb = txq->skbs[txq->q.read_ptr]; in il4965_hdl_tx()
2787 memset(&info->status, 0, sizeof(info->status)); in il4965_hdl_tx()
2789 hdr = (struct ieee80211_hdr *) skb->data; in il4965_hdl_tx()
2790 if (ieee80211_is_data_qos(hdr->frame_control)) { in il4965_hdl_tx()
2796 if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) { in il4965_hdl_tx()
2809 il->iw_mode == NL80211_IFTYPE_STATION) { in il4965_hdl_tx()
2811 D_INFO("Stopped queues - RX waiting on passive channel\n"); in il4965_hdl_tx()
2814 spin_lock_irqsave(&il->sta_lock, flags); in il4965_hdl_tx()
2815 if (txq->sched_retry) { in il4965_hdl_tx()
2822 agg = &il->stations[sta_id].tid[tid].agg; in il4965_hdl_tx()
2827 if (tx_resp->frame_count == 1 && in il4965_hdl_tx()
2829 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; in il4965_hdl_tx()
2831 if (txq->q.read_ptr != (scd_ssn & 0xff)) { in il4965_hdl_tx()
2832 idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); in il4965_hdl_tx()
2838 if (il->mac80211_registered && in il4965_hdl_tx()
2839 il_queue_space(&txq->q) > txq->q.low_mark && in il4965_hdl_tx()
2840 agg->state != IL_EMPTYING_HW_QUEUE_DELBA) in il4965_hdl_tx()
2844 info->status.rates[0].count = tx_resp->failure_frame + 1; in il4965_hdl_tx()
2845 info->flags |= il4965_tx_status_to_mac80211(status); in il4965_hdl_tx()
2847 le32_to_cpu(tx_resp->rate_n_flags), in il4965_hdl_tx()
2853 le32_to_cpu(tx_resp->rate_n_flags), in il4965_hdl_tx()
2854 tx_resp->failure_frame); in il4965_hdl_tx()
2862 if (il->mac80211_registered && in il4965_hdl_tx()
2863 il_queue_space(&txq->q) > txq->q.low_mark) in il4965_hdl_tx()
2870 il4965_check_abort_status(il, tx_resp->frame_count, status); in il4965_hdl_tx()
2872 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_hdl_tx()
2876 * translate ucode response to mac80211 tx status control values
2882 struct ieee80211_tx_rate *r = &info->status.rates[0]; in il4965_hwrate_to_tx_control()
2884 info->status.antenna = in il4965_hwrate_to_tx_control()
2887 r->flags |= IEEE80211_TX_RC_MCS; in il4965_hwrate_to_tx_control()
2889 r->flags |= IEEE80211_TX_RC_GREEN_FIELD; in il4965_hwrate_to_tx_control()
2891 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; in il4965_hwrate_to_tx_control()
2893 r->flags |= IEEE80211_TX_RC_DUP_DATA; in il4965_hwrate_to_tx_control()
2895 r->flags |= IEEE80211_TX_RC_SHORT_GI; in il4965_hwrate_to_tx_control()
2896 r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band); in il4965_hwrate_to_tx_control()
2900 * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
2902 * Handles block-acknowledge notification from device, which reports success
2908 struct il_rx_pkt *pkt = rxb_addr(rxb); in il4965_hdl_compressed_ba() local
2909 struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; in il4965_hdl_compressed_ba()
2917 /* "flow" corresponds to Tx queue */ in il4965_hdl_compressed_ba()
2918 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); in il4965_hdl_compressed_ba()
2920 /* "ssn" is start of block-ack Tx win, corresponds to idx in il4965_hdl_compressed_ba()
2921 * (in Tx queue's circular buffer) of first TFD/frame in win */ in il4965_hdl_compressed_ba()
2922 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); in il4965_hdl_compressed_ba()
2924 if (scd_flow >= il->hw_params.max_txq_num) { in il4965_hdl_compressed_ba()
2929 txq = &il->txq[scd_flow]; in il4965_hdl_compressed_ba()
2930 sta_id = ba_resp->sta_id; in il4965_hdl_compressed_ba()
2931 tid = ba_resp->tid; in il4965_hdl_compressed_ba()
2932 agg = &il->stations[sta_id].tid[tid].agg; in il4965_hdl_compressed_ba()
2933 if (unlikely(agg->txq_id != scd_flow)) { in il4965_hdl_compressed_ba()
2941 scd_flow, agg->txq_id); in il4965_hdl_compressed_ba()
2945 /* Find idx just before block-ack win */ in il4965_hdl_compressed_ba()
2946 idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); in il4965_hdl_compressed_ba()
2948 spin_lock_irqsave(&il->sta_lock, flags); in il4965_hdl_compressed_ba()
2951 agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32, in il4965_hdl_compressed_ba()
2952 ba_resp->sta_id); in il4965_hdl_compressed_ba()
2954 "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl, in il4965_hdl_compressed_ba()
2955 (unsigned long long)le64_to_cpu(ba_resp->bitmap), in il4965_hdl_compressed_ba()
2956 ba_resp->scd_flow, ba_resp->scd_ssn); in il4965_hdl_compressed_ba()
2957 D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx, in il4965_hdl_compressed_ba()
2958 (unsigned long long)agg->bitmap); in il4965_hdl_compressed_ba()
2964 * block-ack win (we assume that they've been successfully in il4965_hdl_compressed_ba()
2966 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { in il4965_hdl_compressed_ba()
2967 /* calculate mac80211 ampdu sw queue to wake */ in il4965_hdl_compressed_ba()
2971 if (il_queue_space(&txq->q) > txq->q.low_mark && in il4965_hdl_compressed_ba()
2972 il->mac80211_registered && in il4965_hdl_compressed_ba()
2973 agg->state != IL_EMPTYING_HW_QUEUE_DELBA) in il4965_hdl_compressed_ba()
2979 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_hdl_compressed_ba()
3037 if (il->band == NL80211_BAND_5GHZ) in il4965_sta_alloc_lq()
3046 il4965_first_antenna(il->hw_params. in il4965_sta_alloc_lq()
3050 link_cmd->rs_table[i].rate_n_flags = rate_n_flags; in il4965_sta_alloc_lq()
3052 link_cmd->general_params.single_stream_ant_msk = in il4965_sta_alloc_lq()
3053 il4965_first_antenna(il->hw_params.valid_tx_ant); in il4965_sta_alloc_lq()
3055 link_cmd->general_params.dual_stream_ant_msk = in il4965_sta_alloc_lq()
3056 il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params. in il4965_sta_alloc_lq()
3058 if (!link_cmd->general_params.dual_stream_ant_msk) { in il4965_sta_alloc_lq()
3059 link_cmd->general_params.dual_stream_ant_msk = ANT_AB; in il4965_sta_alloc_lq()
3060 } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) { in il4965_sta_alloc_lq()
3061 link_cmd->general_params.dual_stream_ant_msk = in il4965_sta_alloc_lq()
3062 il->hw_params.valid_tx_ant; in il4965_sta_alloc_lq()
3065 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; in il4965_sta_alloc_lq()
3066 link_cmd->agg_params.agg_time_limit = in il4965_sta_alloc_lq()
3069 link_cmd->sta_id = sta_id; in il4965_sta_alloc_lq()
3075 * il4965_add_bssid_station - Add the special IBSS BSSID station
3099 spin_lock_irqsave(&il->sta_lock, flags); in il4965_add_bssid_station()
3100 il->stations[sta_id].used |= IL_STA_LOCAL; in il4965_add_bssid_station()
3101 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_add_bssid_station()
3108 return -ENOMEM; in il4965_add_bssid_station()
3115 spin_lock_irqsave(&il->sta_lock, flags); in il4965_add_bssid_station()
3116 il->stations[sta_id].lq = link_cmd; in il4965_add_bssid_station()
3117 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_add_bssid_station()
3143 u8 key_size = il->_4965.wep_keys[i].key_size; in il4965_static_wepkey_cmd()
3145 wep_cmd->key[i].key_idx = i; in il4965_static_wepkey_cmd()
3147 wep_cmd->key[i].key_offset = i; in il4965_static_wepkey_cmd()
3150 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET; in il4965_static_wepkey_cmd()
3152 wep_cmd->key[i].key_size = key_size; in il4965_static_wepkey_cmd()
3153 memcpy(&wep_cmd->key[i].key[3], il->_4965.wep_keys[i].key, key_size); in il4965_static_wepkey_cmd()
3156 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE; in il4965_static_wepkey_cmd()
3157 wep_cmd->num_keys = WEP_KEYS_MAX; in il4965_static_wepkey_cmd()
3171 lockdep_assert_held(&il->mutex); in il4965_restore_default_wep_keys()
3181 int idx = keyconf->keyidx; in il4965_remove_default_wep_key()
3183 lockdep_assert_held(&il->mutex); in il4965_remove_default_wep_key()
3187 memset(&il->_4965.wep_keys[idx], 0, sizeof(struct il_wep_key)); in il4965_remove_default_wep_key()
3204 int len = keyconf->keylen; in il4965_set_default_wep_key()
3205 int idx = keyconf->keyidx; in il4965_set_default_wep_key()
3207 lockdep_assert_held(&il->mutex); in il4965_set_default_wep_key()
3210 D_WEP("Bad WEP key length %d\n", keyconf->keylen); in il4965_set_default_wep_key()
3211 return -EINVAL; in il4965_set_default_wep_key()
3214 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; in il4965_set_default_wep_key()
3215 keyconf->hw_key_idx = HW_KEY_DEFAULT; in il4965_set_default_wep_key()
3216 il->stations[IL_AP_ID].keyinfo.cipher = keyconf->cipher; in il4965_set_default_wep_key()
3218 il->_4965.wep_keys[idx].key_size = len; in il4965_set_default_wep_key()
3219 memcpy(&il->_4965.wep_keys[idx].key, &keyconf->key, len); in il4965_set_default_wep_key()
3235 lockdep_assert_held(&il->mutex); in il4965_set_wep_dynamic_key_info()
3237 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; in il4965_set_wep_dynamic_key_info()
3240 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); in il4965_set_wep_dynamic_key_info()
3243 if (keyconf->keylen == WEP_KEY_LEN_128) in il4965_set_wep_dynamic_key_info()
3246 if (sta_id == il->hw_params.bcast_id) in il4965_set_wep_dynamic_key_info()
3249 spin_lock_irqsave(&il->sta_lock, flags); in il4965_set_wep_dynamic_key_info()
3251 il->stations[sta_id].keyinfo.cipher = keyconf->cipher; in il4965_set_wep_dynamic_key_info()
3252 il->stations[sta_id].keyinfo.keylen = keyconf->keylen; in il4965_set_wep_dynamic_key_info()
3253 il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx; in il4965_set_wep_dynamic_key_info()
3255 memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen); in il4965_set_wep_dynamic_key_info()
3257 memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key, in il4965_set_wep_dynamic_key_info()
3258 keyconf->keylen); in il4965_set_wep_dynamic_key_info()
3260 if ((il->stations[sta_id].sta.key. in il4965_set_wep_dynamic_key_info()
3262 il->stations[sta_id].sta.key.key_offset = in il4965_set_wep_dynamic_key_info()
3267 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, in il4965_set_wep_dynamic_key_info()
3270 il->stations[sta_id].sta.key.key_flags = key_flags; in il4965_set_wep_dynamic_key_info()
3271 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; in il4965_set_wep_dynamic_key_info()
3272 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; in il4965_set_wep_dynamic_key_info()
3274 memcpy(&sta_cmd, &il->stations[sta_id].sta, in il4965_set_wep_dynamic_key_info()
3276 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_set_wep_dynamic_key_info()
3289 lockdep_assert_held(&il->mutex); in il4965_set_ccmp_dynamic_key_info()
3292 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); in il4965_set_ccmp_dynamic_key_info()
3295 if (sta_id == il->hw_params.bcast_id) in il4965_set_ccmp_dynamic_key_info()
3298 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; in il4965_set_ccmp_dynamic_key_info()
3300 spin_lock_irqsave(&il->sta_lock, flags); in il4965_set_ccmp_dynamic_key_info()
3301 il->stations[sta_id].keyinfo.cipher = keyconf->cipher; in il4965_set_ccmp_dynamic_key_info()
3302 il->stations[sta_id].keyinfo.keylen = keyconf->keylen; in il4965_set_ccmp_dynamic_key_info()
3304 memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen); in il4965_set_ccmp_dynamic_key_info()
3306 memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen); in il4965_set_ccmp_dynamic_key_info()
3308 if ((il->stations[sta_id].sta.key. in il4965_set_ccmp_dynamic_key_info()
3310 il->stations[sta_id].sta.key.key_offset = in il4965_set_ccmp_dynamic_key_info()
3315 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, in il4965_set_ccmp_dynamic_key_info()
3318 il->stations[sta_id].sta.key.key_flags = key_flags; in il4965_set_ccmp_dynamic_key_info()
3319 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; in il4965_set_ccmp_dynamic_key_info()
3320 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; in il4965_set_ccmp_dynamic_key_info()
3322 memcpy(&sta_cmd, &il->stations[sta_id].sta, in il4965_set_ccmp_dynamic_key_info()
3324 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_set_ccmp_dynamic_key_info()
3337 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); in il4965_set_tkip_dynamic_key_info()
3340 if (sta_id == il->hw_params.bcast_id) in il4965_set_tkip_dynamic_key_info()
3343 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; in il4965_set_tkip_dynamic_key_info()
3344 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; in il4965_set_tkip_dynamic_key_info()
3346 spin_lock_irqsave(&il->sta_lock, flags); in il4965_set_tkip_dynamic_key_info()
3348 il->stations[sta_id].keyinfo.cipher = keyconf->cipher; in il4965_set_tkip_dynamic_key_info()
3349 il->stations[sta_id].keyinfo.keylen = 16; in il4965_set_tkip_dynamic_key_info()
3351 if ((il->stations[sta_id].sta.key. in il4965_set_tkip_dynamic_key_info()
3353 il->stations[sta_id].sta.key.key_offset = in il4965_set_tkip_dynamic_key_info()
3358 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, in il4965_set_tkip_dynamic_key_info()
3361 il->stations[sta_id].sta.key.key_flags = key_flags; in il4965_set_tkip_dynamic_key_info()
3363 /* This copy is actually not needed: we get the key with each TX */ in il4965_set_tkip_dynamic_key_info()
3364 memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16); in il4965_set_tkip_dynamic_key_info()
3366 memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16); in il4965_set_tkip_dynamic_key_info()
3368 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_set_tkip_dynamic_key_info()
3391 spin_lock_irqsave(&il->sta_lock, flags); in il4965_update_tkip_key()
3393 il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; in il4965_update_tkip_key()
3396 il->stations[sta_id].sta.key.tkip_rx_ttak[i] = in il4965_update_tkip_key()
3399 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; in il4965_update_tkip_key()
3400 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; in il4965_update_tkip_key()
3402 il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC); in il4965_update_tkip_key()
3404 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_update_tkip_key()
3416 lockdep_assert_held(&il->mutex); in il4965_remove_dynamic_key()
3418 il->_4965.key_mapping_keys--; in il4965_remove_dynamic_key()
3420 spin_lock_irqsave(&il->sta_lock, flags); in il4965_remove_dynamic_key()
3421 key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags); in il4965_remove_dynamic_key()
3424 D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); in il4965_remove_dynamic_key()
3426 if (keyconf->keyidx != keyidx) { in il4965_remove_dynamic_key()
3432 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_remove_dynamic_key()
3436 if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) { in il4965_remove_dynamic_key()
3437 IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx, in il4965_remove_dynamic_key()
3439 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_remove_dynamic_key()
3444 (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table)) in il4965_remove_dynamic_key()
3446 il->stations[sta_id].sta.key.key_offset); in il4965_remove_dynamic_key()
3447 memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key)); in il4965_remove_dynamic_key()
3448 memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo)); in il4965_remove_dynamic_key()
3449 il->stations[sta_id].sta.key.key_flags = in il4965_remove_dynamic_key()
3451 il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx; in il4965_remove_dynamic_key()
3452 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; in il4965_remove_dynamic_key()
3453 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; in il4965_remove_dynamic_key()
3458 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_remove_dynamic_key()
3461 memcpy(&sta_cmd, &il->stations[sta_id].sta, in il4965_remove_dynamic_key()
3463 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_remove_dynamic_key()
3474 lockdep_assert_held(&il->mutex); in il4965_set_dynamic_key()
3476 il->_4965.key_mapping_keys++; in il4965_set_dynamic_key()
3477 keyconf->hw_key_idx = HW_KEY_DYNAMIC; in il4965_set_dynamic_key()
3479 switch (keyconf->cipher) { in il4965_set_dynamic_key()
3494 keyconf->cipher); in il4965_set_dynamic_key()
3495 ret = -EINVAL; in il4965_set_dynamic_key()
3499 keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret); in il4965_set_dynamic_key()
3505 * il4965_alloc_bcast_station - add broadcast station into driver's station table.
3518 spin_lock_irqsave(&il->sta_lock, flags); in il4965_alloc_bcast_station()
3522 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_alloc_bcast_station()
3524 return -EINVAL; in il4965_alloc_bcast_station()
3527 il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE; in il4965_alloc_bcast_station()
3528 il->stations[sta_id].used |= IL_STA_BCAST; in il4965_alloc_bcast_station()
3529 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_alloc_bcast_station()
3535 return -ENOMEM; in il4965_alloc_bcast_station()
3538 spin_lock_irqsave(&il->sta_lock, flags); in il4965_alloc_bcast_station()
3539 il->stations[sta_id].lq = link_cmd; in il4965_alloc_bcast_station()
3540 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_alloc_bcast_station()
3546 * il4965_update_bcast_station - update broadcast station's LQ command
3556 u8 sta_id = il->hw_params.bcast_id; in il4965_update_bcast_station()
3561 return -ENOMEM; in il4965_update_bcast_station()
3564 spin_lock_irqsave(&il->sta_lock, flags); in il4965_update_bcast_station()
3565 if (il->stations[sta_id].lq) in il4965_update_bcast_station()
3566 kfree(il->stations[sta_id].lq); in il4965_update_bcast_station()
3569 il->stations[sta_id].lq = link_cmd; in il4965_update_bcast_station()
3570 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_update_bcast_station()
3582 * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
3590 lockdep_assert_held(&il->mutex); in il4965_sta_tx_modify_enable_tid()
3592 /* Remove "disable" flag, to enable Tx for this TID */ in il4965_sta_tx_modify_enable_tid()
3593 spin_lock_irqsave(&il->sta_lock, flags); in il4965_sta_tx_modify_enable_tid()
3594 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; in il4965_sta_tx_modify_enable_tid()
3595 il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); in il4965_sta_tx_modify_enable_tid()
3596 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; in il4965_sta_tx_modify_enable_tid()
3597 memcpy(&sta_cmd, &il->stations[sta_id].sta, in il4965_sta_tx_modify_enable_tid()
3599 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_sta_tx_modify_enable_tid()
3612 lockdep_assert_held(&il->mutex); in il4965_sta_rx_agg_start()
3616 return -ENXIO; in il4965_sta_rx_agg_start()
3618 spin_lock_irqsave(&il->sta_lock, flags); in il4965_sta_rx_agg_start()
3619 il->stations[sta_id].sta.station_flags_msk = 0; in il4965_sta_rx_agg_start()
3620 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; in il4965_sta_rx_agg_start()
3621 il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid; in il4965_sta_rx_agg_start()
3622 il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); in il4965_sta_rx_agg_start()
3623 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; in il4965_sta_rx_agg_start()
3624 memcpy(&sta_cmd, &il->stations[sta_id].sta, in il4965_sta_rx_agg_start()
3626 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_sta_rx_agg_start()
3638 lockdep_assert_held(&il->mutex); in il4965_sta_rx_agg_stop()
3643 return -ENXIO; in il4965_sta_rx_agg_stop()
3646 spin_lock_irqsave(&il->sta_lock, flags); in il4965_sta_rx_agg_stop()
3647 il->stations[sta_id].sta.station_flags_msk = 0; in il4965_sta_rx_agg_stop()
3648 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; in il4965_sta_rx_agg_stop()
3649 il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid; in il4965_sta_rx_agg_stop()
3650 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; in il4965_sta_rx_agg_stop()
3651 memcpy(&sta_cmd, &il->stations[sta_id].sta, in il4965_sta_rx_agg_stop()
3653 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_sta_rx_agg_stop()
3663 spin_lock_irqsave(&il->sta_lock, flags); in il4965_sta_modify_sleep_tx_count()
3664 il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK; in il4965_sta_modify_sleep_tx_count()
3665 il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; in il4965_sta_modify_sleep_tx_count()
3666 il->stations[sta_id].sta.sta.modify_mask = in il4965_sta_modify_sleep_tx_count()
3668 il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt); in il4965_sta_modify_sleep_tx_count()
3669 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; in il4965_sta_modify_sleep_tx_count()
3670 il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC); in il4965_sta_modify_sleep_tx_count()
3671 spin_unlock_irqrestore(&il->sta_lock, flags); in il4965_sta_modify_sleep_tx_count()
3678 if (il->ops->set_rxon_chain) { in il4965_update_chain_flags()
3679 il->ops->set_rxon_chain(il); in il4965_update_chain_flags()
3680 if (il->active.rx_chain != il->staging.rx_chain) in il4965_update_chain_flags()
3690 D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count); in il4965_clear_free_frames()
3692 while (!list_empty(&il->free_frames)) { in il4965_clear_free_frames()
3693 element = il->free_frames.next; in il4965_clear_free_frames()
3696 il->frames_count--; in il4965_clear_free_frames()
3699 if (il->frames_count) { in il4965_clear_free_frames()
3701 il->frames_count); in il4965_clear_free_frames()
3702 il->frames_count = 0; in il4965_clear_free_frames()
3711 if (list_empty(&il->free_frames)) { in il4965_get_free_frame()
3718 il->frames_count++; in il4965_get_free_frame()
3722 element = il->free_frames.next; in il4965_get_free_frame()
3731 list_add(&frame->list, &il->free_frames); in il4965_free_frame()
3738 lockdep_assert_held(&il->mutex); in il4965_fill_beacon_frame()
3740 if (!il->beacon_skb) in il4965_fill_beacon_frame()
3743 if (il->beacon_skb->len > left) in il4965_fill_beacon_frame()
3746 memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len); in il4965_fill_beacon_frame()
3748 return il->beacon_skb->len; in il4965_fill_beacon_frame()
3762 * variable-length part of the beacon. in il4965_set_beacon_tim()
3764 tim_idx = mgmt->u.beacon.variable - beacon; in il4965_set_beacon_tim()
3766 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ in il4965_set_beacon_tim()
3767 while ((tim_idx < (frame_size - 2)) && in il4965_set_beacon_tim()
3772 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { in il4965_set_beacon_tim()
3773 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx); in il4965_set_beacon_tim()
3774 tx_beacon_cmd->tim_size = beacon[tim_idx + 1]; in il4965_set_beacon_tim()
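A standalone sketch (hypothetical helper) of the element walk performed above: beacon information elements are <id, length, data[length]> records, so the cursor advances by length + 2 until the requested element id is found (WLAN_EID_TIM is 5) or the frame ends.

static int example_find_ie(const u8 *frame, int frame_size, int start, u8 eid)
{
	int i = start;

	/* each element: 1-byte id, 1-byte length, then 'length' bytes of data */
	while (i < frame_size - 2 && frame[i] != eid)
		i += frame[i + 1] + 2;

	return (i < frame_size - 1 && frame[i] == eid) ? i : -1;
}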
3787 * We have to set up the TX command, the TX Beacon command, and the in il4965_hw_get_beacon_cmd()
3791 lockdep_assert_held(&il->mutex); in il4965_hw_get_beacon_cmd()
3793 if (!il->beacon_enabled) { in il4965_hw_get_beacon_cmd()
3799 tx_beacon_cmd = &frame->u.beacon; in il4965_hw_get_beacon_cmd()
3802 /* Set up TX beacon contents */ in il4965_hw_get_beacon_cmd()
3804 il4965_fill_beacon_frame(il, tx_beacon_cmd->frame, in il4965_hw_get_beacon_cmd()
3805 sizeof(frame->u) - sizeof(*tx_beacon_cmd)); in il4965_hw_get_beacon_cmd()
3811 /* Set up TX command fields */ in il4965_hw_get_beacon_cmd()
3812 tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size); in il4965_hw_get_beacon_cmd()
3813 tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id; in il4965_hw_get_beacon_cmd()
3814 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; in il4965_hw_get_beacon_cmd()
3815 tx_beacon_cmd->tx.tx_flags = in il4965_hw_get_beacon_cmd()
3819 /* Set up TX beacon command fields */ in il4965_hw_get_beacon_cmd()
3820 il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame, in il4965_hw_get_beacon_cmd()
3825 il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant); in il4965_hw_get_beacon_cmd()
3826 rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS; in il4965_hw_get_beacon_cmd()
3829 tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags); in il4965_hw_get_beacon_cmd()
3845 return -ENOMEM; in il4965_send_beacon_cmd()
3852 return -EINVAL; in il4965_send_beacon_cmd()
3855 rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]); in il4965_send_beacon_cmd()
3865 struct il_tfd_tb *tb = &tfd->tbs[idx]; in il4965_tfd_tb_get_addr()
3867 dma_addr_t addr = get_unaligned_le32(&tb->lo); in il4965_tfd_tb_get_addr()
3870 ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << in il4965_tfd_tb_get_addr()
3879 struct il_tfd_tb *tb = &tfd->tbs[idx]; in il4965_tfd_tb_get_len()
3881 return le16_to_cpu(tb->hi_n_len) >> 4; in il4965_tfd_tb_get_len()
3887 struct il_tfd_tb *tb = &tfd->tbs[idx]; in il4965_tfd_set_tb()
3890 put_unaligned_le32(addr, &tb->lo); in il4965_tfd_set_tb()
3894 tb->hi_n_len = cpu_to_le16(hi_n_len); in il4965_tfd_set_tb()
3896 tfd->num_tbs = idx + 1; in il4965_tfd_set_tb()
3902 return tfd->num_tbs & 0x1f; in il4965_tfd_get_num_tbs()
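A self-contained sketch, assuming only the packing implied by the accessors above (struct and helper names are illustrative, not the driver's): each transfer buffer stores a 36-bit DMA address split across 'lo' (bits 0..31) and the low nibble of 'hi_n_len' (bits 32..35), with the 12-bit length in the remaining high bits.

#include <stdint.h>

struct example_tb {
	uint32_t lo;        /* address bits 0..31 */
	uint16_t hi_n_len;  /* bits 0..3: address bits 32..35, bits 4..15: length */
};

static void example_tb_pack(struct example_tb *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)(((addr >> 32) & 0xF) | ((len & 0xFFF) << 4));
}

static uint64_t example_tb_addr(const struct example_tb *tb)
{
	return tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t example_tb_len(const struct example_tb *tb)
{
	return tb->hi_n_len >> 4;
}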
3906 * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
3914 struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds; in il4965_hw_txq_free_tfd()
3916 struct pci_dev *dev = il->pci_dev; in il4965_hw_txq_free_tfd()
3917 int idx = txq->q.read_ptr; in il4965_hw_txq_free_tfd()
3934 dma_unmap_single(&dev->dev, in il4965_hw_txq_free_tfd()
3935 dma_unmap_addr(&txq->meta[idx], mapping), in il4965_hw_txq_free_tfd()
3936 dma_unmap_len(&txq->meta[idx], len), in il4965_hw_txq_free_tfd()
3941 dma_unmap_single(&dev->dev, il4965_tfd_tb_get_addr(tfd, i), in il4965_hw_txq_free_tfd()
3945 if (txq->skbs) { in il4965_hw_txq_free_tfd()
3946 struct sk_buff *skb = txq->skbs[txq->q.read_ptr]; in il4965_hw_txq_free_tfd()
3948 /* can be called from irqs-disabled context */ in il4965_hw_txq_free_tfd()
3951 txq->skbs[txq->q.read_ptr] = NULL; in il4965_hw_txq_free_tfd()
3964 q = &txq->q; in il4965_hw_txq_attach_buf_to_tfd()
3965 tfd_tmp = (struct il_tfd *)txq->tfds; in il4965_hw_txq_attach_buf_to_tfd()
3966 tfd = &tfd_tmp[q->write_ptr]; in il4965_hw_txq_attach_buf_to_tfd()
3973 /* Each TFD can point to a maximum of 20 Tx buffers */ in il4965_hw_txq_attach_buf_to_tfd()
3977 return -EINVAL; in il4965_hw_txq_attach_buf_to_tfd()
3990 * Tell the NIC where to find the circular buffer of Tx Frame Descriptors for
3991 * the given Tx queue, and enable the DMA channel used for that queue.
3993 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
3999 int txq_id = txq->q.id; in il4965_hw_tx_queue_init()
4001 /* Circular buffer (TFD queue in DRAM) physical base address */ in il4965_hw_tx_queue_init()
4002 il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8); in il4965_hw_tx_queue_init()
4015 struct il_rx_pkt *pkt = rxb_addr(rxb); in il4965_hdl_alive() local
4019 palive = &pkt->u.alive_frame; in il4965_hdl_alive()
4022 palive->is_valid, palive->ver_type, palive->ver_subtype); in il4965_hdl_alive()
4024 if (palive->ver_subtype == INITIALIZE_SUBTYPE) { in il4965_hdl_alive()
4026 memcpy(&il->card_alive_init, &pkt->u.raw, in il4965_hdl_alive()
4028 pwork = &il->init_alive_start; in il4965_hdl_alive()
4031 memcpy(&il->card_alive, &pkt->u.alive_frame, in il4965_hdl_alive()
4033 pwork = &il->alive_start; in il4965_hdl_alive()
4038 if (palive->is_valid == UCODE_VALID_OK) in il4965_hdl_alive()
4039 queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5)); in il4965_hdl_alive()
4045 * il4965_bg_stats_periodic - Timer callback to queue stats
4059 if (test_bit(S_EXIT_PENDING, &il->status)) in il4965_bg_stats_periodic()
4062 /* don't send host command if rf-kill is on */ in il4965_bg_stats_periodic()
4072 struct il_rx_pkt *pkt = rxb_addr(rxb); in il4965_hdl_beacon() local
4074 (struct il4965_beacon_notif *)pkt->u.raw; in il4965_hdl_beacon()
4076 u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); in il4965_hdl_beacon()
4079 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, in il4965_hdl_beacon()
4080 beacon->beacon_notify_hdr.failure_frame, in il4965_hdl_beacon()
4081 le32_to_cpu(beacon->ibss_mgr_status), in il4965_hdl_beacon()
4082 le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate); in il4965_hdl_beacon()
4084 il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); in il4965_hdl_beacon()
4094 if (il->mac80211_registered) in il4965_perform_ct_kill_task()
4095 ieee80211_stop_queues(il->hw); in il4965_perform_ct_kill_task()
4101 spin_lock_irqsave(&il->reg_lock, flags); in il4965_perform_ct_kill_task()
4104 spin_unlock_irqrestore(&il->reg_lock, flags); in il4965_perform_ct_kill_task()
4112 struct il_rx_pkt *pkt = rxb_addr(rxb); in il4965_hdl_card_state() local
4113 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); in il4965_hdl_card_state()
4114 unsigned long status = il->status; in il4965_hdl_card_state()
4140 set_bit(S_RFKILL, &il->status); in il4965_hdl_card_state()
4142 clear_bit(S_RFKILL, &il->status); in il4965_hdl_card_state()
4148 test_bit(S_RFKILL, &il->status))) in il4965_hdl_card_state()
4149 wiphy_rfkill_set_hw_state(il->hw->wiphy, in il4965_hdl_card_state()
4150 test_bit(S_RFKILL, &il->status)); in il4965_hdl_card_state()
4152 wake_up(&il->wait_command_queue); in il4965_hdl_card_state()
4156 * il4965_setup_handlers - Initialize Rx handler callbacks
4167 il->handlers[N_ALIVE] = il4965_hdl_alive; in il4965_setup_handlers()
4168 il->handlers[N_ERROR] = il_hdl_error; in il4965_setup_handlers()
4169 il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa; in il4965_setup_handlers()
4170 il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement; in il4965_setup_handlers()
4171 il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep; in il4965_setup_handlers()
4172 il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats; in il4965_setup_handlers()
4173 il->handlers[N_BEACON] = il4965_hdl_beacon; in il4965_setup_handlers()
4180 il->handlers[C_STATS] = il4965_hdl_c_stats; in il4965_setup_handlers()
4181 il->handlers[N_STATS] = il4965_hdl_stats; in il4965_setup_handlers()
4186 il->handlers[N_CARD_STATE] = il4965_hdl_card_state; in il4965_setup_handlers()
4188 il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon; in il4965_setup_handlers()
4190 il->handlers[N_RX_PHY] = il4965_hdl_rx_phy; in il4965_setup_handlers()
4191 il->handlers[N_RX_MPDU] = il4965_hdl_rx; in il4965_setup_handlers()
4192 il->handlers[N_RX] = il4965_hdl_rx; in il4965_setup_handlers()
4194 il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba; in il4965_setup_handlers()
4195 /* Tx response */ in il4965_setup_handlers()
4196 il->handlers[C_TX] = il4965_hdl_tx; in il4965_setup_handlers()
4200 * il4965_rx_handle - Main entry function for receiving responses from uCode
4202 * Uses the il->handlers callback function array to invoke
4204 * frame-received notifications, and other notifications.
4210 struct il_rx_pkt *pkt; in il4965_rx_handle() local
4211 struct il_rx_queue *rxq = &il->rxq; in il4965_rx_handle()
4221 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; in il4965_rx_handle()
4222 i = rxq->read; in il4965_rx_handle()
4229 total_empty = r - rxq->write_actual; in il4965_rx_handle()
4237 rxb = rxq->queue[i]; in il4965_rx_handle()
4239 /* If an RXB doesn't have an Rx queue slot associated with it, in il4965_rx_handle()
4240 * then a bug has been introduced in the queue refilling in il4965_rx_handle()
4241 * routines -- catch it here */ in il4965_rx_handle()
4244 rxq->queue[i] = NULL; in il4965_rx_handle()
4246 dma_unmap_page(&il->pci_dev->dev, rxb->page_dma, in il4965_rx_handle()
4247 PAGE_SIZE << il->hw_params.rx_page_order, in il4965_rx_handle()
4249 pkt = rxb_addr(rxb); in il4965_rx_handle()
4250 reclaim = il_need_reclaim(il, pkt); in il4965_rx_handle()
4255 if (il->handlers[pkt->hdr.cmd]) { in il4965_rx_handle()
4257 il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); in il4965_rx_handle()
4258 il->isr_stats.handlers[pkt->hdr.cmd]++; in il4965_rx_handle()
4259 il->handlers[pkt->hdr.cmd] (il, rxb); in il4965_rx_handle()
4263 i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); in il4965_rx_handle()
4267 * XXX: After here, we should always check rxb->page in il4965_rx_handle()
4269 * memory (pkt). Because some handler might have in il4965_rx_handle()
4276 * as we reclaim the driver command queue */ in il4965_rx_handle()
4277 if (rxb->page) in il4965_rx_handle()
4286 spin_lock_irqsave(&rxq->lock, flags); in il4965_rx_handle()
4287 if (rxb->page != NULL) { in il4965_rx_handle()
4288 rxb->page_dma = in il4965_rx_handle()
4289 dma_map_page(&il->pci_dev->dev, rxb->page, 0, in il4965_rx_handle()
4290 PAGE_SIZE << il->hw_params.rx_page_order, in il4965_rx_handle()
4293 if (unlikely(dma_mapping_error(&il->pci_dev->dev, in il4965_rx_handle()
4294 rxb->page_dma))) { in il4965_rx_handle()
4295 __il_free_pages(il, rxb->page); in il4965_rx_handle()
4296 rxb->page = NULL; in il4965_rx_handle()
4297 list_add_tail(&rxb->list, &rxq->rx_used); in il4965_rx_handle()
4299 list_add_tail(&rxb->list, &rxq->rx_free); in il4965_rx_handle()
4300 rxq->free_count++; in il4965_rx_handle()
4303 list_add_tail(&rxb->list, &rxq->rx_used); in il4965_rx_handle()
4305 spin_unlock_irqrestore(&rxq->lock, flags); in il4965_rx_handle()
4309 * restock the Rx queue so ucode won't assert. */ in il4965_rx_handle()
4313 rxq->read = i; in il4965_rx_handle()
4321 rxq->read = i; in il4965_rx_handle()
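A minimal sketch with hypothetical names, assuming a power-of-two ring size, of the walk above: the device advances a write index (closed_rb_num), the driver steps its read index toward it and hands one receive buffer to a handler per step.

static void example_drain_rx(unsigned int *read, unsigned int write,
			     unsigned int ring_size)
{
	unsigned int i = *read;

	while (i != write) {
		/* dispatch rx buffer i to the matching notification handler */
		i = (i + 1) & (ring_size - 1);
	}
	*read = i;
}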
4333 synchronize_irq(il->pci_dev->irq); in il4965_synchronize_irq()
4334 tasklet_kill(&il->irq_tasklet); in il4965_synchronize_irq()
4349 spin_lock_irqsave(&il->lock, flags); in il4965_irq_tasklet()
4357 /* Ack/clear/reset pending flow-handler (DMA) interrupts. in il4965_irq_tasklet()
4372 spin_unlock_irqrestore(&il->lock, flags); in il4965_irq_tasklet()
4390 il->isr_stats.hw++; in il4965_irq_tasklet()
4403 il->isr_stats.sch++; in il4965_irq_tasklet()
4409 il->isr_stats.alive++; in il4965_irq_tasklet()
4426 il->isr_stats.rfkill++; in il4965_irq_tasklet()
4434 set_bit(S_RFKILL, &il->status); in il4965_irq_tasklet()
4436 clear_bit(S_RFKILL, &il->status); in il4965_irq_tasklet()
4439 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); in il4965_irq_tasklet()
4447 il->isr_stats.ctkill++; in il4965_irq_tasklet()
4455 il->isr_stats.sw++; in il4965_irq_tasklet()
4461 * uCode wakes up after power-down sleep. in il4965_irq_tasklet()
4462 * Tell device about any new tx or host commands enqueued, in il4965_irq_tasklet()
4467 il_rx_queue_update_write_ptr(il, &il->rxq); in il4965_irq_tasklet()
4468 for (i = 0; i < il->hw_params.max_txq_num; i++) in il4965_irq_tasklet()
4469 il_txq_update_write_ptr(il, &il->txq[i]); in il4965_irq_tasklet()
4470 il->isr_stats.wakeup++; in il4965_irq_tasklet()
4474 /* All uCode command responses, including Tx command responses, in il4965_irq_tasklet()
4475 * Rx "responses" (frame-received notification), and other in il4965_irq_tasklet()
4479 il->isr_stats.rx++; in il4965_irq_tasklet()
4483 /* This "Tx" DMA channel is used only for loading uCode */ in il4965_irq_tasklet()
4486 il->isr_stats.tx++; in il4965_irq_tasklet()
4489 il->ucode_write_complete = 1; in il4965_irq_tasklet()
4490 wake_up(&il->wait_command_queue); in il4965_irq_tasklet()
4495 il->isr_stats.unhandled++; in il4965_irq_tasklet()
4498 if (inta & ~(il->inta_mask)) { in il4965_irq_tasklet()
4500 inta & ~il->inta_mask); in il4965_irq_tasklet()
4504 /* Re-enable all interrupts */ in il4965_irq_tasklet()
4505 /* only Re-enable if disabled by irq */ in il4965_irq_tasklet()
4506 if (test_bit(S_INT_ENABLED, &il->status)) in il4965_irq_tasklet()
4508 /* Re-enable RF_KILL if it occurred */ in il4965_irq_tasklet()
4562 il->debug_level = val; in il4965_store_debug_level()
4579 return -EAGAIN; in il4965_show_temperature()
4581 return sprintf(buf, "%d\n", il->temperature); in il4965_show_temperature()
4594 return sprintf(buf, "%d\n", il->tx_power_user_lmt); in il4965_show_tx_power()
4611 IL_ERR("failed setting tx power (0x%08x).\n", ret); in il4965_store_tx_power()
4644 il_free_fw_desc(il->pci_dev, &il->ucode_code); in il4965_dealloc_ucode_pci()
4645 il_free_fw_desc(il->pci_dev, &il->ucode_data); in il4965_dealloc_ucode_pci()
4646 il_free_fw_desc(il->pci_dev, &il->ucode_data_backup); in il4965_dealloc_ucode_pci()
4647 il_free_fw_desc(il->pci_dev, &il->ucode_init); in il4965_dealloc_ucode_pci()
4648 il_free_fw_desc(il->pci_dev, &il->ucode_init_data); in il4965_dealloc_ucode_pci()
4649 il_free_fw_desc(il->pci_dev, &il->ucode_boot); in il4965_dealloc_ucode_pci()
4666 const char *name_pre = il->cfg->fw_name_pre; in il4965_request_firmware()
4670 il->fw_idx = il->cfg->ucode_api_max; in il4965_request_firmware()
4671 sprintf(tag, "%d", il->fw_idx); in il4965_request_firmware()
4673 il->fw_idx--; in il4965_request_firmware()
4674 sprintf(tag, "%d", il->fw_idx); in il4965_request_firmware()
4677 if (il->fw_idx < il->cfg->ucode_api_min) { in il4965_request_firmware()
4679 return -ENOENT; in il4965_request_firmware()
4682 sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode"); in il4965_request_firmware()
4684 D_INFO("attempting to load firmware '%s'\n", il->firmware_name); in il4965_request_firmware()
4686 return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name, in il4965_request_firmware()
4687 &il->pci_dev->dev, GFP_KERNEL, il, in il4965_request_firmware()
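For illustration of the naming scheme built above (the prefix and API bounds here are hypothetical, not asserted):

/*
 * With fw_name_pre "iwlwifi-4965-" and ucode_api_max 2, the first request
 * would be for "iwlwifi-4965-2.ucode"; each failed attempt decrements
 * fw_idx and retries the next lower tag until it drops below ucode_api_min.
 */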
4700 struct il_ucode_header *ucode = (void *)ucode_raw->data; in il4965_load_firmware()
4704 il->ucode_ver = le32_to_cpu(ucode->ver); in il4965_load_firmware()
4705 api_ver = IL_UCODE_API(il->ucode_ver); in il4965_load_firmware()
4713 if (ucode_raw->size < hdr_size) { in il4965_load_firmware()
4715 return -EINVAL; in il4965_load_firmware()
4717 pieces->inst_size = le32_to_cpu(ucode->v1.inst_size); in il4965_load_firmware()
4718 pieces->data_size = le32_to_cpu(ucode->v1.data_size); in il4965_load_firmware()
4719 pieces->init_size = le32_to_cpu(ucode->v1.init_size); in il4965_load_firmware()
4720 pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size); in il4965_load_firmware()
4721 pieces->boot_size = le32_to_cpu(ucode->v1.boot_size); in il4965_load_firmware()
4722 src = ucode->v1.data; in il4965_load_firmware()
4727 if (ucode_raw->size != in il4965_load_firmware()
4728 hdr_size + pieces->inst_size + pieces->data_size + in il4965_load_firmware()
4729 pieces->init_size + pieces->init_data_size + pieces->boot_size) { in il4965_load_firmware()
4732 (int)ucode_raw->size); in il4965_load_firmware()
4733 return -EINVAL; in il4965_load_firmware()
4736 pieces->inst = src; in il4965_load_firmware()
4737 src += pieces->inst_size; in il4965_load_firmware()
4738 pieces->data = src; in il4965_load_firmware()
4739 src += pieces->data_size; in il4965_load_firmware()
4740 pieces->init = src; in il4965_load_firmware()
4741 src += pieces->init_size; in il4965_load_firmware()
4742 pieces->init_data = src; in il4965_load_firmware()
4743 src += pieces->init_data_size; in il4965_load_firmware()
4744 pieces->boot = src; in il4965_load_firmware()
4745 src += pieces->boot_size; in il4965_load_firmware()
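The v1 image layout that this parsing assumes, sketched schematically (a reading of the code above, not an authoritative format description):

/*
 *   [hdr: ver | inst_size | data_size | init_size | init_data_size | boot_size]
 *   [inst][data][init][init_data][boot]
 *
 * which is why the file length must equal hdr_size plus the five section
 * sizes before the section pointers are carved out of 'src'.
 */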
4751 * il4965_ucode_callback - callback when firmware was loaded
4762 const unsigned int api_max = il->cfg->ucode_api_max; in il4965_ucode_callback()
4763 const unsigned int api_min = il->cfg->ucode_api_min; in il4965_ucode_callback()
4773 if (il->fw_idx <= il->cfg->ucode_api_max) in il4965_ucode_callback()
4775 il->firmware_name); in il4965_ucode_callback()
4779 D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name, in il4965_ucode_callback()
4780 ucode_raw->size); in il4965_ucode_callback()
4783 if (ucode_raw->size < 4) { in il4965_ucode_callback()
4794 api_ver = IL_UCODE_API(il->ucode_ver); in il4965_ucode_callback()
4815 IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver), in il4965_ucode_callback()
4816 IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver)); in il4965_ucode_callback()
4818 snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version), in il4965_ucode_callback()
4819 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver), in il4965_ucode_callback()
4820 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver), in il4965_ucode_callback()
4821 IL_UCODE_SERIAL(il->ucode_ver)); in il4965_ucode_callback()
4825 * we will try to load a version with a smaller API -- maybe the in il4965_ucode_callback()
4829 D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver); in il4965_ucode_callback()
4837 if (pieces.inst_size > il->hw_params.max_inst_size) { in il4965_ucode_callback()
4843 if (pieces.data_size > il->hw_params.max_data_size) { in il4965_ucode_callback()
4849 if (pieces.init_size > il->hw_params.max_inst_size) { in il4965_ucode_callback()
4855 if (pieces.init_data_size > il->hw_params.max_data_size) { in il4965_ucode_callback()
4861 if (pieces.boot_size > il->hw_params.max_bsm_size) { in il4965_ucode_callback()
4867 /* Allocate ucode buffers for card's bus-master loading ... */ in il4965_ucode_callback()
4871 * 2) backup cache for save/restore during power-downs */ in il4965_ucode_callback()
4872 il->ucode_code.len = pieces.inst_size; in il4965_ucode_callback()
4873 il_alloc_fw_desc(il->pci_dev, &il->ucode_code); in il4965_ucode_callback()
4875 il->ucode_data.len = pieces.data_size; in il4965_ucode_callback()
4876 il_alloc_fw_desc(il->pci_dev, &il->ucode_data); in il4965_ucode_callback()
4878 il->ucode_data_backup.len = pieces.data_size; in il4965_ucode_callback()
4879 il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup); in il4965_ucode_callback()
4881 if (!il->ucode_code.v_addr || !il->ucode_data.v_addr || in il4965_ucode_callback()
4882 !il->ucode_data_backup.v_addr) in il4965_ucode_callback()
4887 il->ucode_init.len = pieces.init_size; in il4965_ucode_callback()
4888 il_alloc_fw_desc(il->pci_dev, &il->ucode_init); in il4965_ucode_callback()
4890 il->ucode_init_data.len = pieces.init_data_size; in il4965_ucode_callback()
4891 il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data); in il4965_ucode_callback()
4893 if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr) in il4965_ucode_callback()
4899 il->ucode_boot.len = pieces.boot_size; in il4965_ucode_callback()
4900 il_alloc_fw_desc(il->pci_dev, &il->ucode_boot); in il4965_ucode_callback()
4902 if (!il->ucode_boot.v_addr) in il4965_ucode_callback()
4908 il->sta_key_max_num = STA_KEY_MAX_NUM; in il4965_ucode_callback()
4910 /* Copy images into buffers for card's bus-master reads ... */ in il4965_ucode_callback()
4915 memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size); in il4965_ucode_callback()
4918 il->ucode_code.v_addr, (u32) il->ucode_code.p_addr); in il4965_ucode_callback()
4926 memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size); in il4965_ucode_callback()
4927 memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size); in il4965_ucode_callback()
4933 memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size); in il4965_ucode_callback()
4940 memcpy(il->ucode_init_data.v_addr, pieces.init_data, in il4965_ucode_callback()
4947 memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size); in il4965_ucode_callback()
4953 il->_4965.phy_calib_chain_noise_reset_cmd = in il4965_ucode_callback()
4955 il->_4965.phy_calib_chain_noise_gain_cmd = in il4965_ucode_callback()
4969 err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group); in il4965_ucode_callback()
4977 complete(&il->_4965.firmware_loading_complete); in il4965_ucode_callback()
4991 complete(&il->_4965.firmware_loading_complete); in il4965_ucode_callback()
4992 device_release_driver(&il->pci_dev->dev); in il4965_ucode_callback()
5058 max = ARRAY_SIZE(advanced_lookup) - 1; in il4965_desc_lookup()
5077 if (il->ucode_type == UCODE_INIT) in il4965_dump_nic_error_log()
5078 base = le32_to_cpu(il->card_alive_init.error_event_table_ptr); in il4965_dump_nic_error_log()
5080 base = le32_to_cpu(il->card_alive.error_event_table_ptr); in il4965_dump_nic_error_log()
5082 if (!il->ops->is_valid_rtc_data_addr(base)) { in il4965_dump_nic_error_log()
5084 base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT"); in il4965_dump_nic_error_log()
5092 IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count); in il4965_dump_nic_error_log()
5096 il->isr_stats.err_code = desc; in il4965_dump_nic_error_log()
5110 IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n", in il4965_dump_nic_error_log()
5124 spin_lock_irqsave(&il->lock, flags); in il4965_rf_kill_ct_config()
5127 spin_unlock_irqrestore(&il->lock, flags); in il4965_rf_kill_ct_config()
5130 cpu_to_le32(il->hw_params.ct_kill_threshold); in il4965_rf_kill_ct_config()
5138 il->hw_params.ct_kill_threshold); in il4965_rf_kill_ct_config()
5151 #define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
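A worked example of the macro above: (1 << hi) sets bit hi and ((1 << hi) - (1 << lo)) fills bits lo..hi-1, so the OR covers bits lo..hi inclusive.

/*
 *   IL_MASK(0, 7) == 0x80 | (0x80 - 0x01) == 0xFF   -> bits 0..7
 *   IL_MASK(3, 5) == 0x20 | (0x20 - 0x08) == 0x38   -> bits 3..5
 */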
5161 spin_lock_irqsave(&il->lock, flags); in il4965_alive_notify()
5163 /* Clear 4965's internal Tx Scheduler database */ in il4965_alive_notify()
5164 il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR); in il4965_alive_notify()
5165 a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET; in il4965_alive_notify()
5166 for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4) in il4965_alive_notify()
5168 for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4) in il4965_alive_notify()
5172 il->scd_base_addr + in il4965_alive_notify()
5173 IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num); in il4965_alive_notify()
5177 /* Tell 4965 where to find Tx byte count tables */ in il4965_alive_notify()
5178 il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10); in il4965_alive_notify()
5194 /* Initialize each Tx queue (including the command queue) */ in il4965_alive_notify()
5195 for (i = 0; i < il->hw_params.max_txq_num; i++) { in il4965_alive_notify()
5201 /* Max Tx Window size for Scheduler-ACK mode */ in il4965_alive_notify()
5203 il->scd_base_addr + in il4965_alive_notify()
5211 il->scd_base_addr + in il4965_alive_notify()
5220 (1 << il->hw_params.max_txq_num) - 1); in il4965_alive_notify()
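The expression above is the usual all-ones mask idiom; for instance:

/* worked example: with max_txq_num == 16, (1 << 16) - 1 == 0xFFFF, i.e. one
 * enable bit per Tx queue */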
5222 /* Activate all Tx DMA/FIFO channels */ in il4965_alive_notify()
5227 /* make sure all queues are not stopped */ in il4965_alive_notify()
5228 memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped)); in il4965_alive_notify()
5230 atomic_set(&il->queue_stop_count[i], 0); in il4965_alive_notify()
5232 /* reset to 0 to enable all the queues first */ in il4965_alive_notify()
5233 il->txq_ctx_active_msk = 0; in il4965_alive_notify()
5234 /* Map each Tx/cmd queue to its corresponding FIFO */ in il4965_alive_notify()
5245 il4965_tx_queue_set_status(il, &il->txq[i], ac, 0); in il4965_alive_notify()
5248 spin_unlock_irqrestore(&il->lock, flags); in il4965_alive_notify()
5254 * il4965_alive_start - called after N_ALIVE notification received
5265 if (il->card_alive.is_valid != UCODE_VALID_OK) { in il4965_alive_start()
5289 set_bit(S_ALIVE, &il->status); in il4965_alive_start()
5291 /* Enable watchdog to monitor the driver tx queues */ in il4965_alive_start()
5297 ieee80211_wake_queues(il->hw); in il4965_alive_start()
5299 il->active_rate = RATES_MASK; in il4965_alive_start()
5306 (struct il_rxon_cmd *)&il->active; in il4965_alive_start()
5308 il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; in il4965_alive_start()
5309 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; in il4965_alive_start()
5314 if (il->ops->set_rxon_chain) in il4965_alive_start()
5315 il->ops->set_rxon_chain(il); in il4965_alive_start()
5323 set_bit(S_READY, &il->status); in il4965_alive_start()
5332 wake_up(&il->wait_command_queue); in il4965_alive_start()
5337 queue_work(il->workqueue, &il->restart); in il4965_alive_start()
5352 exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status); in __il4965_down()
5354 /* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set in __il4965_down()
5356 timer_delete_sync(&il->watchdog); in __il4965_down()
5361 spin_lock_irq(&il->sta_lock); in __il4965_down()
5369 memset(il->_4965.wep_keys, 0, sizeof(il->_4965.wep_keys)); in __il4965_down()
5370 il->_4965.key_mapping_keys = 0; in __il4965_down()
5371 spin_unlock_irq(&il->sta_lock); in __il4965_down()
5377 wake_up_all(&il->wait_command_queue); in __il4965_down()
5382 clear_bit(S_EXIT_PENDING, &il->status); in __il4965_down()
5384 /* stop and reset the on-board processor */ in __il4965_down()
5388 spin_lock_irqsave(&il->lock, flags); in __il4965_down()
5390 spin_unlock_irqrestore(&il->lock, flags); in __il4965_down()
5393 if (il->mac80211_registered) in __il4965_down()
5394 ieee80211_stop_queues(il->hw); in __il4965_down()
5399 il->status = in __il4965_down()
5400 test_bit(S_RFKILL, &il->status) << S_RFKILL | in __il4965_down()
5401 test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED | in __il4965_down()
5402 test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING; in __il4965_down()
5408 il->status &= in __il4965_down()
5409 test_bit(S_RFKILL, &il->status) << S_RFKILL | in __il4965_down()
5410 test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED | in __il4965_down()
5411 test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR | in __il4965_down()
5412 test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING; in __il4965_down()
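Spelled out, the idiom in the two assignments above keeps only the named flags: test_bit() evaluates to 0 or 1, and shifting it back to its bit position rebuilds that single flag, so the &= form is effectively a mask over il->status (the earlier = form rebuilds the word from the RF-kill, geo-configured and exit-pending flags alone).

/* i.e. il->status &= BIT(S_RFKILL) | BIT(S_GEO_CONFIGURED) |
 *                    BIT(S_FW_ERROR) | BIT(S_EXIT_PENDING); */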
5415 * We disabled and synchronized interrupts, and il->mutex is taken, so in __il4965_down()
5419 spin_lock_irq(&il->reg_lock); in __il4965_down()
5424 /* Power-down device's busmaster DMA clocks */ in __il4965_down()
5432 spin_unlock_irq(&il->reg_lock); in __il4965_down()
5436 memset(&il->card_alive, 0, sizeof(struct il_alive_resp)); in __il4965_down()
5438 dev_kfree_skb(il->beacon_skb); in __il4965_down()
5439 il->beacon_skb = NULL; in __il4965_down()
5448 mutex_lock(&il->mutex); in il4965_down()
5450 mutex_unlock(&il->mutex); in il4965_down()
5470 il->hw_ready = true; in il4965_set_hw_ready()
5472 D_INFO("hardware %s ready\n", (il->hw_ready) ? "" : "not"); in il4965_set_hw_ready()
5480 il->hw_ready = false; in il4965_prepare_card_hw()
5483 if (il->hw_ready) in il4965_prepare_card_hw()
5495 if (ret != -ETIMEDOUT) in il4965_prepare_card_hw()
5507 if (test_bit(S_EXIT_PENDING, &il->status)) { in __il4965_up()
5509 return -EIO; in __il4965_up()
5512 if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) { in __il4965_up()
5514 return -EIO; in __il4965_up()
5524 if (!il->hw_ready) { in __il4965_up()
5527 return -EIO; in __il4965_up()
5532 clear_bit(S_RFKILL, &il->status); in __il4965_up()
5534 set_bit(S_RFKILL, &il->status); in __il4965_up()
5535 wiphy_rfkill_set_hw_state(il->hw->wiphy, true); in __il4965_up()
5546 il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM; in __il4965_up()
5568 * This will be used to initialize the on-board processor's in __il4965_up()
5570 memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr, in __il4965_up()
5571 il->ucode_data.len); in __il4965_up()
5578 ret = il->ops->load_ucode(il); in __il4965_up()
5593 set_bit(S_EXIT_PENDING, &il->status); in __il4965_up()
5595 clear_bit(S_EXIT_PENDING, &il->status); in __il4965_up()
5600 return -EIO; in __il4965_up()
5615 mutex_lock(&il->mutex); in il4965_bg_init_alive_start()
5616 if (test_bit(S_EXIT_PENDING, &il->status)) in il4965_bg_init_alive_start()
5619 il->ops->init_alive_start(il); in il4965_bg_init_alive_start()
5621 mutex_unlock(&il->mutex); in il4965_bg_init_alive_start()
5630 mutex_lock(&il->mutex); in il4965_bg_alive_start()
5631 if (test_bit(S_EXIT_PENDING, &il->status)) in il4965_bg_alive_start()
5636 mutex_unlock(&il->mutex); in il4965_bg_alive_start()
5645 mutex_lock(&il->mutex); in il4965_bg_run_time_calib_work()
5647 if (test_bit(S_EXIT_PENDING, &il->status) || in il4965_bg_run_time_calib_work()
5648 test_bit(S_SCANNING, &il->status)) { in il4965_bg_run_time_calib_work()
5649 mutex_unlock(&il->mutex); in il4965_bg_run_time_calib_work()
5653 if (il->start_calib) { in il4965_bg_run_time_calib_work()
5654 il4965_chain_noise_calibration(il, (void *)&il->_4965.stats); in il4965_bg_run_time_calib_work()
5655 il4965_sensitivity_calibration(il, (void *)&il->_4965.stats); in il4965_bg_run_time_calib_work()
5658 mutex_unlock(&il->mutex); in il4965_bg_run_time_calib_work()
5666 if (test_bit(S_EXIT_PENDING, &il->status)) in il4965_bg_restart()
5669 if (test_and_clear_bit(S_FW_ERROR, &il->status)) { in il4965_bg_restart()
5670 mutex_lock(&il->mutex); in il4965_bg_restart()
5671 il->is_open = 0; in il4965_bg_restart()
5675 mutex_unlock(&il->mutex); in il4965_bg_restart()
5677 ieee80211_restart_hw(il->hw); in il4965_bg_restart()
5681 mutex_lock(&il->mutex); in il4965_bg_restart()
5682 if (test_bit(S_EXIT_PENDING, &il->status)) { in il4965_bg_restart()
5683 mutex_unlock(&il->mutex); in il4965_bg_restart()
5688 mutex_unlock(&il->mutex); in il4965_bg_restart()
5697 if (test_bit(S_EXIT_PENDING, &il->status)) in il4965_bg_rx_replenish()
5700 mutex_lock(&il->mutex); in il4965_bg_rx_replenish()
5702 mutex_unlock(&il->mutex); in il4965_bg_rx_replenish()
5721 struct ieee80211_hw *hw = il->hw; in il4965_mac_setup_register()
5723 hw->rate_control_algorithm = "iwl-4965-rs"; in il4965_mac_setup_register()
5733 if (il->cfg->sku & IL_SKU_N) in il4965_mac_setup_register()
5734 hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS | in il4965_mac_setup_register()
5737 hw->sta_data_size = sizeof(struct il_station_priv); in il4965_mac_setup_register()
5738 hw->vif_data_size = sizeof(struct il_vif_priv); in il4965_mac_setup_register()
5740 hw->wiphy->interface_modes = in il4965_mac_setup_register()
5743 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; in il4965_mac_setup_register()
5744 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | in il4965_mac_setup_register()
5751 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; in il4965_mac_setup_register()
5753 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; in il4965_mac_setup_register()
5754 /* we create the 802.11 header and a zero-length SSID element */ in il4965_mac_setup_register()
5755 hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2; in il4965_mac_setup_register()
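The arithmetic above, spelled out: 24 bytes for the 802.11 management header plus 2 bytes for the id/length fields of the zero-length SSID element mentioned in the comment, leaving the rest of the probe-request budget for user-supplied IEs.

/* max_scan_ie_len = max_probe_length - 24 (mgmt header) - 2 (SSID id + len) */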
5758 hw->queues = 4; in il4965_mac_setup_register()
5760 hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL; in il4965_mac_setup_register()
5762 if (il->bands[NL80211_BAND_2GHZ].n_channels) in il4965_mac_setup_register()
5763 il->hw->wiphy->bands[NL80211_BAND_2GHZ] = in il4965_mac_setup_register()
5764 &il->bands[NL80211_BAND_2GHZ]; in il4965_mac_setup_register()
5765 if (il->bands[NL80211_BAND_5GHZ].n_channels) in il4965_mac_setup_register()
5766 il->hw->wiphy->bands[NL80211_BAND_5GHZ] = in il4965_mac_setup_register()
5767 &il->bands[NL80211_BAND_5GHZ]; in il4965_mac_setup_register()
5771 wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); in il4965_mac_setup_register()
5773 ret = ieee80211_register_hw(il->hw); in il4965_mac_setup_register()
5778 il->mac80211_registered = 1; in il4965_mac_setup_register()
5786 struct il_priv *il = hw->priv; in il4965_mac_start()
5792 mutex_lock(&il->mutex); in il4965_mac_start()
5794 mutex_unlock(&il->mutex); in il4965_mac_start()
5806 ret = wait_event_timeout(il->wait_command_queue, in il4965_mac_start()
5807 test_bit(S_READY, &il->status), in il4965_mac_start()
5810 if (!test_bit(S_READY, &il->status)) { in il4965_mac_start()
5813 return -ETIMEDOUT; in il4965_mac_start()
5820 il->is_open = 1; in il4965_mac_start()
5828 struct il_priv *il = hw->priv; in il4965_mac_stop()
5832 if (!il->is_open) in il4965_mac_stop()
5835 il->is_open = 0; in il4965_mac_stop()
5839 flush_workqueue(il->workqueue); in il4965_mac_stop()
5854 struct il_priv *il = hw->priv; in il4965_mac_tx()
5858 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, in il4965_mac_tx()
5859 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); in il4965_mac_tx()
5861 if (il4965_tx_skb(il, control->sta, skb)) in il4965_mac_tx()
5872 struct il_priv *il = hw->priv; in il4965_mac_update_tkip_key()
5886 struct il_priv *il = hw->priv; in il4965_mac_set_key()
5893 if (il->cfg->mod_params->sw_crypto) { in il4965_mac_set_key()
5894 D_MAC80211("leave - hwcrypto disabled\n"); in il4965_mac_set_key()
5895 return -EOPNOTSUPP; in il4965_mac_set_key()
5902 if (vif->type == NL80211_IFTYPE_ADHOC && in il4965_mac_set_key()
5903 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { in il4965_mac_set_key()
5904 D_MAC80211("leave - ad-hoc group key\n"); in il4965_mac_set_key()
5905 return -EOPNOTSUPP; in il4965_mac_set_key()
5910 return -EINVAL; in il4965_mac_set_key()
5912 mutex_lock(&il->mutex); in il4965_mac_set_key()
5921 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || in il4965_mac_set_key()
5922 key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) { in il4965_mac_set_key()
5924 is_default_wep_key = !il->_4965.key_mapping_keys; in il4965_mac_set_key()
5927 (key->hw_key_idx == HW_KEY_DEFAULT); in il4965_mac_set_key()
5948 ret = -EINVAL; in il4965_mac_set_key()
5951 mutex_unlock(&il->mutex); in il4965_mac_set_key()
5961 struct il_priv *il = hw->priv; in il4965_mac_ampdu_action()
5962 int ret = -EINVAL; in il4965_mac_ampdu_action()
5963 struct ieee80211_sta *sta = params->sta; in il4965_mac_ampdu_action()
5964 enum ieee80211_ampdu_mlme_action action = params->action; in il4965_mac_ampdu_action()
5965 u16 tid = params->tid; in il4965_mac_ampdu_action()
5966 u16 *ssn = &params->ssn; in il4965_mac_ampdu_action()
5968 D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid); in il4965_mac_ampdu_action()
5970 if (!(il->cfg->sku & IL_SKU_N)) in il4965_mac_ampdu_action()
5971 return -EACCES; in il4965_mac_ampdu_action()
5973 mutex_lock(&il->mutex); in il4965_mac_ampdu_action()
5983 if (test_bit(S_EXIT_PENDING, &il->status)) in il4965_mac_ampdu_action()
5987 D_HT("start Tx\n"); in il4965_mac_ampdu_action()
5993 D_HT("stop Tx\n"); in il4965_mac_ampdu_action()
5995 if (test_bit(S_EXIT_PENDING, &il->status)) in il4965_mac_ampdu_action()
6002 mutex_unlock(&il->mutex); in il4965_mac_ampdu_action()
6011 struct il_priv *il = hw->priv; in il4965_mac_sta_add()
6012 struct il_station_priv *sta_priv = (void *)sta->drv_priv; in il4965_mac_sta_add()
6013 bool is_ap = vif->type == NL80211_IFTYPE_STATION; in il4965_mac_sta_add()
6017 D_INFO("received request to add station %pM\n", sta->addr); in il4965_mac_sta_add()
6018 mutex_lock(&il->mutex); in il4965_mac_sta_add()
6019 D_INFO("proceeding to add station %pM\n", sta->addr); in il4965_mac_sta_add()
6020 sta_priv->common.sta_id = IL_INVALID_STATION; in il4965_mac_sta_add()
6022 atomic_set(&sta_priv->pending_frames, 0); in il4965_mac_sta_add()
6025 il_add_station_common(il, sta->addr, is_ap, sta, &sta_id); in il4965_mac_sta_add()
6027 IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret); in il4965_mac_sta_add()
6029 mutex_unlock(&il->mutex); in il4965_mac_sta_add()
6033 sta_priv->common.sta_id = sta_id; in il4965_mac_sta_add()
6036 D_INFO("Initializing rate scaling for station %pM\n", sta->addr); in il4965_mac_sta_add()
6038 mutex_unlock(&il->mutex); in il4965_mac_sta_add()
6047 struct il_priv *il = hw->priv; in il4965_mac_channel_switch()
6049 struct ieee80211_conf *conf = &hw->conf; in il4965_mac_channel_switch()
6050 struct ieee80211_channel *channel = ch_switch->chandef.chan; in il4965_mac_channel_switch()
6051 struct il_ht_config *ht_conf = &il->current_ht_config; in il4965_mac_channel_switch()
6056 mutex_lock(&il->mutex); in il4965_mac_channel_switch()
6061 if (test_bit(S_EXIT_PENDING, &il->status) || in il4965_mac_channel_switch()
6062 test_bit(S_SCANNING, &il->status) || in il4965_mac_channel_switch()
6063 test_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) in il4965_mac_channel_switch()
6069 if (!il->ops->set_channel_switch) in il4965_mac_channel_switch()
6072 ch = channel->hw_value; in il4965_mac_channel_switch()
6073 if (le16_to_cpu(il->active.channel) == ch) in il4965_mac_channel_switch()
6076 ch_info = il_get_channel_info(il, channel->band, ch); in il4965_mac_channel_switch()
6082 spin_lock_irq(&il->lock); in il4965_mac_channel_switch()
6084 il->current_ht_config.smps = conf->smps_mode; in il4965_mac_channel_switch()
6087 switch (cfg80211_get_chandef_type(&ch_switch->chandef)) { in il4965_mac_channel_switch()
6090 il->ht.is_40mhz = false; in il4965_mac_channel_switch()
6091 il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; in il4965_mac_channel_switch()
6094 il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW; in il4965_mac_channel_switch()
6095 il->ht.is_40mhz = true; in il4965_mac_channel_switch()
6098 il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; in il4965_mac_channel_switch()
6099 il->ht.is_40mhz = true; in il4965_mac_channel_switch()
6103 if ((le16_to_cpu(il->staging.channel) != ch)) in il4965_mac_channel_switch()
6104 il->staging.flags = 0; in il4965_mac_channel_switch()
6108 il_set_flags_for_band(il, channel->band, il->vif); in il4965_mac_channel_switch()
6110 spin_unlock_irq(&il->lock); in il4965_mac_channel_switch()
6117 set_bit(S_CHANNEL_SWITCH_PENDING, &il->status); in il4965_mac_channel_switch()
6118 il->switch_channel = cpu_to_le16(ch); in il4965_mac_channel_switch()
6119 if (il->ops->set_channel_switch(il, ch_switch)) { in il4965_mac_channel_switch()
6120 clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status); in il4965_mac_channel_switch()
6121 il->switch_channel = 0; in il4965_mac_channel_switch()
6122 ieee80211_chswitch_done(il->vif, false, 0); in il4965_mac_channel_switch()
6126 mutex_unlock(&il->mutex); in il4965_mac_channel_switch()
6134 struct il_priv *il = hw->priv; in il4965_configure_filter()
6154 mutex_lock(&il->mutex); in il4965_configure_filter()
6156 il->staging.filter_flags &= ~filter_nand; in il4965_configure_filter()
6157 il->staging.filter_flags |= filter_or; in il4965_configure_filter()
6164 mutex_unlock(&il->mutex); in il4965_configure_filter()
6189 mutex_lock(&il->mutex); in il4965_bg_txpower_work()
6195 if (test_bit(S_EXIT_PENDING, &il->status) || in il4965_bg_txpower_work()
6196 test_bit(S_SCANNING, &il->status)) in il4965_bg_txpower_work()
6200 * TX power since frames can be sent on non-radar channels while in il4965_bg_txpower_work()
6202 il->ops->send_tx_power(il); in il4965_bg_txpower_work()
6206 il->last_temperature = il->temperature; in il4965_bg_txpower_work()
6208 mutex_unlock(&il->mutex); in il4965_bg_txpower_work()
6214 il->workqueue = create_singlethread_workqueue(DRV_NAME); in il4965_setup_deferred_work()
6215 if (!il->workqueue) in il4965_setup_deferred_work()
6216 return -ENOMEM; in il4965_setup_deferred_work()
6218 init_waitqueue_head(&il->wait_command_queue); in il4965_setup_deferred_work()
6220 INIT_WORK(&il->restart, il4965_bg_restart); in il4965_setup_deferred_work()
6221 INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish); in il4965_setup_deferred_work()
6222 INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work); in il4965_setup_deferred_work()
6223 INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start); in il4965_setup_deferred_work()
6224 INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start); in il4965_setup_deferred_work()
6228 INIT_WORK(&il->txpower_work, il4965_bg_txpower_work); in il4965_setup_deferred_work()
6230 timer_setup(&il->stats_periodic, il4965_bg_stats_periodic, 0); in il4965_setup_deferred_work()
6232 timer_setup(&il->watchdog, il_bg_watchdog, 0); in il4965_setup_deferred_work()
6234 tasklet_setup(&il->irq_tasklet, il4965_irq_tasklet); in il4965_setup_deferred_work()
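/*
 * Handler shapes expected by the setup calls above -- a minimal sketch,
 * not the driver's il4965_bg_* handlers (those are defined elsewhere in
 * this file).  Each callback recovers its il_priv with container_of()
 * on the member it was registered through, which works because the
 * work/timer/tasklet objects are embedded in struct il_priv.
 */
static void example_bg_work(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, txpower_work);

	/* process context, runs on il->workqueue */
	(void)il;
}

static void example_stats_periodic(struct timer_list *t)
{
	struct il_priv *il = container_of(t, struct il_priv, stats_periodic);

	/* timer (softirq) context -- must not sleep */
	(void)il;
}

static void example_irq_tasklet(struct tasklet_struct *t)
{
	struct il_priv *il = container_of(t, struct il_priv, irq_tasklet);

	/* bottom half scheduled from the interrupt handler */
	(void)il;
}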
6242 cancel_work_sync(&il->txpower_work); in il4965_cancel_deferred_work()
6243 cancel_delayed_work_sync(&il->init_alive_start); in il4965_cancel_deferred_work()
6244 cancel_delayed_work(&il->alive_start); in il4965_cancel_deferred_work()
6245 cancel_work_sync(&il->run_time_calib_work); in il4965_cancel_deferred_work()
6249 timer_delete_sync(&il->stats_periodic); in il4965_cancel_deferred_work()
6274  * Acquire il->lock before calling this function!
6287 int txq_id = txq->q.id; in il4965_tx_queue_set_status()
6289 /* Find out whether to activate Tx queue */ in il4965_tx_queue_set_status()
6290 int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0; in il4965_tx_queue_set_status()
6300 txq->sched_retry = scd_retry; in il4965_tx_queue_set_status()
6302 D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate", in il4965_tx_queue_set_status()
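/*
 * Caller-side sketch for the locking note above: take il->lock around
 * the call.  The four-argument signature used here is an assumption
 * inferred from the fields visible in this fragment (il, txq, a TX
 * FIFO id, scd_retry); check the full definition before relying on it.
 */
static void example_activate_txq(struct il_priv *il, struct il_tx_queue *txq,
				 int tx_fifo, int scd_retry)
{
	unsigned long flags;

	spin_lock_irqsave(&il->lock, flags);
	il4965_tx_queue_set_status(il, txq, tx_fifo, scd_retry);
	spin_unlock_irqrestore(&il->lock, flags);
}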
6311 .tx = il4965_mac_tx,
6339 spin_lock_init(&il->sta_lock); in il4965_init_drv()
6340 spin_lock_init(&il->hcmd_lock); in il4965_init_drv()
6342 INIT_LIST_HEAD(&il->free_frames); in il4965_init_drv()
6344 mutex_init(&il->mutex); in il4965_init_drv()
6346 il->ieee_channels = NULL; in il4965_init_drv()
6347 il->ieee_rates = NULL; in il4965_init_drv()
6348 il->band = NL80211_BAND_2GHZ; in il4965_init_drv()
6350 il->iw_mode = NL80211_IFTYPE_STATION; in il4965_init_drv()
6351 il->current_ht_config.smps = IEEE80211_SMPS_STATIC; in il4965_init_drv()
6352 il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF; in il4965_init_drv()
6355 il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD; in il4965_init_drv()
6358 if (il->ops->set_rxon_chain) in il4965_init_drv()
6359 il->ops->set_rxon_chain(il); in il4965_init_drv()
6374 il4965_init_hw_rates(il, il->ieee_rates); in il4965_init_drv()
6389 kfree(il->scan_cmd); in il4965_uninit_drv()
6395 il->hw_rev = _il_rd(il, CSR_HW_REV); in il4965_hw_detect()
6396 il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG); in il4965_hw_detect()
6397 il->rev_id = il->pci_dev->revision; in il4965_hw_detect()
6398 D_INFO("HW Revision ID = 0x%X\n", il->rev_id); in il4965_hw_detect()
6431 il->hw_params.bcast_id = IL4965_BROADCAST_ID; in il4965_set_hw_params()
6432 il->hw_params.max_rxq_size = RX_QUEUE_SIZE; in il4965_set_hw_params()
6433 il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; in il4965_set_hw_params()
6434 if (il->cfg->mod_params->amsdu_size_8K) in il4965_set_hw_params()
6435 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K); in il4965_set_hw_params()
6437 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K); in il4965_set_hw_params()
6439 il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL; in il4965_set_hw_params()
6441 if (il->cfg->mod_params->disable_11n) in il4965_set_hw_params()
6442 il->cfg->sku &= ~IL_SKU_N; in il4965_set_hw_params()
6444 if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES && in il4965_set_hw_params()
6445 il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES) in il4965_set_hw_params()
6446 il->cfg->num_of_queues = in il4965_set_hw_params()
6447 il->cfg->mod_params->num_of_queues; in il4965_set_hw_params()
6449 il->hw_params.max_txq_num = il->cfg->num_of_queues; in il4965_set_hw_params()
6450 il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM; in il4965_set_hw_params()
6451 il->hw_params.scd_bc_tbls_size = in il4965_set_hw_params()
6452 il->cfg->num_of_queues * in il4965_set_hw_params()
6455 il->hw_params.tfd_size = sizeof(struct il_tfd); in il4965_set_hw_params()
6456 il->hw_params.max_stations = IL4965_STATION_COUNT; in il4965_set_hw_params()
6457 il->hw_params.max_data_size = IL49_RTC_DATA_SIZE; in il4965_set_hw_params()
6458 il->hw_params.max_inst_size = IL49_RTC_INST_SIZE; in il4965_set_hw_params()
6459 il->hw_params.max_bsm_size = BSM_SRAM_SIZE; in il4965_set_hw_params()
6460 il->hw_params.ht40_channel = BIT(NL80211_BAND_5GHZ); in il4965_set_hw_params()
6462 il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR; in il4965_set_hw_params()
6464 il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant); in il4965_set_hw_params()
6465 il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant); in il4965_set_hw_params()
6466 il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant; in il4965_set_hw_params()
6467 il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant; in il4965_set_hw_params()
6469 il->hw_params.ct_kill_threshold = in il4965_set_hw_params()
6472 il->hw_params.sens = &il4965_sensitivity; in il4965_set_hw_params()
6473 il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS; in il4965_set_hw_params()
6482 struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data); in il4965_pci_probe()
6492 err = -ENOMEM; in il4965_pci_probe()
6495 il = hw->priv; in il4965_pci_probe()
6496 il->hw = hw; in il4965_pci_probe()
6497 SET_IEEE80211_DEV(hw, &pdev->dev); in il4965_pci_probe()
6500 il->cfg = cfg; in il4965_pci_probe()
6501 il->ops = &il4965_ops; in il4965_pci_probe()
6503 il->debugfs_ops = &il4965_debugfs_ops; in il4965_pci_probe()
6505 il->pci_dev = pdev; in il4965_pci_probe()
6506 il->inta_mask = CSR_INI_SET_MASK; in il4965_pci_probe()
6516 err = -ENODEV; in il4965_pci_probe()
6522 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); in il4965_pci_probe()
6524 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in il4965_pci_probe()
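/*
 * The error handling between the two DMA-mask calls above is not part
 * of this extract.  A minimal sketch of the usual 36-bit-first,
 * 32-bit-fallback pattern; the goto label is illustrative, not
 * necessarily the label this probe routine uses.
 */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_warn(&pdev->dev, "no suitable DMA mask available\n");
			goto out_pci_disable_device;	/* illustrative label */
		}
	}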
6541 il->hw_base = pci_ioremap_bar(pdev, 0); in il4965_pci_probe()
6542 if (!il->hw_base) { in il4965_pci_probe()
6543 err = -ENODEV; in il4965_pci_probe()
6549 D_INFO("pci_resource_base = %p\n", il->hw_base); in il4965_pci_probe()
6554 spin_lock_init(&il->reg_lock); in il4965_pci_probe()
6555 spin_lock_init(&il->lock); in il4965_pci_probe()
6558 * stop and reset the on-board processor just in case it is in a in il4965_pci_probe()
6565 IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev); in il4965_pci_probe()
6568 * PCI Tx retries from interfering with C3 CPU state */ in il4965_pci_probe()
6572 if (!il->hw_ready) { in il4965_pci_probe()
6574 err = -EIO; in il4965_pci_probe()
6592 il4965_eeprom_get_mac(il, il->addresses[0].addr); in il4965_pci_probe()
6593 D_INFO("MAC address: %pM\n", il->addresses[0].addr); in il4965_pci_probe()
6594 il->hw->wiphy->addresses = il->addresses; in il4965_pci_probe()
6595 il->hw->wiphy->n_addresses = 1; in il4965_pci_probe()
6614 spin_lock_irqsave(&il->lock, flags); in il4965_pci_probe()
6616 spin_unlock_irqrestore(&il->lock, flags); in il4965_pci_probe()
6618 pci_enable_msi(il->pci_dev); in il4965_pci_probe()
6620 err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il); in il4965_pci_probe()
6622 IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq); in il4965_pci_probe()
6637 pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd); in il4965_pci_probe()
6640 pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd); in il4965_pci_probe()
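/*
 * The fixup between the config-space read and write above is not part
 * of this extract.  The usual workaround when MSI cannot be used is to
 * make sure legacy INTx is not left masked -- a sketch, offered as an
 * assumption about what happens at this point:
 */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	}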
6647 clear_bit(S_RFKILL, &il->status); in il4965_pci_probe()
6649 set_bit(S_RFKILL, &il->status); in il4965_pci_probe()
6651 wiphy_rfkill_set_hw_state(il->hw->wiphy, in il4965_pci_probe()
6652 test_bit(S_RFKILL, &il->status)); in il4965_pci_probe()
6656 init_completion(&il->_4965.firmware_loading_complete); in il4965_pci_probe()
6665 destroy_workqueue(il->workqueue); in il4965_pci_probe()
6666 il->workqueue = NULL; in il4965_pci_probe()
6668 free_irq(il->pci_dev->irq, il); in il4965_pci_probe()
6670 pci_disable_msi(il->pci_dev); in il4965_pci_probe()
6675 iounmap(il->hw_base); in il4965_pci_probe()
6681 ieee80211_free_hw(il->hw); in il4965_pci_probe()
6695 wait_for_completion(&il->_4965.firmware_loading_complete); in il4965_pci_remove()
6700 sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group); in il4965_pci_remove()
6706 set_bit(S_EXIT_PENDING, &il->status); in il4965_pci_remove()
6710 if (il->mac80211_registered) { in il4965_pci_remove()
6711 ieee80211_unregister_hw(il->hw); in il4965_pci_remove()
6712 il->mac80211_registered = 0; in il4965_pci_remove()
6729 spin_lock_irqsave(&il->lock, flags); in il4965_pci_remove()
6731 spin_unlock_irqrestore(&il->lock, flags); in il4965_pci_remove()
6737 if (il->rxq.bd) in il4965_pci_remove()
6738 il4965_rx_queue_free(il, &il->rxq); in il4965_pci_remove()
6746 * il->workqueue... so we can't take down the workqueue in il4965_pci_remove()
6748 destroy_workqueue(il->workqueue); in il4965_pci_remove()
6749 il->workqueue = NULL; in il4965_pci_remove()
6751 free_irq(il->pci_dev->irq, il); in il4965_pci_remove()
6752 pci_disable_msi(il->pci_dev); in il4965_pci_remove()
6753 iounmap(il->hw_base); in il4965_pci_remove()
6759 dev_kfree_skb(il->beacon_skb); in il4965_pci_remove()
6761 ieee80211_free_hw(il->hw); in il4965_pci_remove()
6765  * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask;
6766  * must be called under il->lock and with mac access held
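/*
 * Caller-side sketch for the requirement above: hold il->lock and have
 * NIC ("mac") access granted around the call.  The callee name and its
 * (il, mask) signature are assumptions -- the function definition
 * itself is not part of this extract -- and the NIC-access helpers are
 * only indicated by comments.
 */
static void example_set_sched_mask(struct il_priv *il, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&il->lock, flags);
	/* grab NIC ("mac") access here (helper not shown in this extract) */
	il4965_txq_set_sched(il, mask);		/* assumed callee */
	/* release NIC access before dropping the lock */
	spin_unlock_irqrestore(&il->lock, flags);
}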