Lines matching the full-text query +full:trade +full:- +full:off (i.e. "trade-off")

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
6 * Copyright (c) 2010, ST-Ericsson
24 if (rate->idx < 0) in wfx_get_hw_rate()
25 return -1; in wfx_get_hw_rate()
26 if (rate->flags & IEEE80211_TX_RC_MCS) { in wfx_get_hw_rate()
27 if (rate->idx > 7) { in wfx_get_hw_rate()
28 WARN(1, "wrong rate->idx value: %d", rate->idx); in wfx_get_hw_rate()
29 return -1; in wfx_get_hw_rate()
31 return rate->idx + 14; in wfx_get_hw_rate()
36 band = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]; in wfx_get_hw_rate()
37 if (rate->idx >= band->n_bitrates) { in wfx_get_hw_rate()
38 WARN(1, "wrong rate->idx value: %d", rate->idx); in wfx_get_hw_rate()
39 return -1; in wfx_get_hw_rate()
41 return band->bitrates[rate->idx].hw_value; in wfx_get_hw_rate()
49 struct wfx_dev *wdev = wvif->wdev; in wfx_tx_policy_build()
60 /* Pack two values in each byte of policy->rates */ in wfx_tx_policy_build()
64 policy->rates[rateid / 2] |= count; in wfx_tx_policy_build()
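The comment above says the Tx policy packs two values into each byte of policy->rates. A minimal sketch of that packing, assuming (the listing does not show it) that retry counts are 4-bit values and that odd rate indexes land in the high nibble; the helper name is invented for illustration:

        #include <linux/types.h>

        /* Hypothetical helper mirroring the packing used by wfx_tx_policy_build():
         * each byte of the policy's rates[] array carries two 4-bit retry counts,
         * even rate index in the low nibble, odd rate index in the high nibble
         * (the nibble choice is an assumption; only the "|=" line appears above).
         */
        static void wfx_pack_retry_count_sketch(u8 *rates, int rateid, u8 count)
        {
                if (rateid % 2)         /* odd index -> high nibble (assumed) */
                        count <<= 4;
                rates[rateid / 2] |= count;
        }

With 4 bits per count, a single rate can be retried at most 15 times, which is consistent with the min_t(u16, 15, ...) clamp visible in wfx_tx_fixup_rates() further down.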
70 return !memcmp(a->rates, b->rates, sizeof(a->rates)); in wfx_tx_policy_is_equal()
77 list_for_each_entry(it, &cache->used, link) in wfx_tx_policy_find()
79 return it - cache->cache; in wfx_tx_policy_find()
80 list_for_each_entry(it, &cache->free, link) in wfx_tx_policy_find()
82 return it - cache->cache; in wfx_tx_policy_find()
83 return -1; in wfx_tx_policy_find()
88 ++entry->usage_count; in wfx_tx_policy_use()
89 list_move(&entry->link, &cache->used); in wfx_tx_policy_use()
94 int ret = --entry->usage_count; in wfx_tx_policy_release()
97 list_move(&entry->link, &cache->free); in wfx_tx_policy_release()
104 struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache; in wfx_tx_policy_get()
110 spin_lock_bh(&cache->lock); in wfx_tx_policy_get()
111 if (list_empty(&cache->free)) { in wfx_tx_policy_get()
113 spin_unlock_bh(&cache->lock); in wfx_tx_policy_get()
122 entry = list_entry(cache->free.prev, struct wfx_tx_policy, link); in wfx_tx_policy_get()
123 memcpy(entry->rates, wanted.rates, sizeof(entry->rates)); in wfx_tx_policy_get()
124 entry->uploaded = false; in wfx_tx_policy_get()
125 entry->usage_count = 0; in wfx_tx_policy_get()
126 idx = entry - cache->cache; in wfx_tx_policy_get()
128 wfx_tx_policy_use(cache, &cache->cache[idx]); in wfx_tx_policy_get()
129 if (list_empty(&cache->free)) in wfx_tx_policy_get()
130 ieee80211_stop_queues(wvif->wdev->hw); in wfx_tx_policy_get()
131 spin_unlock_bh(&cache->lock); in wfx_tx_policy_get()
138 struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache; in wfx_tx_policy_put()
142 spin_lock_bh(&cache->lock); in wfx_tx_policy_put()
143 locked = list_empty(&cache->free); in wfx_tx_policy_put()
144 usage = wfx_tx_policy_release(cache, &cache->cache[idx]); in wfx_tx_policy_put()
146 ieee80211_wake_queues(wvif->wdev->hw); in wfx_tx_policy_put()
147 spin_unlock_bh(&cache->lock); in wfx_tx_policy_put()
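Read together, the wfx_tx_policy_get()/wfx_tx_policy_put() fragments implement a reference-counted cache of retry policies: get() claims an entry from the free list, copies the wanted rate set into it and, if that consumed the last free slot, stops the mac80211 queues; put() drops the reference and wakes the queues once a slot becomes free again. A condensed sketch of the put() side, reusing the driver types visible above (the exact branch structure between the listed lines is an assumption, and the sketch relies on the driver's own headers):

        /* Hypothetical condensation of wfx_tx_policy_put() as suggested by the
         * fragment: release one reference on slot 'idx' and, if the cache was
         * full when the lock was taken and the slot is now unused, wake the
         * queues that wfx_tx_policy_get() stopped.
         */
        static void wfx_tx_policy_put_sketch(struct wfx_vif *wvif, int idx)
        {
                struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;
                bool was_full;
                int usage;

                spin_lock_bh(&cache->lock);
                was_full = list_empty(&cache->free);    /* get() stopped the queues */
                usage = wfx_tx_policy_release(cache, &cache->cache[idx]);
                if (was_full && !usage)
                        ieee80211_wake_queues(wvif->wdev->hw);
                spin_unlock_bh(&cache->lock);
        }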
152 struct wfx_tx_policy *policies = wvif->tx_policy_cache.cache; in wfx_tx_policy_upload()
157 spin_lock_bh(&wvif->tx_policy_cache.lock); in wfx_tx_policy_upload()
158 for (i = 0; i < ARRAY_SIZE(wvif->tx_policy_cache.cache); ++i) { in wfx_tx_policy_upload()
163 if (i < ARRAY_SIZE(wvif->tx_policy_cache.cache)) { in wfx_tx_policy_upload()
166 spin_unlock_bh(&wvif->tx_policy_cache.lock); in wfx_tx_policy_upload()
169 spin_unlock_bh(&wvif->tx_policy_cache.lock); in wfx_tx_policy_upload()
171 } while (i < ARRAY_SIZE(wvif->tx_policy_cache.cache)); in wfx_tx_policy_upload()
180 wfx_tx_unlock(wvif->wdev); in wfx_tx_policy_upload_work()
185 struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache; in wfx_tx_policy_init()
190 spin_lock_init(&cache->lock); in wfx_tx_policy_init()
191 INIT_LIST_HEAD(&cache->used); in wfx_tx_policy_init()
192 INIT_LIST_HEAD(&cache->free); in wfx_tx_policy_init()
194 for (i = 0; i < ARRAY_SIZE(cache->cache); ++i) in wfx_tx_policy_init()
195 list_add(&cache->cache[i].link, &cache->free); in wfx_tx_policy_init()
204 if (!ieee80211_is_action(mgmt->frame_control)) in wfx_is_action_back()
206 if (mgmt->u.action.category != WLAN_CATEGORY_BACK) in wfx_is_action_back()
218 return (struct wfx_tx_priv *)tx_info->rate_driver_data; in wfx_skb_tx_priv()
223 struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data; in wfx_skb_txreq()
224 struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body; in wfx_skb_txreq()
232 struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data; in wfx_skb_wvif()
234 if (tx_priv->vif_id != hif->interface && hif->interface != 2) { in wfx_skb_wvif()
235 dev_err(wdev->dev, "corrupted skb"); in wfx_skb_wvif()
236 return wdev_to_wvif(wdev, hif->interface); in wfx_skb_wvif()
238 return wdev_to_wvif(wdev, tx_priv->vif_id); in wfx_skb_wvif()
244 struct wfx_sta_priv *sta_priv = sta ? (struct wfx_sta_priv *)&sta->drv_priv : NULL; in wfx_tx_get_link_id()
248 if (sta_priv && sta_priv->link_id) in wfx_tx_get_link_id()
249 return sta_priv->link_id; in wfx_tx_get_link_id()
250 if (vif->type != NL80211_IFTYPE_AP) in wfx_tx_get_link_id()
263 if (rates[j].idx == -1) in wfx_tx_fixup_rates()
266 * We have to trade off here. Most important is to respect the primary rate in wfx_tx_fixup_rates()
270 if (rates[j].idx >= rates[i - 1].idx) { in wfx_tx_fixup_rates()
271 rates[i - 1].count += rates[j].count; in wfx_tx_fixup_rates()
272 rates[i - 1].count = min_t(u16, 15, rates[i - 1].count); in wfx_tx_fixup_rates()
285 rates[i].count = 8; /* == hw->max_rate_tries */ in wfx_tx_fixup_rates()
291 rates[i].idx = -1; in wfx_tx_fixup_rates()
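This is the trade-off the matched comment refers to: rather than fully sorting the rate table handed over by minstrel, the loop keeps the primary rate in place and folds any entry that is not strictly below its predecessor into that predecessor, clamping the merged retry count to 15 (the 4-bit counts packed into the Tx policy, as seen earlier); leftover entries are invalidated with idx = -1, and the count of 8 at line 285 is equated with hw->max_rate_tries by the source comment. A simplified sketch, where the "keep in-order entries" branch and the final cleanup loop are assumptions filled in around the listed lines:

        #include <linux/minmax.h>
        #include <net/mac80211.h>

        /* Hypothetical, reduced version of the merge visible above. */
        static void wfx_fixup_rates_sketch(struct ieee80211_tx_rate *rates)
        {
                int i, j;

                for (i = 1, j = 1; j < IEEE80211_TX_MAX_RATES; j++) {
                        if (rates[j].idx == -1)
                                break;
                        if (rates[j].idx >= rates[i - 1].idx) {
                                /* out of order: merge into the previous entry */
                                rates[i - 1].count += rates[j].count;
                                rates[i - 1].count = min_t(u16, 15, rates[i - 1].count);
                        } else {
                                /* assumed branch: keep entries already descending */
                                rates[i] = rates[j];
                                i++;
                        }
                }
                /* assumed cleanup: invalidate whatever was merged away */
                for (; i < IEEE80211_TX_MAX_RATES; i++)
                        rates[i].idx = -1;
        }

For instance, {idx 5/count 3, idx 7/count 2, idx 3/count 4} would come out as {idx 5/count 5, idx 3/count 4, -1, -1} under this sketch.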
300 ret = wfx_tx_policy_get(wvif, tx_info->driver_rates, &tx_policy_renew); in wfx_tx_get_retry_policy_id()
302 dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy"); in wfx_tx_get_retry_policy_id()
305 wfx_tx_lock(wvif->wdev); in wfx_tx_get_retry_policy_id()
306 if (!schedule_work(&wvif->tx_policy_upload_work)) in wfx_tx_get_retry_policy_id()
307 wfx_tx_unlock(wvif->wdev); in wfx_tx_get_retry_policy_id()
314 if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_MCS)) in wfx_tx_get_frame_format()
316 else if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)) in wfx_tx_get_frame_format()
328 if (hw_key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) in wfx_tx_get_icv_len()
330 mic_space = (hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) ? 8 : 0; in wfx_tx_get_icv_len()
331 return hw_key->icv_len + mic_space; in wfx_tx_get_icv_len()
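The value computed here is the tail room later reserved with skb_put(skb, tx_priv->icv_size) in wfx_tx_inner(), so the firmware can append the encryption trailer itself. A hedged worked example using the standard mac80211 cipher parameters (assumed values; the AES_CMAC branch body is not part of this listing):

        /* Illustration (assumed mac80211 values, not shown in the listing):
         *  - CCMP: hw_key->icv_len == 8, no extra MIC space -> 8 tail bytes reserved
         *  - TKIP: hw_key->icv_len == 4, plus 8 bytes of Michael MIC -> 12 tail bytes
         *  - AES_CMAC (BIP) takes the early branch above, whose body the listing omits
         */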
340 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; in wfx_tx_inner()
341 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; in wfx_tx_inner()
343 size_t offset = (size_t)skb->data & 3; in wfx_tx_inner()
347 wfx_tx_fixup_rates(tx_info->driver_rates); in wfx_tx_inner()
349 /* From now on, tx_info->control is unusable */ in wfx_tx_inner()
350 memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv)); in wfx_tx_inner()
352 tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data; in wfx_tx_inner()
353 tx_priv->icv_size = wfx_tx_get_icv_len(hw_key); in wfx_tx_inner()
354 tx_priv->vif_id = wvif->id; in wfx_tx_inner()
359 skb_put(skb, tx_priv->icv_size); in wfx_tx_inner()
361 memset(skb->data, 0, wmsg_len); in wfx_tx_inner()
362 hif_msg = (struct wfx_hif_msg *)skb->data; in wfx_tx_inner()
363 hif_msg->len = cpu_to_le16(skb->len); in wfx_tx_inner()
364 hif_msg->id = HIF_REQ_ID_TX; in wfx_tx_inner()
365 if (tx_info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) in wfx_tx_inner()
366 hif_msg->interface = 2; in wfx_tx_inner()
368 hif_msg->interface = wvif->id; in wfx_tx_inner()
369 if (skb->len > le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf)) { in wfx_tx_inner()
370 dev_warn(wvif->wdev->dev, in wfx_tx_inner()
372 skb->len, le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf)); in wfx_tx_inner()
374 return -EIO; in wfx_tx_inner()
378 req = (struct wfx_hif_req_tx *)hif_msg->body; in wfx_tx_inner()
382 req->packet_id = atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF; in wfx_tx_inner()
383 req->packet_id |= IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16; in wfx_tx_inner()
384 req->packet_id |= queue_id << 28; in wfx_tx_inner()
386 req->fc_offset = offset; in wfx_tx_inner()
388 req->queue_id = 3 - queue_id; in wfx_tx_inner()
389 if (tx_info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { in wfx_tx_inner()
390 req->peer_sta_id = HIF_LINK_ID_NOT_ASSOCIATED; in wfx_tx_inner()
391 req->retry_policy_index = HIF_TX_RETRY_POLICY_INVALID; in wfx_tx_inner()
392 req->frame_format = HIF_FRAME_FORMAT_NON_HT; in wfx_tx_inner()
394 req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr); in wfx_tx_inner()
395 req->retry_policy_index = wfx_tx_get_retry_policy_id(wvif, tx_info); in wfx_tx_inner()
396 req->frame_format = wfx_tx_get_frame_format(tx_info); in wfx_tx_inner()
398 if (tx_info->driver_rates[0].flags & IEEE80211_TX_RC_SHORT_GI) in wfx_tx_inner()
399 req->short_gi = 1; in wfx_tx_inner()
400 if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) in wfx_tx_inner()
401 req->after_dtim = 1; in wfx_tx_inner()
405 if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) in wfx_tx_inner()
406 schedule_work(&wvif->update_tim_work); in wfx_tx_inner()
407 wfx_bh_request_tx(wvif->wdev); in wfx_tx_inner()
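In the wfx_tx_inner() fragment above, the three packet_id lines pack everything needed to match a frame against its Tx confirmation, which wfx_tx_confirm_cb() later looks up via wfx_pending_get(wdev, arg->packet_id): a wrapping counter in the low 16 bits, the 802.11 sequence number in bits 16-27, and the queue in the top nibble. A small hypothetical decoder for that layout (field widths inferred from the shifts and the 0xFFFF mask; names are invented):

        #include <linux/types.h>

        /* Hypothetical decoder for the packet_id layout built in wfx_tx_inner():
         *   [31:28] queue id   [27:16] 802.11 sequence number   [15:0] counter
         */
        static void wfx_unpack_packet_id_sketch(u32 packet_id,
                                                u8 *queue, u16 *seq, u16 *counter)
        {
                *queue = packet_id >> 28;
                *seq = (packet_id >> 16) & 0xFFF;
                *counter = packet_id & 0xFFFF;
        }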
413 struct wfx_dev *wdev = hw->priv; in wfx_tx()
415 struct ieee80211_sta *sta = control ? control->sta : NULL; in wfx_tx()
417 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; in wfx_tx()
422 WARN(skb->next || skb->prev, "skb is already member of a list"); in wfx_tx()
424 if (tx_info->control.vif) in wfx_tx()
425 wvif = (struct wfx_vif *)tx_info->control.vif->drv_priv; in wfx_tx()
434 dev_info(wdev->dev, "drop BA action\n"); in wfx_tx()
443 ieee80211_tx_status_irqsafe(wdev->hw, skb); in wfx_tx()
448 struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data; in wfx_skb_dtor()
449 struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body; in wfx_skb_dtor()
451 req->fc_offset; in wfx_skb_dtor()
457 wfx_tx_policy_put(wvif, req->retry_policy_index); in wfx_skb_dtor()
459 ieee80211_tx_status_irqsafe(wvif->wdev->hw, skb); in wfx_skb_dtor()
469 tx_count = arg->ack_failures; in wfx_tx_fill_rates()
470 if (!arg->status || arg->ack_failures) in wfx_tx_fill_rates()
473 rate = &tx_info->status.rates[i]; in wfx_tx_fill_rates()
474 if (rate->idx < 0) in wfx_tx_fill_rates()
476 if (tx_count < rate->count && arg->status == HIF_STATUS_TX_FAIL_RETRIES && in wfx_tx_fill_rates()
477 arg->ack_failures) in wfx_tx_fill_rates()
478 dev_dbg(wdev->dev, "not all retries were consumed: %d != %d\n", in wfx_tx_fill_rates()
479 rate->count, tx_count); in wfx_tx_fill_rates()
480 if (tx_count <= rate->count && tx_count && in wfx_tx_fill_rates()
481 arg->txed_rate != wfx_get_hw_rate(wdev, rate)) in wfx_tx_fill_rates()
482 dev_dbg(wdev->dev, "inconsistent tx_info rates: %d != %d\n", in wfx_tx_fill_rates()
483 arg->txed_rate, wfx_get_hw_rate(wdev, rate)); in wfx_tx_fill_rates()
484 if (tx_count > rate->count) { in wfx_tx_fill_rates()
485 tx_count -= rate->count; in wfx_tx_fill_rates()
487 rate->count = 0; in wfx_tx_fill_rates()
488 rate->idx = -1; in wfx_tx_fill_rates()
490 rate->count = tx_count; in wfx_tx_fill_rates()
495 dev_dbg(wdev->dev, "%d more retries than expected\n", tx_count); in wfx_tx_fill_rates()
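wfx_tx_fill_rates() redistributes the retry count reported by the device over the rate table that minstrel filled in: tx_count starts from ack_failures (plus one for the attempt that finally went out, when the frame succeeded or at least one ACK failure was recorded), each rate entry consumes up to its own count, the entry on which the budget runs out keeps the remainder, and everything after it is invalidated. A worked example under that reading (the branch structure between the listed lines is an assumption):

        /* Hypothetical example: minstrel proposed { idx 7, count 3 }, { idx 5, count 3 }
         * and the confirmation reports success with ack_failures == 4.
         *
         *   tx_count = 4 + 1 = 5          (failed attempts + the successful one)
         *   rate[0]: 5 > 3  -> keeps count 3, 2 attempts left to account for
         *   rate[1]: 2 <= 3 -> count becomes 2, budget exhausted
         *   rate[2..]:         count 0, idx -1
         *
         * Any budget left over at this point triggers the "more retries than
         * expected" debug message at the end of the fragment.
         */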
505 skb = wfx_pending_get(wdev, arg->packet_id); in wfx_tx_confirm_cb()
507 dev_warn(wdev->dev, "received unknown packet_id (%#.8x) from chip\n", in wfx_tx_confirm_cb()
508 arg->packet_id); in wfx_tx_confirm_cb()
521 skb_trim(skb, skb->len - tx_priv->icv_size); in wfx_tx_confirm_cb()
523 /* From this point on, tx_info->status can be used, but tx_priv must not be touched anymore */ in wfx_tx_confirm_cb()
525 memset(tx_info->rate_driver_data, 0, sizeof(tx_info->rate_driver_data)); in wfx_tx_confirm_cb()
526 memset(tx_info->pad, 0, sizeof(tx_info->pad)); in wfx_tx_confirm_cb()
528 if (!arg->status) { in wfx_tx_confirm_cb()
529 tx_info->status.tx_time = le32_to_cpu(arg->media_delay) - in wfx_tx_confirm_cb()
530 le32_to_cpu(arg->tx_queue_delay); in wfx_tx_confirm_cb()
531 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) in wfx_tx_confirm_cb()
532 tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; in wfx_tx_confirm_cb()
534 tx_info->flags |= IEEE80211_TX_STAT_ACK; in wfx_tx_confirm_cb()
535 } else if (arg->status == HIF_STATUS_TX_FAIL_REQUEUE) { in wfx_tx_confirm_cb()
536 WARN(!arg->requeue, "inconsistent status and result_flags"); in wfx_tx_confirm_cb()
537 if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { in wfx_tx_confirm_cb()
538 wvif->after_dtim_tx_allowed = false; /* DTIM period elapsed */ in wfx_tx_confirm_cb()
539 schedule_work(&wvif->update_tim_work); in wfx_tx_confirm_cb()
541 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; in wfx_tx_confirm_cb()
554 queue = &wvif->tx_queue[i]; in wfx_flush_vif()
558 if (wvif->wdev->chip_frozen) in wfx_flush_vif()
563 queue = &wvif->tx_queue[i]; in wfx_flush_vif()
564 if (wait_event_timeout(wvif->wdev->tx_dequeue, wfx_tx_queue_empty(wvif, queue), in wfx_flush_vif()
566 dev_warn(wvif->wdev->dev, "frames queued while flushing tx queues?"); in wfx_flush_vif()
572 struct wfx_dev *wdev = hw->priv; in wfx_flush()
579 wvif = (struct wfx_vif *)vif->drv_priv; in wfx_flush()
587 if (wdev->chip_frozen) in wfx_flush()