Lines Matching +full:hw +full:- +full:gro

1 // SPDX-License-Identifier: ISC
3 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
4 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
25 MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
29 MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
39 /* wil_ring_wmark_low - low watermark for available descriptor space */
42 return ring->size / 8; in wil_ring_wmark_low()
45 /* wil_ring_wmark_high - high watermark for available descriptor space */
48 return ring->size / 4; in wil_ring_wmark_high()
71 struct wil_ring *vring = &wil->ring_tx[i]; in wil_is_tx_idle()
72 int vring_index = vring - wil->ring_tx; in wil_is_tx_idle()
74 &wil->ring_tx_data[vring_index]; in wil_is_tx_idle()
76 spin_lock(&txdata->lock); in wil_is_tx_idle()
78 if (!vring->va || !txdata->enabled) { in wil_is_tx_idle()
79 spin_unlock(&txdata->lock); in wil_is_tx_idle()
85 if (test_bit(wil_status_napi_en, wil->status)) { in wil_is_tx_idle()
90 spin_unlock(&txdata->lock); in wil_is_tx_idle()
94 "tx vring is not empty -> NAPI\n"); in wil_is_tx_idle()
95 spin_unlock(&txdata->lock); in wil_is_tx_idle()
96 napi_synchronize(&wil->napi_tx); in wil_is_tx_idle()
98 spin_lock(&txdata->lock); in wil_is_tx_idle()
99 if (!vring->va || !txdata->enabled) in wil_is_tx_idle()
104 spin_unlock(&txdata->lock); in wil_is_tx_idle()
113 size_t sz = vring->size * sizeof(vring->va[0]); in wil_vring_alloc()
118 BUILD_BUG_ON(sizeof(vring->va[0]) != 32); in wil_vring_alloc()
120 vring->swhead = 0; in wil_vring_alloc()
121 vring->swtail = 0; in wil_vring_alloc()
122 vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL); in wil_vring_alloc()
123 if (!vring->ctx) { in wil_vring_alloc()
124 vring->va = NULL; in wil_vring_alloc()
125 return -ENOMEM; in wil_vring_alloc()
128 /* vring->va should be aligned on its size rounded up to power of 2 in wil_vring_alloc()
131 * HW has limitation that all vrings addresses must share the same in wil_vring_alloc()
140 if (wil->dma_addr_size > 32) in wil_vring_alloc()
143 vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL); in wil_vring_alloc()
144 if (!vring->va) { in wil_vring_alloc()
145 kfree(vring->ctx); in wil_vring_alloc()
146 vring->ctx = NULL; in wil_vring_alloc()
147 return -ENOMEM; in wil_vring_alloc()
150 if (wil->dma_addr_size > 32) in wil_vring_alloc()
152 DMA_BIT_MASK(wil->dma_addr_size)); in wil_vring_alloc()
158 for (i = 0; i < vring->size; i++) { in wil_vring_alloc()
160 &vring->va[i].tx.legacy; in wil_vring_alloc()
162 _d->dma.status = TX_DMA_STATUS_DU; in wil_vring_alloc()
165 wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size, in wil_vring_alloc()
166 vring->va, &vring->pa, vring->ctx); in wil_vring_alloc()
174 struct vring_tx_desc *d = &desc->legacy; in wil_txdesc_unmap()
175 dma_addr_t pa = wil_desc_addr(&d->dma.addr); in wil_txdesc_unmap()
176 u16 dmalen = le16_to_cpu(d->dma.length); in wil_txdesc_unmap()
178 switch (ctx->mapped_as) { in wil_txdesc_unmap()
193 size_t sz = vring->size * sizeof(vring->va[0]); in wil_vring_free()
195 lockdep_assert_held(&wil->mutex); in wil_vring_free()
196 if (!vring->is_rx) { in wil_vring_free()
197 int vring_index = vring - wil->ring_tx; in wil_vring_free()
200 vring_index, vring->size, vring->va, in wil_vring_free()
201 &vring->pa, vring->ctx); in wil_vring_free()
204 vring->size, vring->va, in wil_vring_free()
205 &vring->pa, vring->ctx); in wil_vring_free()
213 if (!vring->is_rx) { in wil_vring_free()
216 &vring->va[vring->swtail].tx.legacy; in wil_vring_free()
218 ctx = &vring->ctx[vring->swtail]; in wil_vring_free()
222 vring->swtail); in wil_vring_free()
223 vring->swtail = wil_ring_next_tail(vring); in wil_vring_free()
228 if (ctx->skb) in wil_vring_free()
229 dev_kfree_skb_any(ctx->skb); in wil_vring_free()
230 vring->swtail = wil_ring_next_tail(vring); in wil_vring_free()
234 &vring->va[vring->swhead].rx.legacy; in wil_vring_free()
236 ctx = &vring->ctx[vring->swhead]; in wil_vring_free()
238 pa = wil_desc_addr(&d->dma.addr); in wil_vring_free()
239 dmalen = le16_to_cpu(d->dma.length); in wil_vring_free()
241 kfree_skb(ctx->skb); in wil_vring_free()
245 dma_free_coherent(dev, sz, (void *)vring->va, vring->pa); in wil_vring_free()
246 kfree(vring->ctx); in wil_vring_free()
247 vring->pa = 0; in wil_vring_free()
248 vring->va = NULL; in wil_vring_free()
249 vring->ctx = NULL; in wil_vring_free()
260 unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen(); in wil_vring_alloc_skb()
262 volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy; in wil_vring_alloc_skb()
267 return -ENOMEM; in wil_vring_alloc_skb()
274 * which failed the HW checksum calculation in wil_vring_alloc_skb()
276 skb->ip_summed = CHECKSUM_NONE; in wil_vring_alloc_skb()
278 pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE); in wil_vring_alloc_skb()
281 return -ENOMEM; in wil_vring_alloc_skb()
284 d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT; in wil_vring_alloc_skb()
285 wil_desc_addr_set(&d->dma.addr, pa); in wil_vring_alloc_skb()
289 d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */ in wil_vring_alloc_skb()
290 d->dma.length = cpu_to_le16(sz); in wil_vring_alloc_skb()
292 vring->ctx[i].skb = skb; in wil_vring_alloc_skb()
301 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
302 * - Rx descriptor: 32 bytes
303 * - Phy info
324 struct ieee80211_channel *ch = wil->monitor_chandef.chan; in wil_rx_add_radiotap_header()
335 rtap->rthdr.it_version = PKTHDR_RADIOTAP_VERSION; in wil_rx_add_radiotap_header()
336 rtap->rthdr.it_len = cpu_to_le16(rtap_len); in wil_rx_add_radiotap_header()
337 rtap->rthdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | in wil_rx_add_radiotap_header()
340 if (d->dma.status & RX_DMA_STATUS_ERROR) in wil_rx_add_radiotap_header()
341 rtap->flags |= IEEE80211_RADIOTAP_F_BADFCS; in wil_rx_add_radiotap_header()
343 rtap->chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320); in wil_rx_add_radiotap_header()
344 rtap->chnl_flags = cpu_to_le16(0); in wil_rx_add_radiotap_header()
346 rtap->mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS; in wil_rx_add_radiotap_header()
347 rtap->mcs_flags = 0; in wil_rx_add_radiotap_header()
348 rtap->mcs_index = wil_rxdesc_mcs(d); in wil_rx_add_radiotap_header()
354 struct wil_ring *ring = &wil->ring_rx; in wil_is_rx_idle()
356 _d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy; in wil_is_rx_idle()
357 if (_d->dma.status & RX_DMA_STATUS_DU) in wil_is_rx_idle()
367 struct wil6210_vif *vif = wil->vifs[mid]; in wil_rx_get_cid_by_skb()
380 if (vif->wdev.iftype == NL80211_IFTYPE_MONITOR) in wil_rx_get_cid_by_skb()
385 if (unlikely(skb->len < ETH_HLEN + snaplen)) { in wil_rx_get_cid_by_skb()
388 skb->len); in wil_rx_get_cid_by_skb()
389 return -ENOENT; in wil_rx_get_cid_by_skb()
393 if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) { in wil_rx_get_cid_by_skb()
395 skb->len); in wil_rx_get_cid_by_skb()
396 return -ENOENT; in wil_rx_get_cid_by_skb()
398 hdr = (void *)skb->data; in wil_rx_get_cid_by_skb()
399 ta = hdr->addr2; in wil_rx_get_cid_by_skb()
402 if (wil->max_assoc_sta <= WIL6210_RX_DESC_MAX_CID) in wil_rx_get_cid_by_skb()
409 if (vif->wdev.iftype != NL80211_IFTYPE_P2P_GO && in wil_rx_get_cid_by_skb()
410 vif->wdev.iftype != NL80211_IFTYPE_AP) in wil_rx_get_cid_by_skb()
417 for (i = cid; i < wil->max_assoc_sta; i += WIL6210_RX_DESC_MAX_CID) { in wil_rx_get_cid_by_skb()
418 if (wil->sta[i].status != wil_sta_unused && in wil_rx_get_cid_by_skb()
419 ether_addr_equal(wil->sta[i].addr, ta)) { in wil_rx_get_cid_by_skb()
424 if (i >= wil->max_assoc_sta) { in wil_rx_get_cid_by_skb()
426 ta, vif->wdev.iftype, ftype, skb->len); in wil_rx_get_cid_by_skb()
427 cid = -ENOENT; in wil_rx_get_cid_by_skb()
435 * Rx descriptor copied to skb->cb
450 unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen; in wil_vring_reap_rx()
457 BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb)); in wil_vring_reap_rx()
463 i = (int)vring->swhead; in wil_vring_reap_rx()
464 _d = &vring->va[i].rx.legacy; in wil_vring_reap_rx()
465 if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) { in wil_vring_reap_rx()
470 skb = vring->ctx[i].skb; in wil_vring_reap_rx()
471 vring->ctx[i].skb = NULL; in wil_vring_reap_rx()
479 pa = wil_desc_addr(&d->dma.addr); in wil_vring_reap_rx()
482 dmalen = le16_to_cpu(d->dma.length); in wil_vring_reap_rx()
490 vif = wil->vifs[mid]; in wil_vring_reap_rx()
507 prefetch(skb->data); in wil_vring_reap_rx()
510 skb->data, skb_headlen(skb), false); in wil_vring_reap_rx()
513 if (cid == -ENOENT) { in wil_vring_reap_rx()
518 stats = &wil->sta[cid].stats; in wil_vring_reap_rx()
520 stats->last_mcs_rx = wil_rxdesc_mcs(d); in wil_vring_reap_rx()
521 if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs)) in wil_vring_reap_rx()
522 stats->rx_per_mcs[stats->last_mcs_rx]++; in wil_vring_reap_rx()
525 if (ndev->type == ARPHRD_IEEE80211_RADIOTAP) in wil_vring_reap_rx()
529 if (ndev->type != ARPHRD_ETHER) in wil_vring_reap_rx()
531 /* Non-data frames may be delivered through Rx DMA channel (ex: BAR) in wil_vring_reap_rx()
542 "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n", in wil_vring_reap_rx()
544 stats->rx_non_data_frame++; in wil_vring_reap_rx()
555 "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n", in wil_vring_reap_rx()
560 skb->data, skb_headlen(skb), false); in wil_vring_reap_rx()
566 /* L4 IDENT is on when HW calculated checksum, check status in wil_vring_reap_rx()
570 if (likely(d->dma.status & RX_DMA_STATUS_L4I)) { in wil_vring_reap_rx()
572 if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)) in wil_vring_reap_rx()
573 skb->ip_summed = CHECKSUM_UNNECESSARY; in wil_vring_reap_rx()
574 /* If HW reports bad checksum, let IP stack re-check it in wil_vring_reap_rx()
575 * For example, HW don't understand Microsoft IP stack that in wil_vring_reap_rx()
576 * mis-calculates TCP checksum - if it should be 0x0, in wil_vring_reap_rx()
580 stats->rx_csum_err++; in wil_vring_reap_rx()
585 * +-------+-------+---------+------------+------+ in wil_vring_reap_rx()
587 * +-------+-------+---------+------------+------+ in wil_vring_reap_rx()
590 memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN); in wil_vring_reap_rx()
607 struct net_device *ndev = wil->main_ndev; in wil_rx_refill()
608 struct wil_ring *v = &wil->ring_rx; in wil_rx_refill()
611 int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ? in wil_rx_refill()
615 (next_tail != v->swhead) && (count-- > 0); in wil_rx_refill()
616 v->swtail = next_tail) { in wil_rx_refill()
617 rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom); in wil_rx_refill()
620 rc, v->swtail); in wil_rx_refill()
626 * committing them to HW in wil_rx_refill()
630 wil_w(wil, v->hwtail, v->swtail); in wil_rx_refill()
636 * reverse_memcmp - Compare two areas of memory, in reverse order
649 for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0; in reverse_memcmp()
650 --su1, --su2, count--) { in reverse_memcmp()
651 res = *su1 - *su2; in reverse_memcmp()
665 struct wil_sta_info *s = &wil->sta[cid]; in wil_rx_crypto_check()
666 struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx : in wil_rx_crypto_check()
667 &s->tid_crypto_rx[tid]; in wil_rx_crypto_check()
668 struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id]; in wil_rx_crypto_check()
669 const u8 *pn = (u8 *)&d->mac.pn; in wil_rx_crypto_check()
671 if (!cc->key_set) { in wil_rx_crypto_check()
675 return -EINVAL; in wil_rx_crypto_check()
678 if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) { in wil_rx_crypto_check()
681 cid, tid, mc, key_id, pn, cc->pn); in wil_rx_crypto_check()
682 return -EINVAL; in wil_rx_crypto_check()
684 memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN); in wil_rx_crypto_check()
694 if ((d->dma.status & RX_DMA_STATUS_ERROR) && in wil_rx_error_check()
695 (d->dma.error & RX_DMA_ERROR_MIC)) { in wil_rx_error_check()
696 stats->rx_mic_error++; in wil_rx_error_check()
698 return -EFAULT; in wil_rx_error_check()
726 int len = skb->len; in wil_is_ptk_eapol_key()
733 len -= skb_mac_offset(skb); in wil_is_ptk_eapol_key()
742 if (hdr->type != WIL_1X_TYPE_EAPOL_KEY) in wil_is_ptk_eapol_key()
746 if (key->type != WIL_EAPOL_KEY_TYPE_WPA && in wil_is_ptk_eapol_key()
747 key->type != WIL_EAPOL_KEY_TYPE_RSN) in wil_is_ptk_eapol_key()
750 key_info = be16_to_cpu(key->key_info); in wil_is_ptk_eapol_key()
766 key_info = be16_to_cpu(key->key_info); in wil_skb_is_eap_3()
769 /* 3/4 of 4-Way Handshake */ in wil_skb_is_eap_3()
773 /* 1/4 of 4-Way Handshake */ in wil_skb_is_eap_3()
788 nonce = (u32 *)key->key_nonce; in wil_skb_is_eap_4()
809 if (vif->ptk_rekey_state != WIL_REKEY_WAIT_M4_SENT) { in wil_enable_tx_key_worker()
811 vif->ptk_rekey_state); in wil_enable_tx_key_worker()
816 cid = wil_find_cid_by_idx(wil, vif->mid, 0); in wil_enable_tx_key_worker()
824 rc = wmi_add_cipher_key(vif, 0, wil->sta[cid].addr, 0, NULL, in wil_enable_tx_key_worker()
827 vif->ptk_rekey_state = WIL_REKEY_IDLE; in wil_enable_tx_key_worker()
840 if (wdev->iftype != NL80211_IFTYPE_STATION || in wil_tx_complete_handle_eapol()
841 !test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities)) in wil_tx_complete_handle_eapol()
848 spin_lock_bh(&wil->eap_lock); in wil_tx_complete_handle_eapol()
849 switch (vif->ptk_rekey_state) { in wil_tx_complete_handle_eapol()
854 vif->ptk_rekey_state = WIL_REKEY_IDLE; in wil_tx_complete_handle_eapol()
861 vif->ptk_rekey_state); in wil_tx_complete_handle_eapol()
863 spin_unlock_bh(&wil->eap_lock); in wil_tx_complete_handle_eapol()
866 q = queue_work(wil->wmi_wq, &vif->enable_tx_key_worker); in wil_tx_complete_handle_eapol()
867 wil_dbg_misc(wil, "queue_work of enable_tx_key_worker -> %d\n", in wil_tx_complete_handle_eapol()
877 if (wdev->iftype != NL80211_IFTYPE_STATION || in wil_rx_handle_eapol()
878 !test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities)) in wil_rx_handle_eapol()
885 if (vif->ptk_rekey_state == WIL_REKEY_IDLE) in wil_rx_handle_eapol()
886 vif->ptk_rekey_state = WIL_REKEY_M3_RECEIVED; in wil_rx_handle_eapol()
894 struct wil_net_stats *stats, bool gro) in wil_netif_rx() argument
899 unsigned int len = skb->len; in wil_netif_rx()
907 if (wdev->iftype == NL80211_IFTYPE_STATION) { in wil_netif_rx()
909 if (mcast && ether_addr_equal(sa, ndev->dev_addr)) { in wil_netif_rx()
912 ndev->stats.rx_dropped++; in wil_netif_rx()
913 stats->rx_dropped++; in wil_netif_rx()
917 } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) { in wil_netif_rx()
924 int xmit_cid = wil_find_cid(wil, vif->mid, da); in wil_netif_rx()
942 xmit_skb->dev = ndev; in wil_netif_rx()
943 xmit_skb->priority += 256; in wil_netif_rx()
944 xmit_skb->protocol = htons(ETH_P_802_3); in wil_netif_rx()
947 wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len); in wil_netif_rx()
952 skb->protocol = eth_type_trans(skb, ndev); in wil_netif_rx()
953 skb->dev = ndev; in wil_netif_rx()
955 if (skb->protocol == cpu_to_be16(ETH_P_PAE)) in wil_netif_rx()
958 if (gro) in wil_netif_rx()
959 napi_gro_receive(&wil->napi_rx, skb); in wil_netif_rx()
963 ndev->stats.rx_packets++; in wil_netif_rx()
964 stats->rx_packets++; in wil_netif_rx()
965 ndev->stats.rx_bytes += len; in wil_netif_rx()
966 stats->rx_bytes += len; in wil_netif_rx()
968 ndev->stats.multicast++; in wil_netif_rx()
977 wil->txrx_ops.get_netif_rx_params(skb, &cid, &security); in wil_netif_rx_any()
979 stats = &wil->sta[cid].stats; in wil_netif_rx_any()
983 if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) { in wil_netif_rx_any()
984 wil_dbg_txrx(wil, "Rx drop %d bytes\n", skb->len); in wil_netif_rx_any()
986 ndev->stats.rx_dropped++; in wil_netif_rx_any()
987 stats->rx_replay++; in wil_netif_rx_any()
988 stats->rx_dropped++; in wil_netif_rx_any()
992 /* check errors reported by HW and update statistics */ in wil_netif_rx_any()
993 if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) { in wil_netif_rx_any()
1007 struct net_device *ndev = wil->main_ndev; in wil_rx_handle()
1008 struct wireless_dev *wdev = ndev->ieee80211_ptr; in wil_rx_handle()
1009 struct wil_ring *v = &wil->ring_rx; in wil_rx_handle()
1012 if (unlikely(!v->va)) { in wil_rx_handle()
1018 (*quota)--; in wil_rx_handle()
1021 if (wdev->iftype == NL80211_IFTYPE_MONITOR) { in wil_rx_handle()
1022 skb->dev = ndev; in wil_rx_handle()
1024 skb->ip_summed = CHECKSUM_UNNECESSARY; in wil_rx_handle()
1025 skb->pkt_type = PACKET_OTHERHOST; in wil_rx_handle()
1026 skb->protocol = htons(ETH_P_802_2); in wil_rx_handle()
1032 wil_rx_refill(wil, v->size); in wil_rx_handle()
1037 wil->rx_buf_len = rx_large_buf ? in wil_rx_buf_len_init()
1038 WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD; in wil_rx_buf_len_init()
1039 if (mtu_max > wil->rx_buf_len) { in wil_rx_buf_len_init()
1045 wil->rx_buf_len = mtu_max; in wil_rx_buf_len_init()
1051 struct wil_ring *vring = &wil->ring_rx; in wil_rx_init()
1056 if (vring->va) { in wil_rx_init()
1058 return -EINVAL; in wil_rx_init()
1063 vring->size = 1 << order; in wil_rx_init()
1064 vring->is_rx = true; in wil_rx_init()
1073 rc = wil_rx_refill(wil, vring->size); in wil_rx_init()
1086 struct wil_ring *vring = &wil->ring_rx; in wil_rx_fini()
1090 if (vring->va) in wil_rx_fini()
1097 struct vring_tx_desc *d = &desc->legacy; in wil_tx_desc_map()
1099 wil_desc_addr_set(&d->dma.addr, pa); in wil_tx_desc_map()
1100 d->dma.ip_length = 0; in wil_tx_desc_map()
1101 /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/ in wil_tx_desc_map()
1102 d->dma.b11 = 0/*14 | BIT(7)*/; in wil_tx_desc_map()
1103 d->dma.error = 0; in wil_tx_desc_map()
1104 d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */ in wil_tx_desc_map()
1105 d->dma.length = cpu_to_le16((u16)len); in wil_tx_desc_map()
1106 d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS); in wil_tx_desc_map()
1107 d->mac.d[0] = 0; in wil_tx_desc_map()
1108 d->mac.d[1] = 0; in wil_tx_desc_map()
1109 d->mac.d[2] = 0; in wil_tx_desc_map()
1110 d->mac.ucode_cmd = 0; in wil_tx_desc_map()
1111 /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */ in wil_tx_desc_map()
1112 d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) | in wil_tx_desc_map()
1120 spin_lock_bh(&txdata->lock); in wil_tx_data_init()
1121 txdata->dot1x_open = false; in wil_tx_data_init()
1122 txdata->enabled = 0; in wil_tx_data_init()
1123 txdata->idle = 0; in wil_tx_data_init()
1124 txdata->last_idle = 0; in wil_tx_data_init()
1125 txdata->begin = 0; in wil_tx_data_init()
1126 txdata->agg_wsize = 0; in wil_tx_data_init()
1127 txdata->agg_timeout = 0; in wil_tx_data_init()
1128 txdata->agg_amsdu = 0; in wil_tx_data_init()
1129 txdata->addba_in_progress = false; in wil_tx_data_init()
1130 txdata->mid = U8_MAX; in wil_tx_data_init()
1131 spin_unlock_bh(&txdata->lock); in wil_tx_data_init()
1164 struct wil_ring *vring = &wil->ring_tx[id]; in wil_vring_init_tx()
1165 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id]; in wil_vring_init_tx()
1177 lockdep_assert_held(&wil->mutex); in wil_vring_init_tx()
1179 if (vring->va) { in wil_vring_init_tx()
1181 rc = -EINVAL; in wil_vring_init_tx()
1186 vring->is_rx = false; in wil_vring_init_tx()
1187 vring->size = size; in wil_vring_init_tx()
1192 wil->ring2cid_tid[id][0] = cid; in wil_vring_init_tx()
1193 wil->ring2cid_tid[id][1] = tid; in wil_vring_init_tx()
1195 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); in wil_vring_init_tx()
1197 if (!vif->privacy) in wil_vring_init_tx()
1198 txdata->dot1x_open = true; in wil_vring_init_tx()
1199 rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd), in wil_vring_init_tx()
1208 rc = -EINVAL; in wil_vring_init_tx()
1212 spin_lock_bh(&txdata->lock); in wil_vring_init_tx()
1213 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); in wil_vring_init_tx()
1214 txdata->mid = vif->mid; in wil_vring_init_tx()
1215 txdata->enabled = 1; in wil_vring_init_tx()
1216 spin_unlock_bh(&txdata->lock); in wil_vring_init_tx()
1218 if (txdata->dot1x_open && (agg_wsize >= 0)) in wil_vring_init_tx()
1223 spin_lock_bh(&txdata->lock); in wil_vring_init_tx()
1224 txdata->dot1x_open = false; in wil_vring_init_tx()
1225 txdata->enabled = 0; in wil_vring_init_tx()
1226 spin_unlock_bh(&txdata->lock); in wil_vring_init_tx()
1228 wil->ring2cid_tid[id][0] = wil->max_assoc_sta; in wil_vring_init_tx()
1229 wil->ring2cid_tid[id][1] = 0; in wil_vring_init_tx()
1267 struct wil_ring *vring = &wil->ring_tx[ring_id]; in wil_tx_vring_modify()
1268 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id]; in wil_tx_vring_modify()
1272 lockdep_assert_held(&wil->mutex); in wil_tx_vring_modify()
1274 if (!vring->va) { in wil_tx_vring_modify()
1276 return -EINVAL; in wil_tx_vring_modify()
1279 if (wil->ring2cid_tid[ring_id][0] != cid || in wil_tx_vring_modify()
1280 wil->ring2cid_tid[ring_id][1] != tid) { in wil_tx_vring_modify()
1282 wil->ring2cid_tid[ring_id][0], in wil_tx_vring_modify()
1283 wil->ring2cid_tid[ring_id][1]); in wil_tx_vring_modify()
1286 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); in wil_tx_vring_modify()
1288 rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd), in wil_tx_vring_modify()
1297 rc = -EINVAL; in wil_tx_vring_modify()
1304 txdata->agg_wsize = 0; in wil_tx_vring_modify()
1305 if (txdata->dot1x_open && agg_wsize >= 0) in wil_tx_vring_modify()
1310 spin_lock_bh(&txdata->lock); in wil_tx_vring_modify()
1311 txdata->dot1x_open = false; in wil_tx_vring_modify()
1312 txdata->enabled = 0; in wil_tx_vring_modify()
1313 spin_unlock_bh(&txdata->lock); in wil_tx_vring_modify()
1314 wil->ring2cid_tid[ring_id][0] = wil->max_assoc_sta; in wil_tx_vring_modify()
1315 wil->ring2cid_tid[ring_id][1] = 0; in wil_tx_vring_modify()
1341 struct wil_ring *vring = &wil->ring_tx[id]; in wil_vring_init_bcast()
1342 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id]; in wil_vring_init_bcast()
1346 lockdep_assert_held(&wil->mutex); in wil_vring_init_bcast()
1348 if (vring->va) { in wil_vring_init_bcast()
1350 rc = -EINVAL; in wil_vring_init_bcast()
1355 vring->is_rx = false; in wil_vring_init_bcast()
1356 vring->size = size; in wil_vring_init_bcast()
1361 wil->ring2cid_tid[id][0] = wil->max_assoc_sta; /* CID */ in wil_vring_init_bcast()
1362 wil->ring2cid_tid[id][1] = 0; /* TID */ in wil_vring_init_bcast()
1364 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); in wil_vring_init_bcast()
1366 if (!vif->privacy) in wil_vring_init_bcast()
1367 txdata->dot1x_open = true; in wil_vring_init_bcast()
1368 rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, vif->mid, in wil_vring_init_bcast()
1378 rc = -EINVAL; in wil_vring_init_bcast()
1382 spin_lock_bh(&txdata->lock); in wil_vring_init_bcast()
1383 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); in wil_vring_init_bcast()
1384 txdata->mid = vif->mid; in wil_vring_init_bcast()
1385 txdata->enabled = 1; in wil_vring_init_bcast()
1386 spin_unlock_bh(&txdata->lock); in wil_vring_init_bcast()
1390 spin_lock_bh(&txdata->lock); in wil_vring_init_bcast()
1391 txdata->enabled = 0; in wil_vring_init_bcast()
1392 txdata->dot1x_open = false; in wil_vring_init_bcast()
1393 spin_unlock_bh(&txdata->lock); in wil_vring_init_bcast()
1408 cid = wil_find_cid(wil, vif->mid, da); in wil_find_tx_ucast()
1410 if (cid < 0 || cid >= wil->max_assoc_sta) in wil_find_tx_ucast()
1414 for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) { in wil_find_tx_ucast()
1415 if (!wil->ring_tx_data[i].dot1x_open && in wil_find_tx_ucast()
1416 skb->protocol != cpu_to_be16(ETH_P_PAE)) in wil_find_tx_ucast()
1418 if (wil->ring2cid_tid[i][0] == cid) { in wil_find_tx_ucast()
1419 struct wil_ring *v = &wil->ring_tx[i]; in wil_find_tx_ucast()
1420 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i]; in wil_find_tx_ucast()
1422 wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n", in wil_find_tx_ucast()
1424 if (v->va && txdata->enabled) { in wil_find_tx_ucast()
1453 * find 1-st vring eligible for this skb and use it. in wil_find_tx_ring_sta()
1456 ring = &wil->ring_tx[i]; in wil_find_tx_ring_sta()
1457 txdata = &wil->ring_tx_data[i]; in wil_find_tx_ring_sta()
1458 if (!ring->va || !txdata->enabled || txdata->mid != vif->mid) in wil_find_tx_ring_sta()
1461 cid = wil->ring2cid_tid[i][0]; in wil_find_tx_ring_sta()
1462 if (cid >= wil->max_assoc_sta) /* skip BCAST */ in wil_find_tx_ring_sta()
1465 if (!wil->ring_tx_data[i].dot1x_open && in wil_find_tx_ring_sta()
1466 skb->protocol != cpu_to_be16(ETH_P_PAE)) in wil_find_tx_ring_sta()
1469 wil_dbg_txrx(wil, "Tx -> ring %d\n", i); in wil_find_tx_ring_sta()
1483 * 2. Old (pseudo-DMS):
1484 * Find 1-st vring and return it;
1488 * - for PBSS
1496 int i = vif->bcast_ring; in wil_find_tx_bcast_1()
1500 v = &wil->ring_tx[i]; in wil_find_tx_bcast_1()
1501 txdata = &wil->ring_tx_data[i]; in wil_find_tx_bcast_1()
1502 if (!v->va || !txdata->enabled) in wil_find_tx_bcast_1()
1504 if (!wil->ring_tx_data[i].dot1x_open && in wil_find_tx_bcast_1()
1505 skb->protocol != cpu_to_be16(ETH_P_PAE)) in wil_find_tx_bcast_1()
1517 const struct ethhdr *eth = (void *)skb->data; in wil_check_multicast_to_unicast()
1518 const struct vlan_ethhdr *ethvlan = (void *)skb->data; in wil_check_multicast_to_unicast()
1521 if (!wil->multicast_to_unicast) in wil_check_multicast_to_unicast()
1525 ethertype = eth->h_proto; in wil_check_multicast_to_unicast()
1526 if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN) in wil_check_multicast_to_unicast()
1527 ethertype = ethvlan->h_vlan_encapsulated_proto; in wil_check_multicast_to_unicast()
1544 int cid = wil->ring2cid_tid[vring_index][0]; in wil_set_da_for_vring()
1546 ether_addr_copy(da, wil->sta[cid].addr); in wil_set_da_for_vring()
1561 /* find 1-st vring eligible for data */ in wil_find_tx_bcast_2()
1563 v = &wil->ring_tx[i]; in wil_find_tx_bcast_2()
1564 txdata = &wil->ring_tx_data[i]; in wil_find_tx_bcast_2()
1565 if (!v->va || !txdata->enabled || txdata->mid != vif->mid) in wil_find_tx_bcast_2()
1568 cid = wil->ring2cid_tid[i][0]; in wil_find_tx_bcast_2()
1569 if (cid >= wil->max_assoc_sta) /* skip BCAST */ in wil_find_tx_bcast_2()
1571 if (!wil->ring_tx_data[i].dot1x_open && in wil_find_tx_bcast_2()
1572 skb->protocol != cpu_to_be16(ETH_P_PAE)) in wil_find_tx_bcast_2()
1575 /* don't Tx back to source when re-routing Rx->Tx at the AP */ in wil_find_tx_bcast_2()
1576 if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN)) in wil_find_tx_bcast_2()
1587 wil_dbg_txrx(wil, "BCAST -> ring %d\n", i); in wil_find_tx_bcast_2()
1592 v2 = &wil->ring_tx[i]; in wil_find_tx_bcast_2()
1593 txdata2 = &wil->ring_tx_data[i]; in wil_find_tx_bcast_2()
1594 if (!v2->va || txdata2->mid != vif->mid) in wil_find_tx_bcast_2()
1596 cid = wil->ring2cid_tid[i][0]; in wil_find_tx_bcast_2()
1597 if (cid >= wil->max_assoc_sta) /* skip BCAST */ in wil_find_tx_bcast_2()
1599 if (!wil->ring_tx_data[i].dot1x_open && in wil_find_tx_bcast_2()
1600 skb->protocol != cpu_to_be16(ETH_P_PAE)) in wil_find_tx_bcast_2()
1603 if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN)) in wil_find_tx_bcast_2()
1608 wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i); in wil_find_tx_bcast_2()
1624 d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS); in wil_tx_desc_set_nr_frags()
1629 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
1630 * 2 - middle, 3 - last descriptor.
1638 d->dma.b11 = ETH_HLEN; /* MAC header length */ in wil_tx_desc_offload_setup_tso()
1639 d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS; in wil_tx_desc_offload_setup_tso()
1641 d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS); in wil_tx_desc_offload_setup_tso()
1643 d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK); in wil_tx_desc_offload_setup_tso()
1646 d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) | in wil_tx_desc_offload_setup_tso()
1648 d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS); in wil_tx_desc_offload_setup_tso()
1650 d->dma.ip_length = skb_net_hdr_len; in wil_tx_desc_offload_setup_tso()
1652 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS); in wil_tx_desc_offload_setup_tso()
1653 /* Calculate pseudo-header */ in wil_tx_desc_offload_setup_tso()
1654 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS); in wil_tx_desc_offload_setup_tso()
1659 * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
1670 if (skb->ip_summed != CHECKSUM_PARTIAL) in wil_tx_desc_offload_setup()
1673 d->dma.b11 = ETH_HLEN; /* MAC header length */ in wil_tx_desc_offload_setup()
1675 switch (skb->protocol) { in wil_tx_desc_offload_setup()
1677 protocol = ip_hdr(skb)->protocol; in wil_tx_desc_offload_setup()
1678 d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS); in wil_tx_desc_offload_setup()
1681 protocol = ipv6_hdr(skb)->nexthdr; in wil_tx_desc_offload_setup()
1684 return -EINVAL; in wil_tx_desc_offload_setup()
1689 d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS); in wil_tx_desc_offload_setup()
1691 d->dma.d0 |= in wil_tx_desc_offload_setup()
1696 d->dma.d0 |= in wil_tx_desc_offload_setup()
1700 return -EINVAL; in wil_tx_desc_offload_setup()
1703 d->dma.ip_length = skb_network_header_len(skb); in wil_tx_desc_offload_setup()
1705 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS); in wil_tx_desc_offload_setup()
1706 /* Calculate pseudo-header */ in wil_tx_desc_offload_setup()
1707 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS); in wil_tx_desc_offload_setup()
1714 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) | in wil_tx_last_desc()
1721 d->dma.d0 |= wil_tso_type_lst << in wil_set_tx_desc_last_tso()
1745 u32 swhead = vring->swhead; in __wil_tx_vring_tso()
1747 int nr_frags = skb_shinfo(skb)->nr_frags; in __wil_tx_vring_tso()
1749 int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */ in __wil_tx_vring_tso()
1751 int vring_index = vring - wil->ring_tx; in __wil_tx_vring_tso()
1752 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index]; in __wil_tx_vring_tso()
1764 int rc = -EINVAL; in __wil_tx_vring_tso()
1766 wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len, in __wil_tx_vring_tso()
1769 if (unlikely(!txdata->enabled)) in __wil_tx_vring_tso()
1770 return -EINVAL; in __wil_tx_vring_tso()
1772 /* A typical page 4K is 3-4 payloads, we assume each fragment in __wil_tx_vring_tso()
1781 return -ENOMEM; in __wil_tx_vring_tso()
1787 gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4); in __wil_tx_vring_tso()
1793 ip_hdr(skb)->tot_len = 0; in __wil_tx_vring_tso()
1794 ip_hdr(skb)->check = 0; in __wil_tx_vring_tso()
1799 ipv6_hdr(skb)->payload_len = 0; in __wil_tx_vring_tso()
1806 return -EINVAL; in __wil_tx_vring_tso()
1809 if (skb->ip_summed != CHECKSUM_PARTIAL) in __wil_tx_vring_tso()
1810 return -EINVAL; in __wil_tx_vring_tso()
1813 * packet's descriptors - read then once here in __wil_tx_vring_tso()
1818 _hdr_desc = &vring->va[i].tx.legacy; in __wil_tx_vring_tso()
1820 pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE); in __wil_tx_vring_tso()
1826 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa, in __wil_tx_vring_tso()
1832 vring->ctx[i].mapped_as = wil_mapped_as_single; in __wil_tx_vring_tso()
1833 hdr_ctx = &vring->ctx[i]; in __wil_tx_vring_tso()
1836 headlen = skb_headlen(skb) - hdrlen; in __wil_tx_vring_tso()
1838 for (f = headlen ? -1 : 0; f < nr_frags; f++) { in __wil_tx_vring_tso()
1844 frag = &skb_shinfo(skb)->frags[f]; in __wil_tx_vring_tso()
1856 rc = -ENOMEM; in __wil_tx_vring_tso()
1861 i = (swhead + descs_used) % vring->size; in __wil_tx_vring_tso()
1866 skb_frag_size(frag) - len, in __wil_tx_vring_tso()
1868 vring->ctx[i].mapped_as = wil_mapped_as_page; in __wil_tx_vring_tso()
1871 skb->data + in __wil_tx_vring_tso()
1872 skb_headlen(skb) - headlen, in __wil_tx_vring_tso()
1875 vring->ctx[i].mapped_as = wil_mapped_as_single; in __wil_tx_vring_tso()
1876 headlen -= lenmss; in __wil_tx_vring_tso()
1884 _desc = &vring->va[i].tx.legacy; in __wil_tx_vring_tso()
1888 first_ctx = &vring->ctx[i]; in __wil_tx_vring_tso()
1894 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, in __wil_tx_vring_tso()
1905 len -= lenmss; in __wil_tx_vring_tso()
1906 rem_data -= lenmss; in __wil_tx_vring_tso()
1913 if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) { in __wil_tx_vring_tso()
1918 hdr_ctx->nr_frags = sg_desc_cnt; in __wil_tx_vring_tso()
1927 first_ctx->nr_frags = sg_desc_cnt - 1; in __wil_tx_vring_tso()
1932 * for this mss - make sure not to copy in __wil_tx_vring_tso()
1941 if (f < nr_frags - 1 || len > 0) in __wil_tx_vring_tso()
1973 vring->ctx[i].skb = skb_get(skb); in __wil_tx_vring_tso()
1977 if (wil_val_in_range(wil->ring_idle_trsh, in __wil_tx_vring_tso()
1979 txdata->idle += get_cycles() - txdata->last_idle; in __wil_tx_vring_tso()
1980 wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", in __wil_tx_vring_tso()
1993 wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead); in __wil_tx_vring_tso()
1996 * committing them to HW in __wil_tx_vring_tso()
2000 if (wil->tx_latency) in __wil_tx_vring_tso()
2001 *(ktime_t *)&skb->cb = ktime_get(); in __wil_tx_vring_tso()
2003 memset(skb->cb, 0, sizeof(ktime_t)); in __wil_tx_vring_tso()
2005 wil_w(wil, vring->hwtail, vring->swhead); in __wil_tx_vring_tso()
2012 i = (swhead + descs_used - 1) % vring->size; in __wil_tx_vring_tso()
2013 d = (struct vring_tx_desc *)&vring->va[i].tx.legacy; in __wil_tx_vring_tso()
2014 _desc = &vring->va[i].tx.legacy; in __wil_tx_vring_tso()
2016 _desc->dma.status = TX_DMA_STATUS_DU; in __wil_tx_vring_tso()
2017 ctx = &vring->ctx[i]; in __wil_tx_vring_tso()
2020 descs_used--; in __wil_tx_vring_tso()
/* NOTE(review): partial excerpt of __wil_tx_ring() - non-consecutive source
 * lines; elided lines not shown. Comments only; code byte-identical.
 * Purpose (from visible lines): map an skb (head + page fragments) into a
 * chain of legacy Tx descriptors and kick the HW tail pointer.
 */
 2032 u32 swhead = ring->swhead; in __wil_tx_ring()
 2034 int nr_frags = skb_shinfo(skb)->nr_frags; in __wil_tx_ring()
 2036 int ring_index = ring - wil->ring_tx; in __wil_tx_ring()
 2037 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index]; in __wil_tx_ring()
/* multicast iff this is the VIF's broadcast ring */
 2041 bool mcast = (ring_index == vif->bcast_ring); in __wil_tx_ring()
 2045 skb->len, ring_index, nr_frags); in __wil_tx_ring()
/* reject Tx on a ring that is not enabled */
 2047 if (unlikely(!txdata->enabled)) in __wil_tx_ring()
 2048 return -EINVAL; in __wil_tx_ring()
/* presumably: not enough free descriptors for 1 + nr_frags - TODO confirm
 * against the elided check just above
 */
 2054 return -ENOMEM; in __wil_tx_ring()
 2056 _d = &ring->va[i].tx.legacy; in __wil_tx_ring()
/* map the linear (head) part of the skb for device DMA */
 2058 pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); in __wil_tx_ring()
 2060 wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index, in __wil_tx_ring()
 2061 skb_headlen(skb), skb->data, &pa); in __wil_tx_ring()
 2063 skb->data, skb_headlen(skb), false); in __wil_tx_ring()
/* presumably dma_mapping_error() path - TODO confirm elided condition */
 2066 return -EINVAL; in __wil_tx_ring()
 2067 ring->ctx[i].mapped_as = wil_mapped_as_single; in __wil_tx_ring()
 2068 /* 1-st segment */ in __wil_tx_ring()
 2069 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len, in __wil_tx_ring()
/* for the broadcast ring, pin the rate: enable MCS field, index 0/1 */
 2072 d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */ in __wil_tx_ring()
 2074 d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS); in __wil_tx_ring()
/* remember fragment count in first descriptor's ctx for completion */
 2083 ring->ctx[i].nr_frags = nr_frags; in __wil_tx_ring()
/* one descriptor per skb page fragment */
 2088 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in __wil_tx_ring()
 2095 i = (swhead + f + 1) % ring->size; in __wil_tx_ring()
 2096 _d = &ring->va[i].tx.legacy; in __wil_tx_ring()
 2104 ring->ctx[i].mapped_as = wil_mapped_as_page; in __wil_tx_ring()
 2105 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, in __wil_tx_ring()
 2107 /* no need to check return code - in __wil_tx_ring()
 2108 * if it succeeded for 1-st descriptor, in __wil_tx_ring()
/* last descriptor: end-of-packet, request writeback + interrupt */
 2114 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS); in __wil_tx_ring()
 2115 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS); in __wil_tx_ring()
 2116 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); in __wil_tx_ring()
/* hold an skb reference in ring ctx until Tx completion frees it */
 2126 ring->ctx[i].skb = skb_get(skb); in __wil_tx_ring()
/* idle-time accounting when used-descriptor count crosses the threshold */
 2130 if (wil_val_in_range(wil->ring_idle_trsh, in __wil_tx_ring()
 2132 txdata->idle += get_cycles() - txdata->last_idle; in __wil_tx_ring()
 2133 wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", in __wil_tx_ring()
 2146 wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead, in __wil_tx_ring()
 2147 ring->swhead); in __wil_tx_ring()
 2148 trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags); in __wil_tx_ring()
 2151 * committing them to HW in __wil_tx_ring()
/* timestamp skb->cb for Tx latency stats; zero it when disabled so
 * wil_tx_latency_calc() skips this skb
 */
 2155 if (wil->tx_latency) in __wil_tx_ring()
 2156 *(ktime_t *)&skb->cb = ktime_get(); in __wil_tx_ring()
 2158 memset(skb->cb, 0, sizeof(ktime_t)); in __wil_tx_ring()
/* publish new swhead to the HW tail register - commits descriptors */
 2160 wil_w(wil, ring->hwtail, ring->swhead); in __wil_tx_ring()
/* error unwind (presumably dma_error label): unmap every descriptor
 * built so far and report failure - TODO confirm elided loop header
 */
 2169 i = (swhead + f) % ring->size; in __wil_tx_ring()
 2170 ctx = &ring->ctx[i]; in __wil_tx_ring()
 2171 _d = &ring->va[i].tx.legacy; in __wil_tx_ring()
 2173 _d->dma.status = TX_DMA_STATUS_DU; in __wil_tx_ring()
 2174 wil->txrx_ops.tx_desc_unmap(dev, in __wil_tx_ring()
 2181 return -EINVAL; in __wil_tx_ring()
/* NOTE(review): partial excerpt of wil_tx_ring() - non-consecutive lines.
 * Locked front-end for Tx: serializes on txdata->lock, refuses Tx while
 * suspending/suspended/resuming, then dispatches TSO vs. regular path.
 */
 2187 int ring_index = ring - wil->ring_tx; in wil_tx_ring()
 2188 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index]; in wil_tx_ring()
 2191 spin_lock(&txdata->lock); in wil_tx_ring()
/* no Tx during any phase of the suspend/resume transition */
 2193 if (test_bit(wil_status_suspending, wil->status) || in wil_tx_ring()
 2194 test_bit(wil_status_suspended, wil->status) || in wil_tx_ring()
 2195 test_bit(wil_status_resuming, wil->status)) { in wil_tx_ring()
 2198 spin_unlock(&txdata->lock); in wil_tx_ring()
 2199 return -EINVAL; in wil_tx_ring()
/* GSO skbs go through the ops TSO handler, others through __wil_tx_ring */
 2202 rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring) in wil_tx_ring()
 2205 spin_unlock(&txdata->lock); in wil_tx_ring()
/* NOTE(review): partial excerpt of __wil_update_net_queues() - lines elided.
 * Decides whether the netdev queues for this VIF should be stopped or
 * woken; caller holds net_queue_lock (see wil_update_net_queues*).
 */
 2239 (int)(ring - wil->ring_tx), vif->mid, check_stop, in __wil_update_net_queues()
 2240 vif->net_queue_stopped); in __wil_update_net_queues()
 2243 check_stop, vif->mid, vif->net_queue_stopped); in __wil_update_net_queues()
/* already in the requested state - nothing to do */
 2249 if (check_stop == vif->net_queue_stopped) in __wil_update_net_queues()
 2257 vif->net_queue_stopped = true; in __wil_update_net_queues()
/* never wake queues while the device is suspending or suspended */
 2264 if (test_bit(wil_status_suspending, wil->status) || in __wil_update_net_queues()
 2265 test_bit(wil_status_suspended, wil->status) || in __wil_update_net_queues()
/* scan this VIF's other Tx rings (presumably to verify all have room
 * before waking - TODO confirm elided loop body)
 */
 2270 struct wil_ring *cur_ring = &wil->ring_tx[i]; in __wil_update_net_queues()
 2271 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i]; in __wil_update_net_queues()
/* skip rings of other VIFs, unallocated/disabled rings, and the
 * ring that triggered this update
 */
 2273 if (txdata->mid != vif->mid || !cur_ring->va || in __wil_update_net_queues()
 2274 !txdata->enabled || cur_ring == ring) in __wil_update_net_queues()
 2279 (int)(cur_ring - wil->ring_tx)); in __wil_update_net_queues()
 2288 vif->net_queue_stopped = false; in __wil_update_net_queues()
/* NOTE(review): excerpt of wil_update_net_queues() - wraps the elided
 * __wil_update_net_queues() call under net_queue_lock (non-BH variant).
 */
 2295 spin_lock(&wil->net_queue_lock); in wil_update_net_queues()
 2297 spin_unlock(&wil->net_queue_lock); in wil_update_net_queues()
/* NOTE(review): excerpt of wil_update_net_queues_bh() - same as the non-BH
 * variant but with bottom halves disabled, for process-context callers.
 */
 2303 spin_lock_bh(&wil->net_queue_lock); in wil_update_net_queues_bh()
 2305 spin_unlock_bh(&wil->net_queue_lock); in wil_update_net_queues_bh()
/* NOTE(review): partial excerpt of wil_start_xmit() - the ndo_start_xmit
 * hook. Visible lines: guard checks, ring selection by interface type,
 * and the -ENOMEM / drop accounting tail. Lines elided between markers.
 */
/* drop unless FW is ready and (below) the VIF is connected */
 2319 if (unlikely(!test_bit(wil_status_fwready, wil->status))) { in wil_start_xmit()
 2326 if (unlikely(!test_bit(wil_vif_fwconnected, vif->status))) { in wil_start_xmit()
/* no data Tx on a monitor interface */
 2331 if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_MONITOR)) { in wil_start_xmit()
 2338 if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) { in wil_start_xmit()
 2342 if (vif->pbss || wil_check_multicast_to_unicast(wil, skb)) in wil_start_xmit()
 2343 /* in pbss, no bcast VRING - duplicate skb in in wil_start_xmit()
 2347 else if (vif->wdev.iftype == NL80211_IFTYPE_AP) in wil_start_xmit()
/* presumably switch on the Tx return code; -ENOMEM means ring full -
 * TODO confirm the elided NETDEV_TX_BUSY handling
 */
 2373 case -ENOMEM: in wil_start_xmit()
 2381 ndev->stats.tx_dropped++; in wil_start_xmit()
/* NOTE(review): partial excerpt of wil_tx_latency_calc() - accounts the
 * time from Tx submit (timestamp stashed in skb->cb by the Tx path) to
 * completion into per-STA histogram bins and min/max/total counters.
 */
 2393 if (!wil->tx_latency) in wil_tx_latency_calc()
/* zeroed cb means the submit path did not timestamp this skb - skip */
 2396 if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0) in wil_tx_latency_calc()
 2399 skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb); in wil_tx_latency_calc()
/* histogram bin = latency / resolution, clamped to the last bin */
 2400 bin = skb_time_us / wil->tx_latency_res; in wil_tx_latency_calc()
 2401 bin = min_t(int, bin, WIL_NUM_LATENCY_BINS - 1); in wil_tx_latency_calc()
 2404 sta->tx_latency_bins[bin]++; in wil_tx_latency_calc()
 2405 sta->stats.tx_latency_total_us += skb_time_us; in wil_tx_latency_calc()
 2406 if (skb_time_us < sta->stats.tx_latency_min_us) in wil_tx_latency_calc()
 2407 sta->stats.tx_latency_min_us = skb_time_us; in wil_tx_latency_calc()
 2408 if (skb_time_us > sta->stats.tx_latency_max_us) in wil_tx_latency_calc()
 2409 sta->stats.tx_latency_max_us = skb_time_us; in wil_tx_latency_calc()
/* NOTE(review): partial excerpt of wil_tx_complete() - reclaims completed
 * Tx descriptors for one ring: walks from swtail, unmaps DMA, updates
 * netdev/per-STA stats, frees skbs. Lines elided between markers.
 */
 2423 struct wil_ring *vring = &wil->ring_tx[ringid]; in wil_tx_complete()
 2424 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid]; in wil_tx_complete()
/* CID of the peer this ring serves, for per-STA stats below */
 2426 int cid = wil->ring2cid_tid[ringid][0]; in wil_tx_complete()
/* nothing to do on an unallocated or disabled ring */
 2432 if (unlikely(!vring->va)) { in wil_tx_complete()
 2437 if (unlikely(!txdata->enabled)) { in wil_tx_complete()
 2446 if (cid < wil->max_assoc_sta) in wil_tx_complete()
 2447 stats = &wil->sta[cid].stats; in wil_tx_complete()
 2451 struct wil_ctx *ctx = &vring->ctx[vring->swtail]; in wil_tx_complete()
 2452 /* For the fragmented skb, HW will set DU bit only for the in wil_tx_complete()
/* index of the LAST descriptor of this (possibly fragmented) skb;
 * DU is checked there only (see comment above)
 */
 2456 int lf = (vring->swtail + ctx->nr_frags) % vring->size; in wil_tx_complete()
 2459 _d = &vring->va[lf].tx.legacy; in wil_tx_complete()
 2460 if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU))) in wil_tx_complete()
/* reclaim every descriptor of this skb: [swtail, lf] */
 2463 new_swtail = (lf + 1) % vring->size; in wil_tx_complete()
 2464 while (vring->swtail != new_swtail) { in wil_tx_complete()
 2469 ctx = &vring->ctx[vring->swtail]; in wil_tx_complete()
 2470 skb = ctx->skb; in wil_tx_complete()
 2471 _d = &vring->va[vring->swtail].tx.legacy; in wil_tx_complete()
 2475 dmalen = le16_to_cpu(d->dma.length); in wil_tx_complete()
 2476 trace_wil6210_tx_done(ringid, vring->swtail, dmalen, in wil_tx_complete()
 2477 d->dma.error); in wil_tx_complete()
 2480 ringid, vring->swtail, dmalen, in wil_tx_complete()
 2481 d->dma.status, d->dma.error); in wil_tx_complete()
/* release the DMA mapping made at Tx time (single or page) */
 2485 wil->txrx_ops.tx_desc_unmap(dev, in wil_tx_complete()
/* success vs. error accounting, netdev-wide and per-STA */
 2490 if (likely(d->dma.error == 0)) { in wil_tx_complete()
 2491 ndev->stats.tx_packets++; in wil_tx_complete()
 2492 ndev->stats.tx_bytes += skb->len; in wil_tx_complete()
 2494 stats->tx_packets++; in wil_tx_complete()
 2495 stats->tx_bytes += skb->len; in wil_tx_complete()
 2498 &wil->sta[cid]); in wil_tx_complete()
 2501 ndev->stats.tx_errors++; in wil_tx_complete()
 2503 stats->tx_errors++; in wil_tx_complete()
/* EAPOL frames get special handling (presumably key-install signaling -
 * TODO confirm elided call)
 */
 2506 if (skb->protocol == cpu_to_be16(ETH_P_PAE)) in wil_tx_complete()
 2509 wil_consume_skb(skb, d->dma.error == 0); in wil_tx_complete()
 2518 /* There is no need to touch HW descriptor: in wil_tx_complete()
 2519 * - ststus bit TX_DMA_STATUS_DU is set by design, in wil_tx_complete()
 2521 * - rest of descriptor will be initialized on Tx. in wil_tx_complete()
 2523 vring->swtail = wil_ring_next_tail(vring); in wil_tx_complete()
/* ring drained below the idle threshold - start idle-time accounting */
 2530 if (wil_val_in_range(wil->ring_idle_trsh, in wil_tx_complete()
 2532 wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n", in wil_tx_complete()
 2534 txdata->last_idle = get_cycles(); in wil_tx_complete()
2567 wil->txrx_ops.configure_interrupt_moderation = in wil_init_txrx_ops_legacy_dma()
2570 wil->txrx_ops.tx_desc_map = wil_tx_desc_map; in wil_init_txrx_ops_legacy_dma()
2571 wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap; in wil_init_txrx_ops_legacy_dma()
2572 wil->txrx_ops.tx_ring_tso = __wil_tx_vring_tso; in wil_init_txrx_ops_legacy_dma()
2573 wil->txrx_ops.ring_init_tx = wil_vring_init_tx; in wil_init_txrx_ops_legacy_dma()
2574 wil->txrx_ops.ring_fini_tx = wil_vring_free; in wil_init_txrx_ops_legacy_dma()
2575 wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast; in wil_init_txrx_ops_legacy_dma()
2576 wil->txrx_ops.tx_init = wil_tx_init; in wil_init_txrx_ops_legacy_dma()
2577 wil->txrx_ops.tx_fini = wil_tx_fini; in wil_init_txrx_ops_legacy_dma()
2578 wil->txrx_ops.tx_ring_modify = wil_tx_vring_modify; in wil_init_txrx_ops_legacy_dma()
2580 wil->txrx_ops.rx_init = wil_rx_init; in wil_init_txrx_ops_legacy_dma()
2581 wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp; in wil_init_txrx_ops_legacy_dma()
2582 wil->txrx_ops.get_reorder_params = wil_get_reorder_params; in wil_init_txrx_ops_legacy_dma()
2583 wil->txrx_ops.get_netif_rx_params = in wil_init_txrx_ops_legacy_dma()
2585 wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check; in wil_init_txrx_ops_legacy_dma()
2586 wil->txrx_ops.rx_error_check = wil_rx_error_check; in wil_init_txrx_ops_legacy_dma()
2587 wil->txrx_ops.is_rx_idle = wil_is_rx_idle; in wil_init_txrx_ops_legacy_dma()
2588 wil->txrx_ops.rx_fini = wil_rx_fini; in wil_init_txrx_ops_legacy_dma()